LINUX.ORG.RU

Change history

Correction by Twissel (current version):

#!/bin/sh

#Setup/variables:

#Each snapshot name must be unique, timestamp is a good choice.
#You can also use Solaris date, but I don't know the correct syntax.
snapshot_string=DO_NOT_DELETE_remote_replication_
timestamp=$(/usr/bin/date '+%Y%m%d%H%M%S')
source_pool=data
destination_pool=data2
new_snap="$source_pool"@"$snapshot_string""$timestamp"
destination_host=s2.local.site
# Lock file: its presence means the initial full replication has already been done.
file="test.lock"

# First run: create the first recursive snapshot of the whole pool,
# send it in full over SSH and create the lock file.
create_full_snapshot() {
  zfs snapshot -r "$new_snap"
  # Initial replication via SSH.
  zfs send -R "$new_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"
  touch "$file"
}

create_incremental_snapshot() {

	# Incremental sends:

	# Get the name of the most recent replication snapshot (the last one listed).
	old_snap=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$source_pool"@"$snapshot_string" | tail -n 1)
	# Create new recursive snapshot of the whole pool.
	zfs snapshot -r "$new_snap"
	# Incremental replication via SSH.
	zfs send -R -I "$old_snap" "$new_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"

}

if [ -e "$file" ]; then
	create_incremental_snapshot
else
	create_full_snapshot
fi
# Delete older snaps on the local source (grep -v inverts the selection)
delete_from=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$snapshot_string" | grep -v "$timestamp")
for snap in $delete_from; do
    zfs destroy "$snap"
done
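
To check that a run actually reached the receiver, the replicated snapshots can be listed on the destination pool. The host and pool names below are the ones used above; the check itself is my addition, not part of the script.

# List the replication snapshots present on the destination pool (verification sketch).
ssh s2.local.site zfs list -H -o name -t snapshot -r data2 | grep DO_NOT_DELETE_remote_replication_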

The crappy code does work after all, while the code from the OP throws an error even with distinct snapshot names

#!/bin/sh

#Setup/variables:

#Each snapshot name must be unique, timestamp is a good choice.
#You can also use Solaris date, but I don't know the correct syntax.
snapshot_string=DO_NOT_DELETE_remote_replication_
timestamp=$(/usr/bin/date '+%Y%m%d%H%M%S')
source_pool=data
destination_pool=data
new_full_snap="$source_pool"@"$snapshot_string""$timestamp""f"
new_inc_snap="$source_pool"@"$snapshot_string""$timestamp""i"
destination_host=s2.local.site

# Initial send:

# Create first recursive snapshot of the whole pool.
zfs snapshot -r "$new_full_snap"
# Initial replication via SSH.
zfs send -R "$new_full_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"

# Incremental sends:

# Get old snapshot name.
old_snap=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$source_pool"@"$snapshot_string" | tail --lines=1)
# Create new recursive snapshot of the whole pool.
zfs snapshot -r "$new_inc_snap"
# Incremental replication via SSH.
zfs send -R -I "$old_snap" "$new_inc_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"
# Delete older snaps on the local source (grep -v inverts the selection)
delete_from=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$snapshot_string" | grep -v "$timestamp")
for snap in $delete_from; do
    zfs destroy "$snap"
done

The error:

cannot receive new filesystem stream: destination has snapshots (eg. data2@DO_NOT_DELETE_remote_replication_20201208215316i)
must destroy them to overwrite it
cannot receive incremental stream: most recent snapshot of data2 does not
match incremental source

Where did I mess up, I wonder?
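
A guess rather than a verified diagnosis: the OP-style script runs both the initial full send and the incremental send on every invocation. On a second run the full zfs recv fails because the destination already holds replication snapshots, and old_snap then resolves to the full snapshot created locally moments earlier, which never reached the receiver, so the incremental source no longer matches the destination's latest snapshot. In the spirit of the lock-file check above, one could instead ask the destination what it already has before choosing a path. The snippet below is only a sketch built on that assumption; it reuses the variable and function names from the script above.

# Sketch: choose full vs. incremental based on what the destination already has.
# If the ssh check fails or finds nothing, we fall back to the initial full send.
if ssh "$destination_host" zfs list -H -o name -t snapshot -r "$destination_pool" | grep -q "$snapshot_string"; then
	create_incremental_snapshot
else
	create_full_snapshot
fi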

Original version by Twissel:

I called the function incorrectly

#!/bin/sh

#Setup/variables:

#Each snapshot name must be unique, timestamp is a good choice.
#You can also use Solaris date, but I don't know the correct syntax.
snapshot_string=DO_NOT_DELETE_remote_replication_
timestamp=$(/usr/bin/date '+%Y%m%d%H%M%S')
source_pool=data
destination_pool=data2
new_snap="$source_pool"@"$snapshot_string""$timestamp"
destination_host=s2.local.site
# Lock file: its presence means the initial full replication has already been done.
file="test.lock"

# First run: create the first recursive snapshot of the whole pool,
# send it in full over SSH and create the lock file.
create_full_snapshot() {
  zfs snapshot -r "$new_snap"
  # Initial replication via SSH.
  zfs send -R "$new_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"
  touch "$file"
}

create_incremental_snapshot() {

	# Incremental sends:

	# Get the name of the most recent replication snapshot (the last one listed).
	old_snap=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$source_pool"@"$snapshot_string" | tail -n 1)
	# Create new recursive snapshot of the whole pool.
	zfs snapshot -r "$new_snap"
	# Incremental replication via SSH.
	zfs send -R -I "$old_snap" "$new_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"

}

if [ -e "$file" ]; then
	create_incremental_snapshot
else
	create_full_snapshot
fi
# Delete older snaps on the local source (grep -v inverts the selection)
delete_from=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$snapshot_string" | grep -v "$timestamp")
for snap in $delete_from; do
    zfs destroy "$snap"
done

The crappy code does work after all, while the code from the OP throws an error even with distinct snapshot names

#!/bin/sh

#Setup/variables:

#Each snapshot name must be unique, timestamp is a good choice.
#You can also use Solaris date, but I don't know the correct syntax.
snapshot_string=DO_NOT_DELETE_remote_replication_
timestamp=$(/usr/bin/date '+%Y%m%d%H%M%S')
source_pool=data
destination_pool=data
new_full_snap="$source_pool"@"$snapshot_string""$timestamp""f"
new_inc_snap="$source_pool"@"$snapshot_string""$timestamp""i"
destination_host=s2.local.site

# Initial send:

# Create first recursive snapshot of the whole pool.
zfs snapshot -r "$new_full_snap"
# Initial replication via SSH.
zfs send -R "$new_full_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"

# Incremental sends:

# Get old snapshot name.
old_snap=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$source_pool"@"$snapshot_string" | tail --lines=1)
# Create new recursive snapshot of the whole pool.
zfs snapshot -r "$new_inc_snap"
# Incremental replication via SSH.
zfs send -R -I "$old_snap" "$new_inc_snap" | ssh "$destination_host" zfs recv -Fdu "$destination_pool"
# Delete older snaps on the local source (grep -v inverts the selection)
delete_from=$(zfs list -H -o name -t snapshot -r "$source_pool" | grep "$snapshot_string" | grep -v "$timestamp")
for snap in $delete_from; do
    zfs destroy "$snap"
done

The error:

cannot receive new filesystem stream: destination has snapshots (eg. data2@DO_NOT_DELETE_remote_replication_20201208215316i)
must destroy them to overwrite it
cannot receive incremental stream: most recent snapshot of data2 does not
match incremental source

Where did I mess up, I wonder?