Update zfs crypt setup
@@ -18,7 +18,7 @@ systemctl restart zfs-share
## Initial ZFS Setup

### Find disk serial numbers

```shell
-for i in a b c d e f g; do echo -n "/dev/sd$i: "; hdparm -I /dev/sd$i | awk '/Serial Number/ {print $3}'; done
+for i in a b c d e f g h i; do echo -n "/dev/sd$i: "; hdparm -I /dev/sd$i | awk '/Serial Number/ {print $3}'; done
```
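The persistent `/dev/disk/by-id/ata-...` paths used for the LUKS containers further down carry the same serial numbers, so one way to cross-check the output above is to list those aliases directly; a minimal sketch using only coreutils:

```shell
# Show which /dev/disk/by-id aliases point at which sdX devices (whole disks only)
ls -l /dev/disk/by-id/ | grep -v -- '-part'
```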
### Wipe disks
```shell

@@ -29,14 +29,23 @@ sgdisk --zap-all /dev/sdX

```shell
sgdisk -n1:0:0 -t1:BF01 /dev/sdX
```
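A quick way to verify the result is to print the GPT back out; `sgdisk -p` on the same `/dev/sdX` placeholder shows the new whole-disk partition and its BF01 (Solaris/ZFS) type code:

```shell
# Print the partition table; partition 1 should span the disk with type code BF01
sgdisk -p /dev/sdX
```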
+### Format LUKS containers (and set passphrase)
+```shell
+cryptsetup luksFormat /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1
+cryptsetup luksFormat /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PPLPH-part1
+cryptsetup luksFormat /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP0K1-part1
+cryptsetup luksFormat /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WXB1HB4MJCMM-part1
+```
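The updated pool below is built on the opened LUKS mappings under /dev/mapper rather than on the raw partitions, so each container has to be opened first. The zfs_mount.sh script added in this commit does this for all four disks; a minimal manual sketch for a single disk looks like:

```shell
# Open the LUKS container under the same mapper name that the zpool create below expects
cryptsetup luksOpen /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1 \
    ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1
```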
### Create zpool

```shell
-zpool create -o ashift=12 -O atime=off -O compression=lz4 \
-    zv1 raidz2 \
-    /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1 \
-    /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PPLPH-part1 \
-    /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP0K1-part1 \
-    /dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WXB1HB4MJCMM-part1
+./mountVolumes.sh mount
+zpool create -o ashift=12 -o autoexpand=on -o autoreplace=on -O atime=off -O compression=lz4 \
+    -O acltype=posixacl -O xattr=sa zv1 raidz2 \
+    /dev/mapper/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1 \
+    /dev/mapper/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PPLPH-part1 \
+    /dev/mapper/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP0K1-part1 \
+    /dev/mapper/ata-WDC_WD60EFRX-68L0BN1_WD-WXB1HB4MJCMM-part1
+zpool add zv1 log mirror /dev/nvme0n1p5 /dev/nvme1n1p5
```
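Once the pool exists, its layout and the options requested at creation time can be confirmed with the stock tools; a small check along these lines:

```shell
# Expect one raidz2 vdev with the four mapper devices plus the mirrored NVMe log
zpool status zv1
# Pool-level options from the create/add commands above
zpool get ashift,autoexpand,autoreplace zv1
# Dataset-level options inherited from the pool root
zfs get compression,atime,acltype,xattr zv1
```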
### Create datasets

```shell

@@ -46,13 +55,13 @@ zfs create zv1/fotos

```

### Set dataset quota

```shell
-zfs set quota=1T zv1/daten zv1/fotos zv1/homes
+zfs set quota=1T zv1/homes zv1/daten zv1/fotos
```
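To verify the quotas took effect (and to see how much of each 1T is already used), `zfs get` reports them per dataset:

```shell
zfs get quota,used zv1/homes zv1/daten zv1/fotos
```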
### Transfer data

```shell
+rsync --stats -avhP --exclude-from=/var/tmp/ignorelist root@atlas:/home/ /zv1/homes/
rsync --stats -avhP --exclude Pictures root@atlas:/mnt/save/fet_daten/ /zv1/daten/
rsync --stats -avhP root@atlas:/mnt/save/fet_daten/Pictures/ /zv1/fotos/
-rsync --stats -avhP root@atlas:/home/ /zv1/homes/
```
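rsync's `--dry-run` flag can preview any of these transfers without copying anything; for example, for the homes transfer above (same host and paths):

```shell
# List what would be transferred, without writing to /zv1/homes/
rsync --stats -avhP --dry-run --exclude-from=/var/tmp/ignorelist root@atlas:/home/ /zv1/homes/
```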
### Before reinstalling
```shell

roles/ariane/files/zfs_mount.sh (new file, 135 lines)
@@ -0,0 +1,135 @@
#!/bin/bash

# https://bitbucket.org/dewoodruff/zfs-on-linux-luks-mountvolumes/src/5836def278a3e462f1f508ba02b7fa236dd28717/mountVolumes.sh

# List our zpools to be mounted, one per line, no delimiter
pools=(
    "zv1"
)

# List all devs and their aliases to be used with luksOpen
declare -A devs=(
    ["/dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1"]="ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP32E-part1"
    ["/dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PPLPH-part1"]="ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PPLPH-part1"
    ["/dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP0K1-part1"]="ata-WDC_WD60EFRX-68L0BN1_WD-WX21D36PP0K1-part1"
    ["/dev/disk/by-id/ata-WDC_WD60EFRX-68L0BN1_WD-WXB1HB4MJCMM-part1"]="ata-WDC_WD60EFRX-68L0BN1_WD-WXB1HB4MJCMM-part1"
)

# Set your log file name
LOG=mountVolumes.log

# The real work happens below
activePools=()
date >> $LOG

function getPoolStatus {
    echo "Checking pool status:" | tee -a $LOG
    for pool in "${pools[@]}"
    do
        echo -en "\t$pool: " | tee -a $LOG
        status=$(zpool status $pool 2>&1 | grep "state:" | cut -f2 -d:)
        if [ -z "$status" ];
        then
            echo "unknown - not imported" | tee -a $LOG
        else
            echo $status | tee -a $LOG
            activePools+=($pool)
        fi
    done
}

function exportActivePools {
    # Only run if getPoolStatus found at least one imported pool
    if [ ${#activePools[@]} -gt 0 ];
    then
        echo -n "Exporting pools... " | tee -a $LOG
        for pool in "${activePools[@]}"
        do
            # Log both stdout and stderr of the export
            zpool export -f $pool >> $LOG 2>&1 || { echo "Problem exporting $pool!" | tee -a $LOG; exit 0; }
        done
        echo " done."
    fi
}

function importPools {
    echo -n "Importing pools..."
    for pool in "${pools[@]}"
    do
        # Log both stdout and stderr of the import
        zpool import $pool >> $LOG 2>&1 || { echo "Problem importing $pool!" | tee -a $LOG; exit 0; }
    done
    echo " done."
}

function closeAllLUKS {
    echo "Making sure all LUKS disks are closed..."
    for dev in "${devs[@]}"
    do
        # Best-effort: the mapping is usually not open yet when this runs in the "mount" path
        cryptsetup close $dev >> $LOG 2>&1 || echo "Could not close $dev (probably not open)." | tee -a $LOG
    done
    echo "Done."
}

function openAllLUKS {
    read -s -p "Enter LUKS passphrase: " pass1
    echo ""
    read -s -p "Confirm LUKS passphrase: " pass2
    echo ""

    if [ "$pass1" = "$pass2" ];
    then
        for dev in "${!devs[@]}"
        do
            echo "Opening $dev to ${devs["$dev"]}" | tee -a $LOG
            # Passphrase is passed on stdin; log both stdout and stderr
            echo "$pass1" | cryptsetup luksOpen $dev ${devs[$dev]} >> $LOG 2>&1 || { echo "Problem opening $dev!" | tee -a $LOG; exit 0; }
        done
    else
        echo "ERROR: passphrases don't match!"
    fi
    pass1=""
    pass2=""
}

function LUKSStatus {
    for dev in "${devs[@]}"
    do
        cryptsetup status $dev | head -1 | tee -a $LOG
    done | sort
}

function unmount {
    zfs unshare -a
    getPoolStatus
    exportActivePools
    closeAllLUKS
    getPoolStatus
}

if [ "$1" = "status" ];
|
||||
then
|
||||
LUKSStatus
|
||||
getPoolStatus
|
||||
elif [ "$1" = "mount" ];
|
||||
then
|
||||
getPoolStatus
|
||||
exportActivePools
|
||||
closeAllLUKS
|
||||
openAllLUKS
|
||||
importPools
|
||||
getPoolStatus
|
||||
zfs share -a
|
||||
elif [ "$1" = "unmount" ];
|
||||
then
|
||||
unmount
|
||||
elif [ "$1" = "reboot" ];
|
||||
then
|
||||
unmount
|
||||
reboot
|
||||
elif [ "$1" = "shutdown" ];
|
||||
then
|
||||
unmount
|
||||
shutdown -h now
|
||||
elif [ "$1" = "freespace" ];
|
||||
then
|
||||
zfs list
|
||||
else
|
||||
echo "Usage: ./mountVolumes.sh [status|mount|unmount|reboot|shutdown|freespace]"
|
||||
fi
|
||||
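Deployed to /usr/local/bin by the Ansible task below (and assuming that directory is on root's PATH), typical invocations of the script look like this:

```shell
# Show LUKS mapping and pool state
zfs_mount.sh status
# Open all containers, import the pool and share the datasets
zfs_mount.sh mount
# Export the pool and close the containers again
zfs_mount.sh unmount
```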
@@ -32,9 +32,12 @@
- name: zfs - set quota
  zfs: "name={{ item }} state=present quota=1T"
  with_items:
+    - zv1/homes
    - zv1/daten
    - zv1/fotos
-    - zv1/homes

+- name: zfs - zfs_mount.sh
+  copy: src=zfs_mount.sh dest=/usr/local/bin/ owner=root group=root mode=0755

- name: zfs - zfs_health.sh
  copy: src=zfs_health.sh dest=/usr/local/bin/ owner=root group=root mode=0755
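After the role has run, both helper scripts should be in place and executable on the target; a quick check on the host (paths and mode taken from the copy tasks above):

```shell
ls -l /usr/local/bin/zfs_mount.sh /usr/local/bin/zfs_health.sh
```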
@@ -4,6 +4,10 @@
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -s 192.168.86.0/24 -d 192.168.86.0/24 -p udp -m multiport --dports 10053,111,2049,32769,875,892 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -s 192.168.86.0/24 -d 192.168.86.0/24 -p tcp -m multiport --dports 10053,111,2049,32803,875,892 -m state --state NEW,ESTABLISHED -j ACCEPT
-A OUTPUT -s 192.168.86.0/24 -d 192.168.86.0/24 -p udp -m multiport --sports 10053,111,2049,32769,875,892 -m state --state ESTABLISHED -j ACCEPT
-A OUTPUT -s 192.168.86.0/24 -d 192.168.86.0/24 -p tcp -m multiport --sports 10053,111,2049,32803,875,892 -m state --state ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
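The new multiport lists appear to cover rpcbind (111), NFS (2049) and statically pinned auxiliary NFS daemon ports for the local 192.168.86.0/24 network; whether the running services actually sit on those ports can be checked on the server with rpcinfo:

```shell
# List registered RPC services and the ports they bound
rpcinfo -p
```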