
Wednesday, February 14, 2018

Proxmox: replacing a failed USB system flash drive

root@pve5:/var/log# df -h
Filesystem                      Size  Used Avail Use% Mounted on
udev                             32G     0   32G   0% /dev
tmpfs                           6.3G   74M  6.3G   2% /run
rpool/ROOT/pve-1                 12G  3.1G  8.9G  26% /
tmpfs                            32G   60M   32G   1% /dev/shm
tmpfs                           5.0M     0  5.0M   0% /run/lock
tmpfs                            32G     0   32G   0% /sys/fs/cgroup
pve5_zpool                      7.1T  1.2T  5.9T  17% /pve5_zpool
rpool                           8.9G  128K  8.9G   1% /rpool
rpool/ROOT                      8.9G  128K  8.9G   1% /rpool/ROOT
rpool/data                      8.9G  128K  8.9G   1% /rpool/data
/dev/fuse                        30M   48K   30M   1% /etc/pve
pve5:/pve5_zpool                7.1T  1.2T  5.9T  17% /mnt/pve/nfs50
tmpfs                           6.3G     0  6.3G   0% /run/user/0
10.10.10.105:/volume1/F105       19T  465G   18T   3% /mnt/pve/f105
172.22.110.100:/volume1/syn105   19T  465G   18T   3% /mnt/pve/S105
172.22.110.200:/FS/pve_data     8.0T  1.5T  6.6T  19% /mnt/pve/backup
-------------------Notes begin------------------------------------------------------------------------------------
Take a look at the disk mount situation.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/var/log# lsblk
NAME    MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0     7:0    0     8G  0 loop
loop1     7:1    0   200G  0 loop
sda       8:0    0   3.7T  0 disk
├─sda1    8:1    0   3.7T  0 part
└─sda9    8:9    0     8M  0 part
sdb       8:16   0   3.7T  0 disk
├─sdb1    8:17   0   3.7T  0 part
└─sdb9    8:25   0     8M  0 part
sdc       8:32   0   3.7T  0 disk
├─sdc1    8:33   0   3.7T  0 part
└─sdc9    8:41   0     8M  0 part
sdd       8:48   0   3.7T  0 disk
├─sdd1    8:49   0   3.7T  0 part
└─sdd9    8:57   0     8M  0 part
sde       8:64   1  14.3G  0 disk
├─sde1    8:65   1  1007K  0 part
├─sde2    8:66   1  14.3G  0 part
└─sde9    8:73   1     8M  0 part
sr0      11:0    1  1024M  0 rom
zd0     230:0    0   1.8G  0 disk
nvme1n1 259:0    0 238.5G  0 disk
nvme0n1 259:1    0 238.5G  0 disk
-------------------Notes begin------------------------------------------------------------------------------------
Take a look at the block devices. Only one USB stick (sde) is present; the failed one has been unplugged.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/var/log# lsblk
NAME    MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0     7:0    0     8G  0 loop
loop1     7:1    0   200G  0 loop
sda       8:0    0   3.7T  0 disk
├─sda1    8:1    0   3.7T  0 part
└─sda9    8:9    0     8M  0 part
sdb       8:16   0   3.7T  0 disk
├─sdb1    8:17   0   3.7T  0 part
└─sdb9    8:25   0     8M  0 part
sdc       8:32   0   3.7T  0 disk
├─sdc1    8:33   0   3.7T  0 part
└─sdc9    8:41   0     8M  0 part
sdd       8:48   0   3.7T  0 disk
├─sdd1    8:49   0   3.7T  0 part
└─sdd9    8:57   0     8M  0 part
sde       8:64   1  14.3G  0 disk
├─sde1    8:65   1  1007K  0 part
├─sde2    8:66   1  14.3G  0 part
└─sde9    8:73   1     8M  0 part
sdf       8:80   1  14.3G  0 disk
├─sdf1    8:81   1  1007K  0 part
├─sdf2    8:82   1  14.3G  0 part
└─sdf9    8:89   1     8M  0 part
sr0      11:0    1  1024M  0 rom
zd0     230:0    0   1.8G  0 disk
nvme1n1 259:0    0 238.5G  0 disk
nvme0n1 259:1    0 238.5G  0 disk
-------------------Notes begin------------------------------------------------------------------------------------
After plugging the USB stick back in, check the devices again: sdf has appeared.
-------------------Notes end------------------------------------------------------------------------------------
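-------------------Notes begin------------------------------------------------------------------------------------
When it is not obvious which /dev node a freshly inserted stick received, lsblk can print the transport and serial
directly (a convenience sketch, assuming a reasonably recent util-linux that knows the TRAN and SERIAL columns):

# TRAN shows "usb" for USB sticks; SERIAL matches the /dev/disk/by-id names
lsblk -o NAME,SIZE,TRAN,SERIAL
-------------------Notes end------------------------------------------------------------------------------------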
root@pve5:/var/log# gdisk /dev/sdf
GPT fdisk (gdisk) version 1.0.1

Partition table scan:
  MBR: protective
  BSD: not present
  APM: not present
  GPT: present

Found valid GPT with protective MBR; using GPT.

Command (? for help): l
0700 Microsoft basic data  0c01 Microsoft reserved    2700 Windows RE
3000 ONIE boot             3001 ONIE config           3900 Plan 9
4100 PowerPC PReP boot     4200 Windows LDM data      4201 Windows LDM metadata
4202 Windows Storage Spac  7501 IBM GPFS              7f00 ChromeOS kernel
7f01 ChromeOS root         7f02 ChromeOS reserved     8200 Linux swap
8300 Linux filesystem      8301 Linux reserved        8302 Linux /home
8303 Linux x86 root (/)    8304 Linux x86-64 root (/  8305 Linux ARM64 root (/)
8306 Linux /srv            8307 Linux ARM32 root (/)  8400 Intel Rapid Start
8e00 Linux LVM             a500 FreeBSD disklabel     a501 FreeBSD boot
a502 FreeBSD swap          a503 FreeBSD UFS           a504 FreeBSD ZFS
a505 FreeBSD Vinum/RAID    a580 Midnight BSD data     a581 Midnight BSD boot
a582 Midnight BSD swap     a583 Midnight BSD UFS      a584 Midnight BSD ZFS
a585 Midnight BSD Vinum    a600 OpenBSD disklabel     a800 Apple UFS
a901 NetBSD swap           a902 NetBSD FFS            a903 NetBSD LFS
a904 NetBSD concatenated   a905 NetBSD encrypted      a906 NetBSD RAID
ab00 Recovery HD           af00 Apple HFS/HFS+        af01 Apple RAID
af02 Apple RAID offline    af03 Apple label           af04 AppleTV recovery
af05 Apple Core Storage    bc00 Acronis Secure Zone   be00 Solaris boot
bf00 Solaris root          bf01 Solaris /usr & Mac Z  bf02 Solaris swap
bf03 Solaris backup        bf04 Solaris /var          bf05 Solaris /home
bf06 Solaris alternate se  bf07 Solaris Reserved 1    bf08 Solaris Reserved 2
bf09 Solaris Reserved 3    bf0a Solaris Reserved 4    bf0b Solaris Reserved 5
c001 HP-UX data            c002 HP-UX service         ea00 Freedesktop $BOOT
eb00 Haiku BFS             ed00 Sony system partitio  ed01 Lenovo system partit
ef00 EFI System            ef01 MBR partition scheme  ef02 BIOS boot partition
f800 Ceph OSD              f801 Ceph dm-crypt OSD     f802 Ceph journal
f803 Ceph dm-crypt journa  f804 Ceph disk in creatio  f805 Ceph dm-crypt disk i
fb00 VMWare VMFS           fb01 VMWare reserved       fc00 VMWare kcore crash p
fd00 Linux RAID

Command (? for help): print
Disk /dev/sdf: 30031250 sectors, 14.3 GiB
Logical sector size: 512 bytes
Disk identifier (GUID): F7024BA3-8B3D-40F8-AC62-2F0C166D6F54
Partition table holds up to 128 entries
First usable sector is 34, last usable sector is 30031216
Partitions will be aligned on 2-sector boundaries
Total free space is 0 sectors (0 bytes)

Number  Start (sector)    End (sector)  Size       Code  Name
   1              34            2047   1007.0 KiB  EF02
   2            2048        30014831   14.3 GiB    BF01  zfs
   9        30014832        30031216   8.0 MiB     BF07

Command (? for help): q
-------------------Notes begin------------------------------------------------------------------------------------
The partition table on sdf is still intact.
-------------------Notes end------------------------------------------------------------------------------------
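-------------------Notes begin------------------------------------------------------------------------------------
The same check can be done non-interactively with sgdisk instead of typing "print" inside the gdisk shell:

# Print the GPT without entering interactive mode
sgdisk -p /dev/sdf
-------------------Notes end------------------------------------------------------------------------------------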
root@pve5:/var/log# zpool status
  pool: pve5_zpool
 state: ONLINE
  scan: scrub repaired 0B in 1h22m with 0 errors on Sun Feb 11 01:46:05 2018
config:

        NAME                                      STATE     READ WRITE CKSUM
        pve5_zpool                                ONLINE       0     0     0
          mirror-0                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA  ONLINE       0     0     0
          mirror-1                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C8K291FJKA  ONLINE       0     0     0
        logs
          nvme-ADATA_SX8000NP_2H1120032587        ONLINE       0     0     0
        cache
          nvme-ADATA_SX8000NP_2H1120031632        ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-4J
  scan: resilvered 887M in 0h5m with 0 errors on Sat Dec 30 08:42:18 2017
config:

        NAME                     STATE     READ WRITE CKSUM
        rpool                    DEGRADED     0     0     0
          mirror-0               DEGRADED     0     0     0
            7054290625360323400  FAULTED      0     0     0  was /dev/sde2
            sde2                 ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
rpool still cannot see the data on sdf; the faulted mirror member is only tracked by its GUID (7054290625360323400).
-------------------Notes end------------------------------------------------------------------------------------
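-------------------Notes begin------------------------------------------------------------------------------------
A quick way to check whether ZFS can still read a vdev label off the re-inserted stick (my addition, not part of
the original session) is zdb; an unreadable or stale label is exactly what "label is missing or invalid" means:

# Dump the ZFS label(s) stored on the partition
zdb -l /dev/sdf2
-------------------Notes end------------------------------------------------------------------------------------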
root@pve5:/var/log# ls -l /dev/disk/by-id
total 0
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-HL-DT-ST_DVDRAM_GTC0N_KZPG6HE5719 -> ../../sr0
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA -> ../../sda
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA-part1 -> ../../sda1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA-part9 -> ../../sda9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C8K291FJKA -> ../../sdd
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C8K291FJKA-part1 -> ../../sdd1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C8K291FJKA-part9 -> ../../sdd9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA -> ../../sdb
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA-part9 -> ../../sdb9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA -> ../../sdc
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA-part9 -> ../../sdc9
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-ADATA_SX8000NP_2H1120031632 -> ../../nvme0n1
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-ADATA_SX8000NP_2H1120032587 -> ../../nvme1n1
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-nvme.126f-324831313230303331363332-4144415441205358383030304e50-00000001 -> ../../nvme0n1
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-nvme.126f-324831313230303332353837-4144415441205358383030304e50-00000001 -> ../../nvme1n1
lrwxrwxrwx 1 root root  9 Jan  5 10:15 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0 -> ../../sde
lrwxrwxrwx 1 root root 10 Jan  5 10:15 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0-part1 -> ../../sde1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0-part2 -> ../../sde2
lrwxrwxrwx 1 root root 10 Jan  5 10:15 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0-part9 -> ../../sde9
lrwxrwxrwx 1 root root  9 Feb 14 10:27 usb-SanDisk_Ultra_Fit_4C531001500328116591-0:0 -> ../../sdf
lrwxrwxrwx 1 root root 10 Feb 14 10:27 usb-SanDisk_Ultra_Fit_4C531001500328116591-0:0-part1 -> ../../sdf1
lrwxrwxrwx 1 root root 10 Feb 14 10:27 usb-SanDisk_Ultra_Fit_4C531001500328116591-0:0-part2 -> ../../sdf2
lrwxrwxrwx 1 root root 10 Feb 14 10:27 usb-SanDisk_Ultra_Fit_4C531001500328116591-0:0-part9 -> ../../sdf9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003978ba0132f -> ../../sda
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba0132f-part1 -> ../../sda1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba0132f-part9 -> ../../sda9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003978ba80b54 -> ../../sdd
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba80b54-part1 -> ../../sdd1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba80b54-part9 -> ../../sdd9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003978c6801f5 -> ../../sdb
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978c6801f5-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978c6801f5-part9 -> ../../sdb9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003979bd8227e -> ../../sdc
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003979bd8227e-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003979bd8227e-part9 -> ../../sdc9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x5001480000000000 -> ../../sr0
-------------------Notes begin------------------------------------------------------------------------------------
The /dev/disk/by-id links for sdf are present, but rpool still cannot see the data on sdf.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/var/log# gdisk /dev/sde
GPT fdisk (gdisk) version 1.0.1

Partition table scan:
  MBR: protective
  BSD: not present
  APM: not present
  GPT: present

Found valid GPT with protective MBR; using GPT.

Command (? for help): print
Disk /dev/sde: 30031250 sectors, 14.3 GiB
Logical sector size: 512 bytes
Disk identifier (GUID): B10EC989-56D1-42A0-958E-C929BBF85923
Partition table holds up to 128 entries
First usable sector is 34, last usable sector is 30031216
Partitions will be aligned on 2-sector boundaries
Total free space is 0 sectors (0 bytes)

Number  Start (sector)    End (sector)  Size       Code  Name
   1              34            2047   1007.0 KiB  EF02
   2            2048        30014831   14.3 GiB    BF01  zfs
   9        30014832        30031216   8.0 MiB     BF07

Command (? for help): q
-------------------Notes begin------------------------------------------------------------------------------------
Take a look at sde's partition table.
-------------------Notes end------------------------------------------------------------------------------------

root@pve5:/var/log# gdisk /dev/sdf
GPT fdisk (gdisk) version 1.0.1

Partition table scan:
  MBR: protective
  BSD: not present
  APM: not present
  GPT: present

Found valid GPT with protective MBR; using GPT.

Command (? for help): print
Disk /dev/sdf: 30031250 sectors, 14.3 GiB
Logical sector size: 512 bytes
Disk identifier (GUID): F7024BA3-8B3D-40F8-AC62-2F0C166D6F54
Partition table holds up to 128 entries
First usable sector is 34, last usable sector is 30031216
Partitions will be aligned on 2-sector boundaries
Total free space is 0 sectors (0 bytes)

Number  Start (sector)    End (sector)  Size       Code  Name
   1              34            2047   1007.0 KiB  EF02
   2            2048        30014831   14.3 GiB    BF01  zfs
   9        30014832        30031216   8.0 MiB     BF07

Command (? for help): q
-------------------Notes begin------------------------------------------------------------------------------------
Take a look at sdf's partition table; the layout matches sde's.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool status
  pool: pve5_zpool
 state: ONLINE
  scan: scrub repaired 0B in 1h22m with 0 errors on Sun Feb 11 01:46:05 2018
config:

        NAME                                      STATE     READ WRITE CKSUM
        pve5_zpool                                ONLINE       0     0     0
          mirror-0                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA  ONLINE       0     0     0
          mirror-1                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C8K291FJKA  ONLINE       0     0     0
        logs
          nvme-ADATA_SX8000NP_2H1120032587        ONLINE       0     0     0
        cache
          nvme-ADATA_SX8000NP_2H1120031632        ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-4J
  scan: resilvered 887M in 0h5m with 0 errors on Sat Dec 30 08:42:18 2017
config:

        NAME                     STATE     READ WRITE CKSUM
        rpool                    DEGRADED     0     0     0
          mirror-0               DEGRADED     0     0     0
            7054290625360323400  FAULTED      0     0     0  was /dev/sde2
            sde2                 ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
rpool still cannot see the data on sdf.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool detach rpool /dev/sdf2
cannot detach /dev/sdf2: no such device in pool
-------------------Notes begin------------------------------------------------------------------------------------
Tried to detach sdf2 from rpool, but zpool reports there is no such device in the pool.
-------------------Notes end------------------------------------------------------------------------------------
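-------------------Notes begin------------------------------------------------------------------------------------
The pool only knows the missing member by the numeric GUID shown in zpool status, so any detach or replace has to
name that GUID rather than a /dev path. A sketch of the detach form (not what this post ends up doing; the faulted
member is replaced further below instead):

# Reference the dead mirror member by its GUID
zpool detach rpool 7054290625360323400
-------------------Notes end------------------------------------------------------------------------------------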
root@pve5:/dev# ls -l /dev/disk/by-id
total 0
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-HL-DT-ST_DVDRAM_GTC0N_KZPG6HE5719 -> ../../sr0
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA -> ../../sda
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA-part1 -> ../../sda1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA-part9 -> ../../sda9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C8K291FJKA -> ../../sdd
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C8K291FJKA-part1 -> ../../sdd1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17C8K291FJKA-part9 -> ../../sdd9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA -> ../../sdb
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA-part9 -> ../../sdb9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA -> ../../sdc
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA-part9 -> ../../sdc9
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-ADATA_SX8000NP_2H1120031632 -> ../../nvme0n1
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-ADATA_SX8000NP_2H1120032587 -> ../../nvme1n1
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-nvme.126f-324831313230303331363332-4144415441205358383030304e50-00000001 -> ../../nvme0n1
lrwxrwxrwx 1 root root 13 Jan  5 10:15 nvme-nvme.126f-324831313230303332353837-4144415441205358383030304e50-00000001 -> ../../nvme1n1
lrwxrwxrwx 1 root root  9 Feb 14 10:37 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0 -> ../../sde
lrwxrwxrwx 1 root root 10 Feb 14 10:37 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0-part1 -> ../../sde1
lrwxrwxrwx 1 root root 10 Feb 14 10:37 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0-part2 -> ../../sde2
lrwxrwxrwx 1 root root 10 Feb 14 10:37 usb-SanDisk_Ultra_Fit_4C530001050328117214-0:0-part9 -> ../../sde9
lrwxrwxrwx 1 root root  9 Feb 14 12:08 usb-SanDisk_Ultra_Fit_4C530001250410116375-0:0 -> ../../sdf
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003978ba0132f -> ../../sda
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba0132f-part1 -> ../../sda1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba0132f-part9 -> ../../sda9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003978ba80b54 -> ../../sdd
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba80b54-part1 -> ../../sdd1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978ba80b54-part9 -> ../../sdd9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003978c6801f5 -> ../../sdb
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978c6801f5-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003978c6801f5-part9 -> ../../sdb9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x500003979bd8227e -> ../../sdc
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003979bd8227e-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 10 Jan  5 10:15 wwn-0x500003979bd8227e-part9 -> ../../sdc9
lrwxrwxrwx 1 root root  9 Jan  5 10:15 wwn-0x5001480000000000 -> ../../sr0
-------------------Notes begin------------------------------------------------------------------------------------
Plug in the new USB stick and check: it is detected as sdf (note the different serial number in the by-id listing).
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# sgdisk -R /dev/sdf /dev/sde
The operation has completed successfully.
-------------------Notes begin------------------------------------------------------------------------------------
The new USB stick is again detected as sdf, and the healthy stick already in the pool is sde. Restore as follows:
replicate the GPT partition layout from the old sde onto the new sdf. Mind the argument order: the target comes
first and the source (the old device) comes second.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# sgdisk -G /dev/sdf
The operation has completed successfully.
-------------------Notes begin------------------------------------------------------------------------------------
Regenerate sdf's disk and partition GUIDs so the clone does not share identifiers with sde.
-------------------Notes end------------------------------------------------------------------------------------
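-------------------Notes begin------------------------------------------------------------------------------------
A quick sanity check (my addition) that the two sticks now carry distinct identifiers after the -G run:

# The PARTUUID values should differ between the healthy stick and the clone
blkid /dev/sde2 /dev/sdf2
-------------------Notes end------------------------------------------------------------------------------------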
root@pve5:/dev# dd if=/dev/sde1 of=/dev/sdf1
2014+0 records in
2014+0 records out
1031168 bytes (1.0 MB, 1007 KiB) copied, 0.3237 s, 3.2 MB/s

root@pve5:/dev# dd if=/dev/sde9 of=/dev/sdf9
16385+0 records in
16385+0 records out
8389120 bytes (8.4 MB, 8.0 MiB) copied, 0.459313 s, 18.3 MB/s
-------------------Notes begin------------------------------------------------------------------------------------
Use dd to copy sde1 and sde9 over to sdf1 and sdf9 (the BIOS boot partition and the small reserved partition; the
large ZFS partition sdf2 will be filled by resilvering instead).
-------------------Notes end------------------------------------------------------------------------------------
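-------------------Notes begin------------------------------------------------------------------------------------
To confirm the copies match, cmp can compare the raw partitions byte for byte (a small check I would add here):

# No output and exit status 0 means the partitions are identical
cmp /dev/sde1 /dev/sdf1 && echo "part1 OK"
cmp /dev/sde9 /dev/sdf9 && echo "part9 OK"
-------------------Notes end------------------------------------------------------------------------------------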

root@pve5:/dev# zpool attach rpool mirror /dev/sde2 /dev/sdf2
too many arguments
usage:
        attach [-f] [-o property=value] <pool> <device> <new-device>
-------------------Notes begin------------------------------------------------------------------------------------
Error message "too many arguments": the syntax is wrong. As the usage line shows, zpool attach takes only
<pool> <device> <new-device>; there is no "mirror" keyword.
-------------------Notes end------------------------------------------------------------------------------------
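-------------------Notes begin------------------------------------------------------------------------------------
The corrected form, run a bit further below after re-checking the partition table, simply drops the keyword:

# Attach sdf2 as another side of the mirror that contains sde2
zpool attach rpool /dev/sde2 /dev/sdf2
-------------------Notes end------------------------------------------------------------------------------------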
root@pve5:/dev# gdisk /dev/sdf
GPT fdisk (gdisk) version 1.0.1

Partition table scan:
  MBR: protective
  BSD: not present
  APM: not present
  GPT: present

Found valid GPT with protective MBR; using GPT.

Command (? for help): print
Disk /dev/sdf: 30031250 sectors, 14.3 GiB
Logical sector size: 512 bytes
Disk identifier (GUID): 5489AECD-DD5E-420C-91B7-29BAFE645A6D
Partition table holds up to 128 entries
First usable sector is 34, last usable sector is 30031216
Partitions will be aligned on 2-sector boundaries
Total free space is 0 sectors (0 bytes)

Number  Start (sector)    End (sector)  Size       Code  Name
   1              34            2047   1007.0 KiB  EF02
   2            2048        30014831   14.3 GiB    BF01  zfs
   9        30014832        30031216   8.0 MiB     BF07

Command (? for help): q
-------------------Notes begin------------------------------------------------------------------------------------
Take a look at sdf's partition table: it is now properly partitioned, with its own disk GUID.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool attach rpool /dev/sde2 /dev/sdf2
Make sure to wait until resilver is done before rebooting.
-------------------Notes begin------------------------------------------------------------------------------------
The attach starts a resilver; the message says to wait until the resilver finishes before rebooting.
-------------------Notes end------------------------------------------------------------------------------------
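-------------------Notes begin------------------------------------------------------------------------------------
Progress can be followed from another terminal (a convenience sketch, assuming watch is installed):

# Refresh the pool status every 10 seconds until the resilver completes
watch -n 10 zpool status rpool
-------------------Notes end------------------------------------------------------------------------------------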
root@pve5:/dev# zpool status
  pool: pve5_zpool
 state: ONLINE
  scan: scrub repaired 0B in 1h22m with 0 errors on Sun Feb 11 01:46:05 2018
config:

        NAME                                      STATE     READ WRITE CKSUM
        pve5_zpool                                ONLINE       0     0     0
          mirror-0                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA  ONLINE       0     0     0
          mirror-1                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C8K291FJKA  ONLINE       0     0     0
        logs
          nvme-ADATA_SX8000NP_2H1120032587        ONLINE       0     0     0
        cache
          nvme-ADATA_SX8000NP_2H1120031632        ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: DEGRADED
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
  scan: resilver in progress since Wed Feb 14 12:18:07 2018
        151M scanned out of 3.05G at 6.87M/s, 0h7m to go
        151M resilvered, 4.84% done
config:

        NAME                     STATE     READ WRITE CKSUM
        rpool                    DEGRADED     0     0     0
          mirror-0               DEGRADED     0     0     0
            7054290625360323400  FAULTED      0     0     0  was /dev/sde2
            sde2                 ONLINE       0     0     0
            sdf2                 ONLINE       0     0     0  (resilvering)

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
sde2 is being resilvered onto sdf2, but the faulted vdev 7054290625360323400 (was /dev/sde2) is still there:
zpool attach only added sdf2 as a third mirror side; it does not remove the faulted member. That is what
zpool replace is for.
-------------------Notes end------------------------------------------------------------------------------------
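-------------------Notes begin------------------------------------------------------------------------------------
Two ways out of this three-way state; the post takes the first one below (the second is an untried alternative,
noted for completeness):

# Option A (used below): free sdf2 again, then swap it in for the faulted GUID
zpool detach rpool /dev/sdf2
zpool replace -f rpool 7054290625360323400 sdf2

# Option B (alternative): keep sdf2 attached and just drop the dead member
# zpool detach rpool 7054290625360323400
-------------------Notes end------------------------------------------------------------------------------------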
root@pve5:/dev# zpool replace rpool 7054290625360323400 sdf2
invalid vdev specification
use '-f' to override the following errors:
/dev/sdf2 is part of active pool 'rpool'
-------------------Notes begin------------------------------------------------------------------------------------
Tried zpool replace rpool 7054290625360323400 sdf2 (and with -f as well); it reports that sdf2 is already part of
the active pool, so the command fails. sdf2 has to be detached before it can serve as the replacement device.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool status
  pool: pve5_zpool
 state: ONLINE
  scan: scrub repaired 0B in 1h22m with 0 errors on Sun Feb 11 01:46:05 2018
config:

        NAME                                      STATE     READ WRITE CKSUM
        pve5_zpool                                ONLINE       0     0     0
          mirror-0                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA  ONLINE       0     0     0
          mirror-1                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C8K291FJKA  ONLINE       0     0     0
        logs
          nvme-ADATA_SX8000NP_2H1120032587        ONLINE       0     0     0
        cache
          nvme-ADATA_SX8000NP_2H1120031632        ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-4J
  scan: resilvered 3.05G in 0h6m with 0 errors on Wed Feb 14 12:24:41 2018
config:

        NAME                     STATE     READ WRITE CKSUM
        rpool                    DEGRADED     0     0     0
          mirror-0               DEGRADED     0     0     0
            7054290625360323400  FAULTED      0     0     0  was /dev/sde2
            sde2                 ONLINE       0     0     0
            sdf2                 ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
The attach-triggered resilver has finished (3.05G in 0h6m), but the faulted vdev is still listed. sdf2 will be
detached next so that zpool replace can use it.
-------------------Notes end------------------------------------------------------------------------------------


root@pve5:/dev# zpool detach rpool /dev/sdf2
-------------------Notes begin------------------------------------------------------------------------------------
Detach sdf2 from rpool.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool status
  pool: pve5_zpool
 state: ONLINE
  scan: scrub repaired 0B in 1h22m with 0 errors on Sun Feb 11 01:46:05 2018
config:

        NAME                                      STATE     READ WRITE CKSUM
        pve5_zpool                                ONLINE       0     0     0
          mirror-0                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA  ONLINE       0     0     0
          mirror-1                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C8K291FJKA  ONLINE       0     0     0
        logs
          nvme-ADATA_SX8000NP_2H1120032587        ONLINE       0     0     0
        cache
          nvme-ADATA_SX8000NP_2H1120031632        ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-4J
  scan: resilvered 3.05G in 0h6m with 0 errors on Wed Feb 14 12:24:41 2018
config:

        NAME                     STATE     READ WRITE CKSUM
        rpool                    DEGRADED     0     0     0
          mirror-0               DEGRADED     0     0     0
            7054290625360323400  FAULTED      0     0     0  was /dev/sde2
            sde2                 ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
sdf2 has been detached, so it no longer appears in the pool.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool replace -f rpool 7054290625360323400 sdf2
Make sure to wait until resilver is done before rebooting.
-------------------Notes begin------------------------------------------------------------------------------------
Retry with zpool replace -f rpool 7054290625360323400 sdf2; this time the replacement starts.
-------------------Notes end------------------------------------------------------------------------------------
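-------------------Notes begin------------------------------------------------------------------------------------
An equivalent, arguably safer form would name the new partition by its stable by-id path instead of the volatile
sdX letter (sketch only; the serial is the new stick's, from the by-id listing above):

zpool replace -f rpool 7054290625360323400 \
  /dev/disk/by-id/usb-SanDisk_Ultra_Fit_4C530001250410116375-0:0-part2
-------------------Notes end------------------------------------------------------------------------------------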
root@pve5:/dev# zpool status
  pool: pve5_zpool
 state: ONLINE
  scan: scrub repaired 0B in 1h22m with 0 errors on Sun Feb 11 01:46:05 2018
config:

        NAME                                      STATE     READ WRITE CKSUM
        pve5_zpool                                ONLINE       0     0     0
          mirror-0                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C7K58FFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17CWK01VFJKA  ONLINE       0     0     0
          mirror-1                                ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_27GEK4TBFJKA  ONLINE       0     0     0
            ata-TOSHIBA_MG04ACA400E_17C8K291FJKA  ONLINE       0     0     0
        logs
          nvme-ADATA_SX8000NP_2H1120032587        ONLINE       0     0     0
        cache
          nvme-ADATA_SX8000NP_2H1120031632        ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: DEGRADED
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
  scan: resilver in progress since Wed Feb 14 12:34:39 2018
        70.0M scanned out of 3.05G at 6.37M/s, 0h7m to go
        69.7M resilvered, 2.24% done
config:

        NAME                       STATE     READ WRITE CKSUM
        rpool                      DEGRADED     0     0     0
          mirror-0                 DEGRADED     0     0     0
            replacing-0            DEGRADED     0     0     0
              7054290625360323400  FAULTED      0     0     0  was /dev/sde2
              sdf2                 ONLINE       0     0     0  (resilvering)
            sde2                   ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
Resilvering in progress....
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool status rpool
  pool: rpool
 state: DEGRADED
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
  scan: resilver in progress since Wed Feb 14 12:34:39 2018
        2.88G scanned out of 3.05G at 7.99M/s, 0h0m to go
        2.87G resilvered, 94.30% done
config:

        NAME                       STATE     READ WRITE CKSUM
        rpool                      DEGRADED     0     0     0
          mirror-0                 DEGRADED     0     0     0
            replacing-0            DEGRADED     0     0     0
              7054290625360323400  FAULTED      0     0     0  was /dev/sde2
              sdf2                 ONLINE       0     0     0  (resilvering)
            sde2                   ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
The resilver progress is visible: 94.30% done.
-------------------Notes end------------------------------------------------------------------------------------
root@pve5:/dev# zpool status rpool
  pool: rpool
 state: ONLINE
  scan: resilvered 3.05G in 0h6m with 0 errors on Wed Feb 14 12:41:14 2018
config:

        NAME        STATE     READ WRITE CKSUM
        rpool       ONLINE       0     0     0
          mirror-0  ONLINE       0     0     0
            sdf2    ONLINE       0     0     0
            sde2    ONLINE       0     0     0

errors: No known data errors
-------------------Notes begin------------------------------------------------------------------------------------
Resilver complete; rpool is ONLINE again with the sde2/sdf2 mirror.
-------------------Notes end------------------------------------------------------------------------------------
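-------------------Notes begin------------------------------------------------------------------------------------
For reference, the whole replacement distilled into one sequence, assuming as here that the surviving stick is sde,
the new stick is sdf, and the faulted member's GUID is taken from zpool status:

sgdisk -R /dev/sdf /dev/sde     # clone the GPT: target first, source second
sgdisk -G /dev/sdf              # give the clone its own GUIDs
dd if=/dev/sde1 of=/dev/sdf1    # copy the BIOS boot partition
dd if=/dev/sde9 of=/dev/sdf9    # copy the small reserved partition
zpool replace -f rpool 7054290625360323400 sdf2   # swap in the new stick
zpool status rpool              # wait for the resilver before rebooting
-------------------Notes end------------------------------------------------------------------------------------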
