wiki:TipAndDoc/HA/NFS/log


  • verbose log of ../

mdadm -D /dev/md0

  • mitty@ubuntu-haa:~$ sudo mdadm -D /dev/md0
    /dev/md0:
            Version : 00.90
      Creation Time : Thu Mar 24 17:31:36 2011
         Raid Level : raid1
         Array Size : 2047936 (2000.27 MiB 2097.09 MB)
      Used Dev Size : 2047936 (2000.27 MiB 2097.09 MB)
       Raid Devices : 2
      Total Devices : 2
    Preferred Minor : 0
        Persistence : Superblock is persistent
    
        Update Time : Thu Mar 24 17:31:47 2011
              State : clean
     Active Devices : 2
    Working Devices : 2
     Failed Devices : 0
      Spare Devices : 0
    
               UUID : 32578407:6f33f50b:893cf340:745f5dce (local to host ubuntu-haa)
             Events : 0.34
    
        Number   Major   Minor   RaidDevice State
           0       8       17        0      active sync   /dev/sdb1
           1       8       33        1      active sync   /dev/sdc1
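
Creating the array itself is not shown in this log, but an array matching the output above (RAID1, two members, 0.90 superblock) would typically be built along these lines (a sketch only; the device names are taken from the listing above):

    # sketch -- not part of the captured session
    sudo mdadm --create /dev/md0 --level=1 --raid-devices=2 \
        --metadata=0.90 /dev/sdb1 /dev/sdc1
    cat /proc/mdstat    # watch the initial resync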
    

dpkg-reconfigure mdadm

  • mitty@ubuntu-haa:~$ sudo dpkg-reconfigure mdadm
      ┌──────────────────────────┤ Configuring mdadm ├─
      │                                                                          │
      │ If the kernel supports it (versions greater than 2.6.14), mdadm can      │
      │ periodically check the redundancy of MD arrays (RAIDs). This may be a    │
      │ resource-intensive process, depending on the local setup, but it could   │
      │ help prevent rare cases of data loss. Note that this is a read-only      │
      │ check unless errors are found; if errors are found, mdadm will try to    │
      │ correct them, which may result in write access to the media.             │
      │                                                                          │
      │ The default, if turned on, is to check on the first Sunday of every      │
      │ month at 01:06.                                                          │
      │                                                                          │
      │ Should mdadm run monthly redundancy checks of the MD arrays?             │
      │                                                                          │
      │                  [[<Yes>]]                     <No>                      │
      │                                                                          │
      └──────────────────────────────────────
    
    (snip)
    
      ┌──────────────────────────┤ Configuring mdadm ├─
      │                                                                         │
      │ The MD (RAID) monitor daemon sends email notifications in response to   │
      │ important MD events (such as a disk failure).                           │
      │                                                                         │
      │ Enabling this option is recommended.                                    │
      │                                                                         │
      │ Do you want to start the MD monitoring daemon?                          │
      │                                                                         │
      │                  [[<Yes>]]                     <No>                     │
      │                                                                         │
      └──────────────────────────────────────
    
    (snip)
    
       ┌─────────────────────────┤ Configuring mdadm ├──
       │ Please enter the email address of the user who should get the email   │
       │ notifications for important MD events.                                │
       │                                                                       │
       │ Recipient for email notifications:                                    │
       │                                                                       │
       │ root_________________________________________________________________ │
       │                                                                       │
       │                                <Ok>                                   │
       │                                                                       │
       └───────────────────────────────────────
    
    (snip)
    
     ┌───────────────────────────┤ Configuring mdadm ├ 
     │                                                                           │
     │ If your root filesystem is on a RAID, and a disk is missing at boot, it   │
     │ can either boot with the degraded array, or hold the system at a          │
     │ recovery shell.                                                           │
     │                                                                           │
     │ Running a system with a degraded RAID could result in permanent data      │
     │ loss if it suffers another hardware fault.                                │
     │                                                                           │
     │ If you do not have access to the server console to use the recovery       │
     │ shell, you might answer "yes" to enable the system to boot unattended.    │
     │                                                                           │
     │ Do you want to boot your system if your RAID becomes degraded?            │
     │                                                                           │
     │                  [[<Yes>]]                     <No>                       │
     │                                                                           │
     └───────────────────────────────────────
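
The choices above are stored by debconf and written into the mdadm configuration. On a Debian/Ubuntu system of this era they can be double-checked roughly as follows (a sketch; the exact file locations and variable names are assumptions and may differ by release):

    # sketch -- likely places to confirm the answers given above
    sudo debconf-show mdadm                                  # stored debconf answers
    grep -E 'AUTOCHECK|START_DAEMON' /etc/default/mdadm      # periodic check / monitor daemon
    grep MAILADDR /etc/mdadm/mdadm.conf                      # notification recipient
    grep BOOT_DEGRADED /etc/initramfs-tools/conf.d/mdadm     # degraded-boot behaviour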
    

pvcreate /dev/md0

  • mitty@ubuntu-haa:~$ sudo pvcreate /dev/md0
      Physical volume "/dev/md0" successfully created
    
  • mitty@ubuntu-haa:~$ sudo pvdisplay -C
      PV         VG   Fmt  Attr PSize PFree
      /dev/md0        lvm2 --   1.95g 1.95g
    
  • mitty@ubuntu-haa:~$ sudo pvdisplay
      "/dev/md0" is a new physical volume of "1.95 GiB"
      --- NEW Physical volume ---
      PV Name               /dev/md0
      VG Name
      PV Size               1.95 GiB
      Allocatable           NO
      PE Size               0
      Total PE              0
      Free PE               0
      Allocated PE          0
      PV UUID               Z2JXRP-fa5g-SYS5-xzMs-Lq8C-1Jbh-QPKihr
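
At this point the PV belongs to no volume group, which is why "Allocatable" is NO and the PE counts are zero: physical extents are only laid out once the PV joins a VG (vgcreate below). A couple of quick checks, as a sketch (output not captured here):

    # sketch -- pvcreate only writes an LVM2 label onto /dev/md0
    sudo blkid /dev/md0     # should report TYPE="LVM2_member"
    sudo pvs /dev/md0       # roughly the same summary as 'pvdisplay -C'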
    
    

vgcreate -s 32 vgnfs /dev/md0

  • mitty@ubuntu-haa:~$ sudo vgcreate -s 32 vgnfs /dev/md0
      Volume group "vgnfs" successfully created
    
  • mitty@ubuntu-haa:~$ sudo vgdisplay -C
      VG    #PV #LV #SN Attr   VSize VFree
      vgnfs   1   0   0 wz--n- 1.94g 1.94g
    
  • mitty@ubuntu-haa:~$ sudo vgdisplay
      --- Volume group ---
      VG Name               vgnfs
      System ID
      Format                lvm2
      Metadata Areas        1
      Metadata Sequence No  1
      VG Access             read/write
      VG Status             resizable
      MAX LV                0
      Cur LV                0
      Open LV               0
      Max PV                0
      Cur PV                1
      Act PV                1
      VG Size               1.94 GiB
      PE Size               32.00 MiB
      Total PE              62
      Alloc PE / Size       0 / 0
      Free  PE / Size       62 / 1.94 GiB
      VG UUID               I6vVoh-6gCJ-9uvA-v2MV-Fyva-7J8v-Cvftfi
    
    
  • mitty@ubuntu-haa:~$ sudo pvdisplay -C
      PV         VG    Fmt  Attr PSize PFree
      /dev/md0   vgnfs lvm2 a-   1.94g 1.94g
    
  • mitty@ubuntu-haa:~$ sudo pvdisplay
      --- Physical volume ---
      PV Name               /dev/md0
      VG Name               vgnfs
      PV Size               1.95 GiB / not usable 15.94 MiB
      Allocatable           yes
      PE Size               32.00 MiB
      Total PE              62
      Free PE               62
      Allocated PE          0
      PV UUID               Z2JXRP-fa5g-SYS5-xzMs-Lq8C-1Jbh-QPKihr
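
The -s 32 option sets a 32 MiB physical extent size, which explains the figures above: 62 extents of 32 MiB give the 1.94 GiB usable size, and the leftover of the roughly 2000 MiB array (including the LVM metadata area) is what pvdisplay reports as "not usable 15.94 MiB". A quick check of the arithmetic:

    # 62 extents x 32 MiB each = usable size of the VG
    echo $((62 * 32))       # -> 1984 MiB, i.e. about 1.94 GiB
    # 1984 MiB + 15.94 MiB "not usable" ~= 2000 MiB, the size of /dev/md0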