
Raid 5 failed on 5big1

PostPosted: Sun Oct 27, 2013 6:30 pm
by totorweb
Hi,

With the help of fvdw I can boot a standalone firmware on my 5big1.
With a static build of mdadm I am trying to see whether I can recover a RAID 5 array.

The RAID 5 array md4 seems to be in a very bad state...

Maybe recovery is impossible...
Here is some useful information.

Code: Select all
=============================================================
root@(none):/proc # mdadm --examine --scan -v
ARRAY /dev/md4 level=raid5 num-devices=5 UUID=9a19ca8c:cb7e438e:319ec53c:c4811768
   spares=2   devices=/dev/sda2,/dev/sdb2,/dev/sdc2,/dev/sdd2,/dev/sde2
ARRAY /dev/md3 level=raid1 num-devices=5 UUID=677ae0b6:c1786136:21463b36:ab8fe91f
   spares=1   devices=/dev/sda5,/dev/sdb5,/dev/sdc5,/dev/sdd5,/dev/sde5
ARRAY /dev/md0 level=raid1 num-devices=5 UUID=4d8ddb0b:7628b8d9:7041890e:35b62a40
   devices=/dev/sda7,/dev/sdb7,/dev/sdc7,/dev/sdd7,/dev/sde7
ARRAY /dev/md1 level=raid1 num-devices=5 UUID=f7adf28f:85d8c92c:71decae2:b9cb8f5e
   devices=/dev/sda8,/dev/sdb8,/dev/sdc8,/dev/sdd8,/dev/sde8
ARRAY /dev/md2 level=raid1 num-devices=5 UUID=70134a7d:bc4287a0:17af4ce9:c1c88247
   spares=1   devices=/dev/sda9,/dev/sdb9,/dev/sdc9,/dev/sdd9,/dev/sde9
=============================================================
root@(none):/proc # cat /proc/partitions
major minor  #blocks  name

   8        0 1465138584 sde
   8        1          1 sde1
   8        2 1464131970 sde2
   8        5     128457 sde5
   8        6       8001 sde6
   8        7       8001 sde7
   8        8     176683 sde8
   8        9     674698 sde9
   8       10       8001 sde10
   8       16 1465138584 sdd
   8       17          1 sdd1
   8       18 1464131970 sdd2
   8       21     128457 sdd5
   8       22       8001 sdd6
   8       23       8001 sdd7
   8       24     176683 sdd8
   8       25     674698 sdd9
   8       26       8001 sdd10
   8       32 1465138584 sdc
   8       33          1 sdc1
   8       34 1464131970 sdc2
   8       37     128457 sdc5
   8       38       8001 sdc6
   8       39       8001 sdc7
   8       40     176683 sdc8
   8       41     674698 sdc9
   8       42       8001 sdc10
   8       48 1465138584 sdb
   8       49          1 sdb1
   8       50 1464131970 sdb2
   8       53     128457 sdb5
   8       54       8001 sdb6
   8       55       8001 sdb7
   8       56     176683 sdb8
   8       57     674698 sdb9
   8       58       8001 sdb10
   8       64 1465138584 sda
   8       65          1 sda1
   8       66 1464131970 sda2
   8       69     128457 sda5
   8       70       8001 sda6
   8       71       8001 sda7
   8       72     176683 sda8
   8       73     674698 sda9
   8       74       8001 sda10
=============================================================
root@(none):/proc # fdisk -l

Disk /dev/sde: 1500.3 GB, 1500301910016 bytes
255 heads, 63 sectors/track, 182401 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks  Id System
/dev/sde1               1         125     1004031   5 Extended
/dev/sde2             126      182401  1464131970  83 Linux
/dev/sde5               1          16      128457  82 Linux swap
/dev/sde6              17          17        8001  83 Linux
/dev/sde7              18          18        8001  83 Linux
/dev/sde8              19          40      176683+ 83 Linux
/dev/sde9              41         124      674698+ 83 Linux
/dev/sde10            125         125        8001  83 Linux

Disk /dev/sdd: 1500.3 GB, 1500301910016 bytes
255 heads, 63 sectors/track, 182401 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks  Id System
/dev/sdd1               1         125     1004031   5 Extended
/dev/sdd2             126      182401  1464131970  83 Linux
/dev/sdd5               1          16      128457  82 Linux swap
/dev/sdd6              17          17        8001  83 Linux
/dev/sdd7              18          18        8001  83 Linux
/dev/sdd8              19          40      176683+ 83 Linux
/dev/sdd9              41         124      674698+ 83 Linux
/dev/sdd10            125         125        8001  83 Linux

Disk /dev/sdc: 1500.3 GB, 1500301910016 bytes
255 heads, 63 sectors/track, 182401 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks  Id System
/dev/sdc1               1         125     1004031   5 Extended
/dev/sdc2             126      182401  1464131970  83 Linux
/dev/sdc5               1          16      128457  82 Linux swap
/dev/sdc6              17          17        8001  83 Linux
/dev/sdc7              18          18        8001  83 Linux
/dev/sdc8              19          40      176683+ 83 Linux
/dev/sdc9              41         124      674698+ 83 Linux
/dev/sdc10            125         125        8001  83 Linux

Disk /dev/sdb: 1500.3 GB, 1500301910016 bytes
255 heads, 63 sectors/track, 182401 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks  Id System
/dev/sdb1               1         125     1004031   5 Extended
/dev/sdb2             126      182401  1464131970  83 Linux
/dev/sdb5               1          16      128457  82 Linux swap
/dev/sdb6              17          17        8001  83 Linux
/dev/sdb7              18          18        8001  83 Linux
/dev/sdb8              19          40      176683+ 83 Linux
/dev/sdb9              41         124      674698+ 83 Linux
/dev/sdb10            125         125        8001  83 Linux

Disk /dev/sda: 1500.3 GB, 1500301910016 bytes
255 heads, 63 sectors/track, 182401 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks  Id System
/dev/sda1               1         125     1004031   5 Extended
/dev/sda2             126      182401  1464131970  83 Linux
/dev/sda5               1          16      128457  82 Linux swap
/dev/sda6              17          17        8001  83 Linux
/dev/sda7              18          18        8001  83 Linux
/dev/sda8              19          40      176683+ 83 Linux
/dev/sda9              41         124      674698+ 83 Linux
/dev/sda10            125         125        8001  83 Linux
=============================================================
root@(none):/proc # ls -l /dev/sd*
brw-rw-rw-    1 root     root        8,  64 Oct 27  2013 /dev/sda
brw-rw-rw-    1 root     root        8,  65 Oct 27  2013 /dev/sda1
brw-rw-rw-    1 root     root        8,  74 Oct 27  2013 /dev/sda10
brw-rw-rw-    1 root     root        8,  66 Oct 27  2013 /dev/sda2
brw-rw-rw-    1 root     root        8,  67 Oct 27  2013 /dev/sda3
brw-rw-rw-    1 root     root        8,  68 Oct 27  2013 /dev/sda4
brw-rw-rw-    1 root     root        8,  69 Oct 27  2013 /dev/sda5
brw-rw-rw-    1 root     root        8,  70 Oct 27  2013 /dev/sda6
brw-rw-rw-    1 root     root        8,  71 Oct 27  2013 /dev/sda7
brw-rw-rw-    1 root     root        8,  72 Oct 27  2013 /dev/sda8
brw-rw-rw-    1 root     root        8,  73 Oct 27  2013 /dev/sda9
brw-rw-rw-    1 root     root        8,  48 Oct 27  2013 /dev/sdb
brw-rw-rw-    1 root     root        8,  49 Oct 27  2013 /dev/sdb1
brw-rw-rw-    1 root     root        8,  58 Oct 27  2013 /dev/sdb10
brw-rw-rw-    1 root     root        8,  50 Oct 27  2013 /dev/sdb2
brw-rw-rw-    1 root     root        8,  51 Oct 27  2013 /dev/sdb3
brw-rw-rw-    1 root     root        8,  52 Oct 27  2013 /dev/sdb4
brw-rw-rw-    1 root     root        8,  53 Oct 27  2013 /dev/sdb5
brw-rw-rw-    1 root     root        8,  54 Oct 27  2013 /dev/sdb6
brw-rw-rw-    1 root     root        8,  55 Oct 27  2013 /dev/sdb7
brw-rw-rw-    1 root     root        8,  56 Oct 27  2013 /dev/sdb8
brw-rw-rw-    1 root     root        8,  57 Oct 27  2013 /dev/sdb9
brw-rw-rw-    1 root     root        8,  32 Oct 27  2013 /dev/sdc
brw-rw-rw-    1 root     root        8,  33 Oct 27  2013 /dev/sdc1
brw-rw-rw-    1 root     root        8,  42 Oct 27  2013 /dev/sdc10
brw-rw-rw-    1 root     root        8,  34 Oct 27  2013 /dev/sdc2
brw-rw-rw-    1 root     root        8,  35 Oct 27  2013 /dev/sdc3
brw-rw-rw-    1 root     root        8,  36 Oct 27  2013 /dev/sdc4
brw-rw-rw-    1 root     root        8,  37 Oct 27  2013 /dev/sdc5
brw-rw-rw-    1 root     root        8,  38 Oct 27  2013 /dev/sdc6
brw-rw-rw-    1 root     root        8,  39 Oct 27  2013 /dev/sdc7
brw-rw-rw-    1 root     root        8,  40 Oct 27  2013 /dev/sdc8
brw-rw-rw-    1 root     root        8,  41 Oct 27  2013 /dev/sdc9
brw-rw-rw-    1 root     root        8,  16 Oct 27  2013 /dev/sdd
brw-rw-rw-    1 root     root        8,  17 Oct 27  2013 /dev/sdd1
brw-rw-rw-    1 root     root        8,  26 Oct 27  2013 /dev/sdd10
brw-rw-rw-    1 root     root        8,  18 Oct 27  2013 /dev/sdd2
brw-rw-rw-    1 root     root        8,  19 Oct 27  2013 /dev/sdd3
brw-rw-rw-    1 root     root        8,  20 Oct 27  2013 /dev/sdd4
brw-rw-rw-    1 root     root        8,  21 Oct 27  2013 /dev/sdd5
brw-rw-rw-    1 root     root        8,  22 Oct 27  2013 /dev/sdd6
brw-rw-rw-    1 root     root        8,  23 Oct 27  2013 /dev/sdd7
brw-rw-rw-    1 root     root        8,  24 Oct 27  2013 /dev/sdd8
brw-rw-rw-    1 root     root        8,  25 Oct 27  2013 /dev/sdd9
brw-rw-rw-    1 root     root        8,   0 Oct 27  2013 /dev/sde
brw-rw-rw-    1 root     root        8,   1 Oct 27  2013 /dev/sde1
brw-rw-rw-    1 root     root        8,  10 Oct 27  2013 /dev/sde10
brw-rw-rw-    1 root     root        8,   2 Oct 27  2013 /dev/sde2
brw-rw-rw-    1 root     root        8,   3 Oct 27  2013 /dev/sde3
brw-rw-rw-    1 root     root        8,   4 Oct 27  2013 /dev/sde4
brw-rw-rw-    1 root     root        8,   5 Oct 27  2013 /dev/sde5
brw-rw-rw-    1 root     root        8,   6 Oct 27  2013 /dev/sde6
brw-rw-rw-    1 root     root        8,   7 Oct 27  2013 /dev/sde7
brw-rw-rw-    1 root     root        8,   8 Oct 27  2013 /dev/sde8
brw-rw-rw-    1 root     root        8,   9 Oct 27  2013 /dev/sde9
=============================================================
root@(none):/proc # mdadm --examine /dev/sd[abcde]2
/dev/sda2:
          Magic : a92b4efc
        Version : 0.90.00
           UUID : 9a19ca8c:cb7e438e:319ec53c:c4811768
  Creation Time : Thu Jun 18 12:23:53 2009
     Raid Level : raid5
  Used Dev Size : 1464131904 (1396.31 GiB 1499.27 GB)
     Array Size : 5856527616 (5585.22 GiB 5997.08 GB)
   Raid Devices : 5
  Total Devices : 4
Preferred Minor : 4

    Update Time : Mon Aug 26 03:25:46 2013
          State : clean
 Active Devices : 3
Working Devices : 4
 Failed Devices : 2
  Spare Devices : 1
       Checksum : f8810833 - correct
         Events : 96094

         Layout : left-symmetric
     Chunk Size : 64K

      Number   Major   Minor   RaidDevice State
this     5       8       82        5      spare

   0     0       8        2        0      active sync   /dev/sde2
   1     1       0        0        1      faulty removed
   2     2       8       34        2      active sync   /dev/sdc2
   3     3       8       50        3      active sync   /dev/sdb2
   4     4       0        0        4      faulty removed
   5     5       8       82        5      spare
/dev/sdb2:
          Magic : a92b4efc
        Version : 0.90.00
           UUID : 9a19ca8c:cb7e438e:319ec53c:c4811768
  Creation Time : Thu Jun 18 12:23:53 2009
     Raid Level : raid5
  Used Dev Size : 1464131904 (1396.31 GiB 1499.27 GB)
     Array Size : 5856527616 (5585.22 GiB 5997.08 GB)
   Raid Devices : 5
  Total Devices : 4
Preferred Minor : 4

    Update Time : Mon Aug 26 03:25:46 2013
          State : clean
 Active Devices : 3
Working Devices : 4
 Failed Devices : 2
  Spare Devices : 1
       Checksum : f8810815 - correct
         Events : 96094

         Layout : left-symmetric
     Chunk Size : 64K

      Number   Major   Minor   RaidDevice State
this     3       8       50        3      active sync   /dev/sdb2

   0     0       8        2        0      active sync   /dev/sde2
   1     1       0        0        1      faulty removed
   2     2       8       34        2      active sync   /dev/sdc2
   3     3       8       50        3      active sync   /dev/sdb2
   4     4       0        0        4      faulty removed
   5     5       8       82        5      spare
/dev/sdc2:
          Magic : a92b4efc
        Version : 0.90.00
           UUID : 9a19ca8c:cb7e438e:319ec53c:c4811768
  Creation Time : Thu Jun 18 12:23:53 2009
     Raid Level : raid5
  Used Dev Size : 1464131904 (1396.31 GiB 1499.27 GB)
     Array Size : 5856527616 (5585.22 GiB 5997.08 GB)
   Raid Devices : 5
  Total Devices : 4
Preferred Minor : 4

    Update Time : Mon Aug 26 03:25:46 2013
          State : clean
 Active Devices : 3
Working Devices : 4
 Failed Devices : 2
  Spare Devices : 1
       Checksum : f8810803 - correct
         Events : 96094

         Layout : left-symmetric
     Chunk Size : 64K

      Number   Major   Minor   RaidDevice State
this     2       8       34        2      active sync   /dev/sdc2

   0     0       8        2        0      active sync   /dev/sde2
   1     1       0        0        1      faulty removed
   2     2       8       34        2      active sync   /dev/sdc2
   3     3       8       50        3      active sync   /dev/sdb2
   4     4       0        0        4      faulty removed
   5     5       8       82        5      spare
/dev/sdd2:
          Magic : a92b4efc
        Version : 0.90.00
           UUID : 9a19ca8c:cb7e438e:319ec53c:c4811768
  Creation Time : Thu Jun 18 12:23:53 2009
     Raid Level : raid5
  Used Dev Size : 1464131904 (1396.31 GiB 1499.27 GB)
     Array Size : 5856527616 (5585.22 GiB 5997.08 GB)
   Raid Devices : 5
  Total Devices : 5
Preferred Minor : 4

    Update Time : Tue Jul 16 22:43:15 2013
          State : clean
 Active Devices : 4
Working Devices : 5
 Failed Devices : 1
  Spare Devices : 1
       Checksum : f8495ab9 - correct
         Events : 8152

         Layout : left-symmetric
     Chunk Size : 64K

      Number   Major   Minor   RaidDevice State
this     5       8       18        5      spare   /dev/sdd2

   0     0       8        2        0      active sync   /dev/sde2
   1     1       0        0        1      faulty removed
   2     2       8       34        2      active sync   /dev/sdc2
   3     3       8       50        3      active sync   /dev/sdb2
   4     4       8       66        4      active sync   /dev/sda2
   5     5       8       18        5      spare   /dev/sdd2
/dev/sde2:
          Magic : a92b4efc
        Version : 0.90.00
           UUID : 9a19ca8c:cb7e438e:319ec53c:c4811768
  Creation Time : Thu Jun 18 12:23:53 2009
     Raid Level : raid5
  Used Dev Size : 1464131904 (1396.31 GiB 1499.27 GB)
     Array Size : 5856527616 (5585.22 GiB 5997.08 GB)
   Raid Devices : 5
  Total Devices : 4
Preferred Minor : 4

    Update Time : Mon Aug 26 03:25:46 2013
          State : clean
 Active Devices : 3
Working Devices : 4
 Failed Devices : 2
  Spare Devices : 1
       Checksum : f88107df - correct
         Events : 96094

         Layout : left-symmetric
     Chunk Size : 64K

      Number   Major   Minor   RaidDevice State
this     0       8        2        0      active sync   /dev/sde2

   0     0       8        2        0      active sync   /dev/sde2
   1     1       0        0        1      faulty removed
   2     2       8       34        2      active sync   /dev/sdc2
   3     3       8       50        3      active sync   /dev/sdb2
   4     4       0        0        4      faulty removed
   5     5       8       82        5      spare

   
   

Re: Raid 5 failed on 5big1

PostPosted: Sun Oct 27, 2013 6:54 pm
by Mijzelf
Can you give some history? The array seems to have 6 members, of which 1 is a spare. Did you have a 6th disk connected via eSATA?

Re: Raid 5 failed on 5big1

PostPosted: Sun Oct 27, 2013 7:46 pm
by totorweb
That's what I am trying to investigate with the owner of the NAS... :dontknow

What I know is that the RAID 5 failed on the LaCie; one disk LED (sdd) was red.
My friend physically replaced the disk with a new one (same size, but not empty: it contained a Windows NTFS partition), but the LaCie OS never saw it or reconstructed the RAID.
When I first logged on to the NAS with telnet and the standalone firmware, the fdisk -l command showed me an NTFS disk on sdd, so I swapped this disk back for the old one.

"mdadm --examine --scan -v" gives 5 disks and 2 spares; maybe the 6th disk is the NTFS disk configured as a spare?

ARRAY /dev/md4 level=raid5 num-devices=5 UUID=9a19ca8c:cb7e438e:319ec53c:c4811768
spares=2 devices=/dev/sda2,/dev/sdb2,/dev/sdc2,/dev/sdd2,/dev/sde2

Re: Raid 5 failed on 5big1

PostPosted: Sun Oct 27, 2013 9:07 pm
by Mijzelf
OK. When I extract some metadata, I get this:
Code: Select all
      Used Dev Size : 1464131904 (1396.31 GiB 1499.27 GB)
         Array Size : 5856527616 (5585.22 GiB 5997.08 GB)
A RAID member is 1.5 TB and the whole array is 6 TB, so at least 4 members are needed to assemble the array.
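As a quick sanity check of that arithmetic (RAID 5 usable size is member size times the number of data members, i.e. members minus one):
Code: Select all
# raid5 usable size = (members - 1) * member size
# 4 * 1464131904 KiB = 5856527616 KiB, which matches the reported Array Size
echo $(( 4 * 1464131904 ))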

Further:
Code: Select all
        Number   Major   Minor   RaidDevice State
sda2     5       8       82        5      spare
sdb2     3       8       50        3      active sync   /dev/sdb2
sdc2     2       8       34        2      active sync   /dev/sdc2
sdd2     5       8       18        5      spare   /dev/sdd2
sde2     0       8        2        0      active sync   /dev/sde2
We have RAID members 0, 2 and 3, and 2 spares. You said sdd is a new disk that was not initialized yet. Yet it has a RAID header *and* a recognizable NTFS partition?
Further, it is striking that the device names are in reverse order of the minors. On the 5Big2 this would have been strange, but maybe not on the 5Big1. Maybe fvdw can tell?

Further, all RAID members have 'Update Time: Mon Aug 26 03:25:46 2013', except sdd2, which has 'Tue Jul 16 22:43:15 2013'. Is it possible that sdd failed on July 16 and was exchanged for something else, that the array kept working until August 26, when somehow sda2 was dropped, killing the array, and that now the original sdd is back?
If so, was the box actually used between July 16 and August 26?

The point is, the array has to be re-created using one of the spare members to get to 4 members. This can be done in 4 ways: using sda2 or sdd2, as member 1 or as member 4. The created array might contain a recognizable filesystem, which will almost certainly need to be repaired in order to mount it. That means changes will be made to the array, lowering the chance that assembling the array with the other spare will still be successful.
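Spelled out as a sketch only (the options simply mirror what --examine reports: 0.90 metadata, 64K chunks, left-symmetric layout, 5 raid devices), the four candidate orderings would be something like:
Code: Select all
# Sketch only -- the four possible re-creation orders; 'missing' marks the absent role
OPTS="--assume-clean --metadata=0.90 --chunk=64 --level=raid5 --layout=left-symmetric --raid-devices=5"
mdadm --create $OPTS /dev/md4 /dev/sde2 /dev/sda2 /dev/sdc2 /dev/sdb2 missing   # sda2 as member 1
mdadm --create $OPTS /dev/md4 /dev/sde2 missing /dev/sdc2 /dev/sdb2 /dev/sda2   # sda2 as member 4
mdadm --create $OPTS /dev/md4 /dev/sde2 /dev/sdd2 /dev/sdc2 /dev/sdb2 missing   # sdd2 as member 1
mdadm --create $OPTS /dev/md4 /dev/sde2 missing /dev/sdc2 /dev/sdb2 /dev/sdd2   # sdd2 as member 4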

Ideally you create the array using one of the spares, copy the array contents to another disk/array, and try to repair/mount the copy. If that fails, another spare or another role can be tried.
But to do so, you'll need a block device of 6 TB. Maybe 2x3 TB in RAID 0.
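One way to do that, sketched with hypothetical device names (sdf and sdg standing in for the two extra 3 TB disks, which are not part of this system):
Code: Select all
# Hypothetical sketch -- build a ~6 TB raid0 scratch target from two extra 3 TB disks
mdadm --create /dev/md5 --level=raid0 --raid-devices=2 /dev/sdf /dev/sdg
# copy the re-created (degraded) array onto it before attempting any filesystem repair
dd if=/dev/md4 of=/dev/md5 bs=1M conv=noerror,sync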

How valuable is the data?

Re: Raid 5 failed on 5big1

PostPosted: Sun Oct 27, 2013 10:05 pm
by totorweb
OK, I have more info.

On Jul 16 the disk sdd failed and the array continued working. sdd was replaced (by the disk with an NTFS partition) but the array was never reconstructed.
On Aug 26 there was an electrical problem and the LaCie never rebooted; I think at this moment sda had a problem.

So yes, the NAS was used between July 16 and August 26.

Is the command like this?
Code: Select all
mdadm --verbose --create /dev/md4 --metadata=0.90 --chunk=64 --level=5 --raid-devices=4 /dev/sde2 /dev/sdc2 /dev/sdb2 /dev/sda2

or
Code: Select all
mdadm --verbose --create /dev/md4 --metadata=0.90 --chunk=64 --level=5 --raid-devices=4 /dev/sde2 /dev/sdd2 /dev/sdc2 /dev/sdb2


Some of the data is important for my friend, but he thought it was lost; I'm just trying a last resort with no guarantee of success.

Re: Raid 5 failed on 5big1

PostPosted: Sun Oct 27, 2013 10:58 pm
by fvdw
Further it is striking that the device names are in reverse order of the Minors. On the 5Big2 this would have been strange, but maybe not on the 5Big1. Maybe fvdw can tell?


This is because of the funny way LaCie connects the disks to the bus.
For example, on the 5big1 you have a PCI interface; on this bus sda is device 4:0:0:0, sdb device 3, sdc device 2, sdd device 1 and sde device 0.
For that reason I used a disk map routine so that sda is always linked to device 4, even if it is taken out. Not doing that makes the location of sda, b, c and d dependent on the number of disks inserted.

On the 5big2 it is even more complicated: there sda is device 0:3, sdb device 0:2, sdc device 0:1, sdd 1:2 and sde 1:1,
so there the minors start at 32 for sda, 16 for sdb, 0 for sdc, 64 for sdd and 48 for sde.

I know, very confusing. Also on the 5big2 a disk map is needed to link sda always to the same device, independent of the number of disks installed.
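For illustration only (this standalone firmware may well not run udev, and this is not the actual disk map routine used): the same kind of pinning could be expressed as a udev rule matching the SCSI address of the parent device, e.g.:
Code: Select all
# Hypothetical udev sketch -- give the disk behind SCSI address 4:0:0:0 (slot 1 on the 5big1)
# a stable alias, no matter how many other disks are inserted
KERNEL=="sd?", SUBSYSTEM=="block", KERNELS=="4:0:0:0", SYMLINK+="slot1"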

Re: Raid 5 failed on 5big1

PostPosted: Mon Oct 28, 2013 8:51 am
by Mijzelf
@totorweb: So sdd is not likely to have any usable information, while sda possibly only has a damaged RAID header.

I suggest re-creating the array with sdd missing:
Code: Select all
mdadm --verbose --create --assume-clean --metadata=0.90 --chunk=64 --level=raid5 --layout=left-symmetric --raid-devices=5 /dev/md4 /dev/sde2 missing /dev/sdc2 /dev/sdb2 /dev/sda2
When done, try to mount it read only:
Code: Select all
mkdir /tmp/mountpoint
mount -o ro /dev/md4 /tmp/mountpoint
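Before mounting, it may be worth a quick check that the array really came up and that a filesystem is still recognized (blkid may or may not be present in the standalone firmware):
Code: Select all
cat /proc/mdstat          # md4 should show up as an active, degraded 5-member raid5
mdadm --detail /dev/md4   # verify level, chunk size, layout and member order
blkid /dev/md4            # reports the filesystem type, if one is still recognizable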


@fvdw: The RAID members are also in reverse order, so I guess the LaCie firmware had the device names in reverse order as well. Is the order you used somehow imposed by the u-boot command line? If not, I suggest you reverse the order and just specify that the boot disk is on the other side. That way the minors fit the default scheme, which is less confusing, and a LaCie RAID array behaves more 'logically'.

Re: Raid 5 failed on 5big1

PostPosted: Mon Oct 28, 2013 10:18 pm
by fvdw
You could be right.

In the u-boot output from the serial console, as posted by tatae, you see this:
Code: Select all
Reset IDE:
Marvell Serial ATA Adapter
Found adapter at bus 1, device 7 ... Scanning channels
  Device 4: OK
Model: WDC WD5000AADS-00S9B0                    Firm: 01.00A01 Ser#:      WD-WCAV90478832
            Type: Hard Disk
            Supports 48-bit addressing
            Capacity: 476940.0 MB = 465.7 GB (976773168 x 512)

** Device 0 not available

** Device 1 not available

** Device 2 not available

** Device 3 not available


Loading from IDE device 4, partition 6: Name: hde6
  Type: U-Boot
   Image Name:   Linux-2.6.39.4
   Created:      2013-08-27  21:01:12 UTC
   Image Type:   ARM Linux Kernel Image (uncompressed)
   Data Size:    1864056 Bytes =  1.8 MB
   Load Address: 00008000
   Entry Point:  00008000
## Booting image at 00400000 ...


This indicates that u-boot also uses the reverse order, meaning that for u-boot sde is the leftmost disk when looking at the back side and sda the rightmost.
I asked tatae in which slot he inserted the disk; it was the leftmost slot when looking at the back side of the NAS, marked as 1.

Furthermore, u-boot passes these boot arguments to the kernel:
Code: Select all
Kernel command line: console=ttyS0,115200 root=/dev/sda7 ro boardType=mv88F5281 productType=5Big reset=0


This is taken from this output posted by tatae:
Code: Select all
Loading from IDE device 4, partition 6: Name: hde6
  Type: U-Boot
   Image Name:   Linux-2.6.39.4
   Created:      2013-09-23  18:39:53 UTC
   Image Type:   ARM Linux Kernel Image (uncompressed)
   Data Size:    1871800 Bytes =  1.8 MB
   Load Address: 00008000
   Entry Point:  00008000
## Booting image at 00400000 ...
   Image Name:   Linux-2.6.39.4
   Created:      2013-09-23  18:39:53 UTC
   Image Type:   ARM Linux Kernel Image (uncompressed)
   Data Size:    1871800 Bytes =  1.8 MB
   Load Address: 00008000
   Entry Point:  00008000
   Verifying Checksum ... OK
OK

Starting kernel ...

Uncompressing Linux... done, booting the kernel.
Linux version 2.6.39.4 (root@fvdwsl-5big2.local) (gcc version 4.5.4 (GCC) ) #20 PREEMPT Mon Sep 23 19:39:28 GMT+1 2013
CPU: Feroceon [41069260] revision 0 (ARMv5TEJ), cr=00053177
CPU: VIVT data cache, VIVT instruction cache
Machine: LaCie 5Big1 Network
Clearing invalid memory bank 0KB@0x00000000
Clearing invalid memory bank 0KB@0x00000000
Clearing invalid memory bank 0KB@0x00000000
Ignoring unrecognised tag 0x00000000
Ignoring unrecognised tag 0x00000000
Ignoring unrecognised tag 0x00000000
Ignoring unrecognised tag 0x41000403
Memory policy: ECC disabled, Data cache writeback
Built 1 zonelists in Zone order, mobility grouping on.  Total pages: 32512
Kernel command line: console=ttyS0,115200 root=/dev/sda7 ro boardType=mv88F5281 productType=5Big reset=0
PID hash table entries: 512 (order: -1, 2048 bytes)
Dentry cache hash table entries: 16384 (order: 4, 65536 bytes)
Inode-cache hash table entries: 8192 (order: 3, 32768 bytes)
Memory: 128MB = 128MB total
Memory: 125992k/125992k available, 5080k reserved, 0K highmem


So it boots from device 4 (slot 1): u-boot uses hde as the disk id but passes sda7 as the root filesystem boot argument, and therefore my guess was that I needed to set slot 1 as sda. If you do not do that, it boots a kernel from the disk in slot 1 on sda6 but tries to find a root filesystem on partition 7 of the disk in slot 5. So I think LaCie also uses the disk map I used.

(Of course I can adapt it in the kernel (both the running kernel and the standalone one, and of course adapt the dev node definitions accordingly). But if LaCie used the reversed order in the RAID setup, it can also be solved by putting the disks prepared by the LaCie firmware in reverse order (so disk 5 in slot 1, etc.). In all NAS setups we use sda as the disk in slot 1; I find this more logical. The normal user doesn't care about device numbers on the bus (or doesn't even know what we are talking about, and why should they :-D ).

Re: Raid 5 failed on 5big1

PostPosted: Wed Oct 30, 2013 7:30 pm
by totorweb
I just now tried to recover the RAID...

Code: Select all
root@(none):/dev # mdadm --verbose --create --assume-clean --metadata=0.90 --chunk=64 --level=raid5 --layout=left-symmetric --raid-devices=5 /dev/md4 /dev/sde2 missing /dev/sdc2 /dev/sdb2 /dev/sda2
mdadm: /dev/sde2 appears to be part of a raid array:
level=raid5 devices=5 ctime=Thu Jun 18 12:23:53 2009
mdadm: /dev/sdc2 appears to be part of a raid array:
level=raid5 devices=5 ctime=Thu Jun 18 12:23:53 2009
mdadm: /dev/sdb2 appears to be part of a raid array:
level=raid5 devices=5 ctime=Thu Jun 18 12:23:53 2009
mdadm: /dev/sda2 appears to be part of a raid array:
level=raid5 devices=5 ctime=Thu Jun 18 12:23:53 2009
mdadm: size set to 1464131904K
Continue creating array? y
mdadm: unexpected failure opening /dev/md4
root@(none):/dev #

:scratch

Re: Raid 5 failed on 5big1

PostPosted: Wed Oct 30, 2013 7:57 pm
by fvdw
Let me check whether I disabled mdadm support in the kernel in my attempt to make it smaller.
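In the meantime, a quick way to check from the running system whether the md driver is there at all; "unexpected failure opening /dev/md4" can simply mean the driver or the device node is missing (a sketch):
Code: Select all
grep -w md /proc/devices   # should list major 9 "md" if raid support is built into the kernel
cat /proc/mdstat           # only exists when the md driver is present
# if the driver is there but the node is missing, create it by hand (md uses major 9)
mknod /dev/md4 b 9 4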