/usr/lib/udev/rules.d/65-md-incremental.rules
# This file causes block devices with Linux RAID (mdadm) signatures to
# automatically cause mdadm to be run.
# See udev(8) for syntax

# Don't process any events if anaconda is running as anaconda brings up
# raid devices manually
ENV{ANACONDA}=="?*", GOTO="md_end"

# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="?*", GOTO="md_end"

# We process add events on block devices (since they are ready as soon as
# they are added to the system), but we must process change events as well
# on any dm devices (like LUKS partitions or LVM logical volumes) and on
# md devices because both of these first get added, then get brought live
# and trigger a change event. The reason we don't process change events
# on bare hard disks is because if you stop all arrays on a disk, then
# run fdisk on the disk to change the partitions, when fdisk exits it
# triggers a change event, and we want to wait until all the fdisks on
# all member disks are done before we do anything. Unfortunately, we have
# no way of knowing that, so we just have to let those arrays be brought
# up manually after fdisk has been run on all of the disks.

# First, process all add events (md and dm devices will not really do
# anything here, just regular disks, and this also won't get any imsm
# array members either)
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
        IMPORT{program}="/sbin/mdadm -I $env{DEVNAME} --export $devnode --offroot ${DEVLINKS}"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
        ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer"

# Next, check to make sure the BIOS raid stuff wasn't turned off via cmdline
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
        RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="md_imsm_inc_end"

SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
        RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
        RUN+="/sbin/mdadm -If $name"

# Next make sure that this isn't a dm device we should skip for some reason
ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="dm_change_end"
ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
        ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="dm_change_end"

# Finally catch any nested md raid arrays. If we brought up an md raid
# array that's part of another md raid array, it won't be ready to be used
# until the change event that occurs when it becomes live
KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
        ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="md_end"
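
A quick way to see these rules in action is with udevadm's dry-run mode. The
sketch below assumes a RAID member partition at /dev/sdb1 (a placeholder; use
your own device):

    # Dry-run the rule processing for a simulated "add" event on sdb1.
    # This prints every rules file consulted and the IMPORT/RUN keys that
    # would fire, without actually executing them.
    udevadm test --action=add /sys/class/block/sdb1

    # After editing a rules file, have udevd re-read its rules, then replay
    # add events for all block devices so the new rules take effect.
    udevadm control --reload
    udevadm trigger --action=add --subsystem-match=block

    # What the rules ultimately do: incrementally feed a newly seen RAID
    # member to mdadm; the array is started once enough members appear.
    mdadm -I /dev/sdb1
    cat /proc/mdstat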