October 15, 2014
 

1. Configure the iSCSI target (server side)

[root@localhost ~]# yum install scsi-target-utils
[root@localhost ~]# service tgtd start
 Starting SCSI target daemon: [ OK ]
[root@localhost ~]# chkconfig tgtd on
[root@localhost ~]#
[root@localhost ~]# mkdir /vdisk
[root@localhost ~]# dd if=/dev/zero of=/vdisk/vdisk.img bs=1M count=500
 500+0 records in
 500+0 records out
 524288000 bytes (524 MB) copied, 2.24159 s, 234 MB/s
[root@localhost ~]#

Edit the configuration file

[root@localhost ~]# vi /etc/tgt/targets.conf
 <target iqn.2014-10.com.example.target1>
 backing-store /vdisk/vdisk.img
 write-cache on
 </target>
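
Access can optionally be restricted in the same file by adding lines inside the <target> block; a minimal sketch (the subnet matches the initiators above, while the CHAP user name and password are placeholders, not part of the original setup):

 initiator-address 192.168.254.0/24
 incominguser iscsiuser iscsipass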

Restart the service

[root@localhost ~]# service tgtd restart
 Stopping SCSI target daemon: [ OK ]
 Starting SCSI target daemon: [ OK ]
[root@localhost ~]#

Verify

[root@localhost ~]# tgt-admin --show target1
 Target 1: iqn.2014-10.com.example.target1
 System information:
 Driver: iscsi
 State: ready
 I_T nexus information:
 I_T nexus: 2
 Initiator: iqn.1994-05.com.redhat:f8c8fd7aac5c
 Connection: 0
 IP Address: 192.168.254.129
 I_T nexus: 4
 Initiator: iqn.1994-05.com.redhat:67f04a9b2426
 Connection: 0
 IP Address: 192.168.254.128
 LUN information:
 LUN: 0
 Type: controller
 SCSI ID: IET 00010000
 SCSI SN: beaf10
 Size: 0 MB, Block size: 1
 Online: Yes
 Removable media: No
 Prevent removal: No
 Readonly: No
 Backing store type: null
 Backing store path: None
 Backing store flags:
 LUN: 1
 Type: disk
 SCSI ID: IET 00010001
 SCSI SN: beaf11
 Size: 524 MB, Block size: 512
 Online: Yes
 Removable media: No
 Prevent removal: No
 Readonly: No
 Backing store type: rdwr
 Backing store path: /vdisk/vdisk.img
 Backing store flags:
 Account information:
 ACL information:
 ALL
 [root@localhost ~]#

2. Configure the iSCSI initiator (client side)

Install the initiator utilities (already installed by default on this system)
[root@localhost ~]# yum install iscsi-initiator-utils
Package iscsi-initiator-utils-6.2.0.873-10.el6.x86_64 already installed and latest version

Discover the target

[root@localhost ~]# iscsiadm -m discovery -t sendtargets -p 192.168.254.130:3260
 Starting iscsid: [ OK ]
 192.168.254.130:3260,1 iqn.2014-10.com.example.target1
[root@localhost ~]#
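
The discovered target is also stored in the local node database, which can be listed for reference before logging in:

[root@localhost ~]# iscsiadm -m node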

Log in

[root@localhost ~]# iscsiadm -m node -T iqn.2014-10.com.example.target1 --login
 Logging in to [iface: default, target: iqn.2014-10.com.example.target1, portal: 192.168.254.130,3260] (multiple)
 Login to [iface: default, target: iqn.2014-10.com.example.target1, portal: 192.168.254.130,3260] successful.
[root@localhost ~]#
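
For reference, the session can later be disconnected with the matching logout command (only when the device is no longer in use):

[root@localhost ~]# iscsiadm -m node -T iqn.2014-10.com.example.target1 --logout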

Check the newly added device

[root@localhost ~]# fdisk -l
 Disk /dev/sdb: 524 MB, 524288000 bytes
 17 heads, 59 sectors/track, 1020 cylinders
 Units = cylinders of 1003 * 512 = 513536 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000

Check the service

[root@localhost ~]# service iscsid status
 iscsid (pid 1300) is running...
[root@localhost ~]# chkconfig --list iscsid
 iscsid 0:off 1:off 2:off 3:on 4:on 5:on 6:off
[root@localhost ~]#

3. Configure multipath

[root@node1 ~]# rpm -q device-mapper-multipath
 device-mapper-multipath-0.4.9-72.el6.x86_64
[root@node1 ~]# service multipathd status
 multipathd is stopped
[root@node1 ~]# vi /etc/multipath.conf
[root@node1 ~]# service multipathd status
 multipathd is stopped
[root@node1 ~]# service multipathd start
 Starting multipathd daemon: [ OK ]
[root@node1 ~]# multipath -ll
 mpatha (1IET 00010001) dm-2 IET,VIRTUAL-DISK
 size=500M features='0' hwhandler='0' wp=rw
 `-+- policy='round-robin 0' prio=1 status=active
 `- 3:0:0:1 sdb 8:16 active ready running
[root@node1 ~]#
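
The notes above do not show what was put into /etc/multipath.conf. A minimal sketch of what it might contain to produce the friendly mpatha name seen in multipath -ll; the blacklist entry is illustrative and must match the actual local system disk:

 defaults {
 user_friendly_names yes
 }
 blacklist {
 devnode "^sda$"
 }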

4. Install the cluster suite

[root@node1 ~]# yum install ricci
[root@node1 ~]# passwd ricci
[root@node1 ~]# chkconfig ricci on
[root@node1 ~]# service ricci start
[root@console ~]# yum install luci
[root@console ~]# chkconfig luci on
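
luci then needs to be started and reached from a browser; on EL6 it listens on port 8084 by default (the hostname below is the management node from these notes):

[root@console ~]# service luci start

Browse to https://console:8084, log in, create the cluster (htcluster) and add node1 and node2 using the ricci password set above.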

Create the cluster, add the nodes, and check the services

[root@node2 ~]# service cman status
 cluster is running.
 [root@node2 ~]# service clvmd status
 clvmd (pid 3353) is running...
 Clustered Volume Groups: (none)
 Active clustered Logical Volumes: (none)
 [root@node2 ~]# service rgmanager status
 rgmanager (pid 3399) is running...
 [root@node2 ~]# service gfs2 status
 GFS2: no entries found in /etc/fstab
 [root@node2 ~]# service modclusterd status
 modclusterd (pid 3051) is running...
 [root@node2 ~]# clustat
 Cluster Status for htcluster @ Wed Oct 15 23:38:26 2014
 Member Status: Quorate
Member Name ID Status
 ------ ---- ---- ------
 node1 1 Online
 node2 2 Online, Local
[root@node2 ~]#

5. Format the filesystem

[root@node1 ~]# yum install gfs2-utils

[root@node1 ~]# mkfs.gfs2 -p lock_dlm -t htcluster:datastore -j 2 /dev/dm-2
This will destroy any data on /dev/dm-2.
It appears to contain: data

Are you sure you want to proceed? [y/n] y

Device: /dev/dm-2
Blocksize: 4096
Device Size 0.49 GB (128000 blocks)
Filesystem Size: 0.49 GB (127997 blocks)
Journals: 2
Resource Groups: 2
Locking Protocol: "lock_dlm"
Lock Table: "htcluster:datastore"
UUID: ea8bba63-df8c-9139-4e2e-e03a70f64891

[root@node1 ~]#
Mount it directly (cluster not started):
[root@node1 ~]# mount /dev/dm-2 /mnt/
gfs_controld join connect error: Connection refused
error mounting lockproto lock_dlm
[root@node1 ~]#

6. After starting the cluster, add the fstab entry and mount manually

Method 1
[root@node2 ~]# vi /etc/fstab
/dev/dm-2 /mnt gfs2 _netdev 0 0
[root@node2 ~]# mount /dev/dm-2 /mnt/
[root@node2 ~]# service gfs2 status
Configured GFS2 mountpoints:
/mnt
Active GFS2 mountpoints:
/mnt
[root@node2 ~]#

Method 2
[root@node1 ~]# vi /etc/fstab
[root@node1 ~]# service gfs2 status
GFS2: service is not running
[root@node1 ~]# service gfs2 start
Mounting GFS2 filesystem (/mnt): [ OK ]
[root@node1 ~]# service gfs2 status
Configured GFS2 mountpoints:
/mnt
Active GFS2 mountpoints:
/mnt
[root@node1 ~]#

Check the current service runlevels
[root@node2 ~]# chkconfig --list cman
cman 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list rgmanager
rgmanager 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list clvmd
clvmd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list modclusterd
modclusterd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list gfs2
gfs2 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]#

Verify
[root@node2 ~]# less /var/log/messages
[root@node2 ~]# touch /mnt/frm-n2

[root@node1 ~]# touch /mnt/frm-n1
[root@node1 ~]# ls /mnt/
frm-n1 frm-n2
[root@node1 ~]#
[root@node2 ~]# mount
/dev/mapper/VolGroup-lv_root on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/mapper/mpatha on /mnt type gfs2 (rw,relatime,hostdata=jid=0)
[root@node2 ~]#
node2 was restarted manually and node1 was started through luci.
Configuration file changes when a node leaves and rejoins the cluster:
after leaving, the cluster services on the node are stopped, but the original configuration file is unchanged.
[root@node1 ~]# clustat
Could not connect to CMAN: No such file or directory
[root@node1 ~]# service cman status
corosync is stopped
[root@node1 ~]#

Add a resource and confirm the configuration file

[root@node2 ~]# cat /etc/cluster/cluster.conf
 <?xml version="1.0"?>
 <cluster config_version="3" name="htcluster">
 <clusternodes>
 <clusternode name="node1" nodeid="1"/>
 <clusternode name="node2" nodeid="2"/>
 </clusternodes>
 <cman expected_votes="1" two_node="1"/>
 <rm>
 <resources>
 <clusterfs device="/dev/dm-2" force_unmount="1" fsid="26886" fstype="gfs2" mountpoint="/mnt" name="htgfs2" options="_netdev" self_fence="1"/>
 </resources>
 </rm>
 </cluster>
 [root@node2 ~]#
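
With only a <resources> block, rgmanager does not yet manage the mount. A sketch of a service stanza referencing the resource, for illustration only (the service name and recovery policy are assumptions, not from the original cluster.conf):

 <rm>
 <resources>
 <clusterfs device="/dev/dm-2" force_unmount="1" fsid="26886" fstype="gfs2" mountpoint="/mnt" name="htgfs2" options="_netdev" self_fence="1"/>
 </resources>
 <service autostart="1" name="gfssvc" recovery="relocate">
 <clusterfs ref="htgfs2"/>
 </service>
 </rm>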

Known issues
luci may have browser-compatibility problems under IE.
If both nodes are shut down at the same time, the first node to boot times out while starting clvmd and only continues once the second node also boots and reaches clvmd.

November 30, 2012
 

List disk devices with fdisk

 [root@oracle ~]# fdisk -l
Disk /dev/sda: 107.4 GB, 107374182400 bytes
 255 heads, 63 sectors/track, 13054 cylinders
 Units = cylinders of 16065 * 512 = 8225280 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00013e83

Device Boot Start End Blocks Id System
 /dev/sda1 * 1 64 512000 83 Linux
 Partition 1 does not end on cylinder boundary.
 /dev/sda2 64 13055 104344576 8e Linux LVM

Disk /dev/sdb: 10.7 GB, 10737418240 bytes
 255 heads, 63 sectors/track, 1305 cylinders
 Units = cylinders of 16065 * 512 = 8225280 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000

Disk /dev/mapper/vg_oracle-lv_root: 53.7 GB, 53687091200 bytes
 255 heads, 63 sectors/track, 6527 cylinders
 Units = cylinders of 16065 * 512 = 8225280 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000

Disk /dev/mapper/vg_oracle-lv_swap: 4227 MB, 4227858432 bytes
 255 heads, 63 sectors/track, 514 cylinders
 Units = cylinders of 16065 * 512 = 8225280 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000

Disk /dev/mapper/vg_oracle-lv_home: 48.9 GB, 48930750464 bytes
 255 heads, 63 sectors/track, 5948 cylinders
 Units = cylinders of 16065 * 512 = 8225280 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000
[root@oracle ~]#

To use an entire disk as a physical volume, any existing partition table on it must first be cleared.
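
A common way to clear it is to zero the first sector, which holds the MBR partition table (destructive; double-check the device name first). A sketch:

 [root@oracle ~]# dd if=/dev/zero of=/dev/sdb bs=512 count=1

Otherwise pvcreate may refuse to initialize a disk that still carries a partition table.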

Create a physical volume

 [root@oracle ~]# pvcreate /dev/sdb
 Writing physical volume data to disk "/dev/sdb"
 Physical volume "/dev/sdb" successfully created
 [root@oracle ~]#

Scan for block devices that can be used as physical volumes

 [root@oracle ~]# lvmdiskscan
 /dev/ram0 [ 16.00 MiB]
 /dev/root [ 50.00 GiB]
 /dev/ram1 [ 16.00 MiB]
 /dev/sda1 [ 500.00 MiB]
 /dev/vg_oracle/lv_swap [ 3.94 GiB]
 /dev/ram2 [ 16.00 MiB]
 /dev/sda2 [ 99.51 GiB] LVM physical volume
 /dev/vg_oracle/lv_home [ 45.57 GiB]
 /dev/ram3 [ 16.00 MiB]
 /dev/ram4 [ 16.00 MiB]
 /dev/ram5 [ 16.00 MiB]
 /dev/ram6 [ 16.00 MiB]
 /dev/ram7 [ 16.00 MiB]
 /dev/ram8 [ 16.00 MiB]
 /dev/ram9 [ 16.00 MiB]
 /dev/ram10 [ 16.00 MiB]
 /dev/ram11 [ 16.00 MiB]
 /dev/ram12 [ 16.00 MiB]
 /dev/ram13 [ 16.00 MiB]
 /dev/ram14 [ 16.00 MiB]
 /dev/ram15 [ 16.00 MiB]
 /dev/sdb [ 10.00 GiB] LVM physical volume
 3 disks
 17 partitions
 1 LVM physical volume whole disk
 1 LVM physical volume
 [root@oracle ~]#

Display physical volume attributes

 [root@oracle ~]# pvs
 PV VG Fmt Attr PSize PFree
 /dev/sda2 vg_oracle lvm2 a-- 99.51g 0
 /dev/sdb lvm2 a-- 10.00g 10.00g

Display detailed physical volume parameters
[root@oracle ~]# pvdisplay
--- Physical volume ---
PV Name /dev/sda2
VG Name vg_oracle
PV Size 99.51 GiB / not usable 3.00 MiB
Allocatable yes (but full)
PE Size 4.00 MiB
Total PE 25474
Free PE 0
Allocated PE 25474
PV UUID JdCNKZ-Rkhn-yU3I-b90L-PsBA-OeWh-eFKRxo

"/dev/sdb" is a new physical volume of "10.00 GiB"
--- NEW Physical volume ---
PV Name /dev/sdb
VG Name
PV Size 10.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID vXmWfe-qy80-PLkB-4OP1-XG7z-d2h5-XoMu7W

[root@oracle ~]# pvscan
PV /dev/sda2 VG vg_oracle lvm2 [99.51 GiB / 0 free]
PV /dev/sdb lvm2 [10.00 GiB]
Total: 2 [109.51 GiB] / in use: 1 [99.51 GiB] / in no VG: 1 [10.00 GiB]
[root@oracle ~]#

Remove a physical volume (if the physical volume has been added to a volume group, it must first be removed from that volume group)
[root@oracle ~]# pvremove /dev/sdb
Labels on physical volume "/dev/sdb" successfully wiped

————————————————

Create a volume group

 #vgcreate vg1 /dev/sdb /dev/sdc
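
The physical extent size can also be chosen at creation time with -s (the default is 4 MiB); for example, 16 MiB extents:

 #vgcreate -s 16M vg1 /dev/sdb /dev/sdc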

Add a physical volume to an existing volume group

 [root@oracle ~]# pvscan
 PV /dev/sda2 VG vg_oracle lvm2 [99.51 GiB / 0 free]
 PV /dev/sdb lvm2 [10.00 GiB]
 Total: 2 [109.51 GiB] / in use: 1 [99.51 GiB] / in no VG: 1 [10.00 GiB]
 [root@oracle ~]# vgextend vg_oracle /dev/sdb
 Volume group "vg_oracle" successfully extended
 [root@oracle ~]# pvscan
 PV /dev/sda2 VG vg_oracle lvm2 [99.51 GiB / 0 free]
 PV /dev/sdb VG vg_oracle lvm2 [10.00 GiB / 10.00 GiB free]
 Total: 2 [109.50 GiB] / in use: 2 [109.50 GiB] / in no VG: 0 [0 ]
 [root@oracle ~]#
View volume group information
 [root@oracle ~]# vgs
 VG #PV #LV #SN Attr VSize VFree
 vg_oracle 2 3 0 wz--n- 109.50g 10.00g
 [root@oracle ~]# vgdisplay
 --- Volume group ---
 VG Name vg_oracle
 System ID
 Format lvm2
 Metadata Areas 2
 Metadata Sequence No 5
 VG Access read/write
 VG Status resizable
 MAX LV 0
 Cur LV 3
 Open LV 3
 Max PV 0
 Cur PV 2
 Act PV 2
 VG Size 109.50 GiB
 PE Size 4.00 MiB
 Total PE 28033
 Alloc PE / Size 25474 / 99.51 GiB
 Free PE / Size 2559 / 10.00 GiB
 VG UUID O3uWt8-me7n-8vrZ-fcO2-5Z6b-SUse-e0llmJ

[root@oracle ~]#

Remove a physical volume from a volume group
[root@oracle ~]# vgreduce vg_oracle /dev/sdb
Removed "/dev/sdb" from volume group "vg_oracle"

[root@oracle ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg_oracle lvm2 a-- 99.51g 0
/dev/sdb lvm2 a-- 10.00g 10.00g
[root@oracle ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg_oracle 1 3 0 wz--n- 99.51g 0
[root@oracle ~]#

Remove a volume group (one that does not contain logical volumes)
[root@oracle ~]# vgremove vg1-new
Volume group "vg1-new" successfully removed
[root@oracle ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg_oracle lvm2 a-- 99.51g 0
/dev/sdb lvm2 a-- 10.00g 10.00g
[root@oracle ~]#

Split a volume group
[root@oracle ~]# vgsplit vg_oracle new_vg /dev/sdb
New volume group "new_vg" successfully split from "vg_oracle"
[root@oracle ~]# vgs
VG #PV #LV #SN Attr VSize VFree
new_vg 1 0 0 wz--n- 10.00g 10.00g
vg_oracle 1 3 0 wz--n- 99.51g 0
[root@oracle ~]#

Merge volume groups
[root@oracle ~]# vgmerge -v vg_oracle new_vg
Checking for volume group "new_vg"
Checking for volume group "vg_oracle"
Archiving volume group "new_vg" metadata (seqno 2).
Archiving volume group "vg_oracle" metadata (seqno 8).
Writing out updated volume group
Creating volume group backup "/etc/lvm/backup/vg_oracle" (seqno 9).
Volume group "new_vg" successfully merged into "vg_oracle"
[root@oracle ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg_oracle 2 3 0 wz--n- 109.50g 10.00g
[root@oracle ~]#

Rename a volume group
[root@oracle ~]# vgcreate vg1 /dev/sdb
Volume group "vg1" successfully created
[root@oracle ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg1 1 0 0 wz--n- 10.00g 10.00g
vg_oracle 1 3 0 wz--n- 99.51g 0
[root@oracle ~]# vgrename vg1 vg1-new
Volume group "vg1" successfully renamed to "vg1-new"
[root@oracle ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg1-new 1 0 0 wz--n- 10.00g 10.00g
vg_oracle 1 3 0 wz--n- 99.51g 0
[root@oracle ~]#

————————————————

Logical volumes come in three types: linear, striped, and mirrored.
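
The commands below create linear volumes only. As a sketch, striped and mirrored volumes take extra lvcreate flags (sizes and names here are illustrative; striping needs at least as many PVs in the volume group as stripes, and a mirror needs room for the extra copy):

 #lvcreate -L 1G -i 2 -I 64 -n lv_stripe vg_oracle
 #lvcreate -L 1G -m 1 -n lv_mirror vg_oracle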

Create a logical volume of a specified size
[root@oracle ~]# lvcreate -L 6G -n new_vg-lv_01 new_vg
Logical volume "new_vg-lv_01" created

View the created logical volume
#lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
new_vg-lv_01 new_vg -wi-a--- 6.00g
lv_home vg_oracle -wi-ao-- 41.57g
lv_root vg_oracle -wi-ao-- 50.00g
lv_swap vg_oracle -wi-ao-- 3.94g
[root@oracle ~]#

Format the logical volume
[root@oracle ~]# mkfs.ext4 /dev/new_vg/new_vg-lv_01
mke2fs 1.41.12 (17-May-2010)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
393216 inodes, 1572864 blocks
78643 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=1610612736
48 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736

Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done

This filesystem will be automatically checked every 23 mounts or
180 days, whichever comes first. Use tune2fs -c or -i to override.
[root@oracle ~]#
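
As the mkfs output notes, the periodic check schedule can be adjusted with tune2fs; for example, to disable both the mount-count and interval-based checks (a policy choice, not a requirement):

[root@oracle ~]# tune2fs -c 0 -i 0 /dev/new_vg/new_vg-lv_01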

View logical volume information
[root@oracle ~]# lvscan
ACTIVE '/dev/new_vg/new_vg-lv_01' [6.00 GiB] inherit
ACTIVE '/dev/vg_oracle/lv_root' [50.00 GiB] inherit
ACTIVE '/dev/vg_oracle/lv_home' [41.57 GiB] inherit
ACTIVE '/dev/vg_oracle/lv_swap' [3.94 GiB] inherit
[root@oracle ~]#

View detailed logical volume parameters
#lvdisplay

Extend a logical volume
[root@oracle ~]# lvextend -L +1G /dev/new_vg/new_vg-lv_01
Extending logical volume new_vg-lv_01 to 7.00 GiB
Logical volume new_vg-lv_01 successfully resized

Grow the filesystem after extending the logical volume
[root@oracle ~]# resize2fs /dev/new_vg/new_vg-lv_01
resize2fs 1.41.12 (17-May-2010)
Resizing the filesystem on /dev/new_vg/new_vg-lv_01 to 1835008 (4k) blocks.
The filesystem on /dev/new_vg/new_vg-lv_01 is now 1835008 blocks long.

[root@oracle ~]#
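
On recent lvm2 versions the two steps can be combined: lvextend -r (--resizefs) resizes the filesystem right after extending the volume. A sketch against the same volume:

[root@oracle ~]# lvextend -r -L +1G /dev/new_vg/new_vg-lv_01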

Remove a logical volume
[root@oracle ~]# lvremove /dev/new_vg/new_vg-lv_01
Do you really want to remove active logical volume new_vg-lv_01? [y/n]: y
Logical volume "new_vg-lv_01" successfully removed
[root@oracle ~]#

Mount a logical volume
[root@oracle ~]# mount /dev/new_vg/new_vg-lv_01 /new/
[root@oracle ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_oracle-lv_root
50G 13G 35G 27% /
tmpfs 939M 492M 448M 53% /dev/shm
/dev/sda1 485M 54M 406M 12% /boot
/dev/mapper/vg_oracle-lv_home
45G 2.6G 41G 7% /home
/dev/mapper/new_vg-new_vg--lv_01
2.0G 67M 1.9G 4% /new
[root@oracle ~]#

Mount the filesystem automatically at boot
#vi /etc/fstab
/dev/new_vg/new_vg-lv_01 /new ext4 defaults 0 0
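
The new entry can be tested without a reboot (assuming /new exists and the volume is not already mounted):

[root@oracle ~]# mount -a
[root@oracle ~]# df -h /new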