October 15, 2014
 

1 Configure the iSCSI target (server)

[root@localhost ~]# yum install scsi-target-utils
[root@localhost ~]# service tgtd start
 Starting SCSI target daemon: [ OK ]
[root@localhost ~]# chkconfig tgtd on
[root@localhost ~]#
[root@localhost ~]# mkdir /vdisk
[root@localhost ~]# dd if=/dev/zero of=/vdisk/vdisk.img bs=1M count=500
 500+0 records in
 500+0 records out
 524288000 bytes (524 MB) copied, 2.24159 s, 234 MB/s
[root@localhost ~]#

Edit the configuration file

[root@localhost ~]# vi /etc/tgt/targets.conf
 <target iqn.2014-10.com.example.target1>
     backing-store /vdisk/vdisk.img
     write-cache on
 </target>
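
As written, this target accepts any initiator (the ACL shows ALL in the verification below). targets.conf also supports initiator ACLs and CHAP authentication; a sketch of a restricted variant (the address range, username, and password here are examples only):

 <target iqn.2014-10.com.example.target1>
     backing-store /vdisk/vdisk.img
     write-cache on
     initiator-address 192.168.254.0/24
     incominguser iscsiuser Secret_Pass123
 </target>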

Restart the service

[root@localhost ~]# service tgtd restart
 Stopping SCSI target daemon: [ OK ]
 Starting SCSI target daemon: [ OK ]
[root@localhost ~]#

Verify

[root@localhost ~]# tgt-admin --show target1
 Target 1: iqn.2014-10.com.example.target1
     System information:
         Driver: iscsi
         State: ready
     I_T nexus information:
         I_T nexus: 2
             Initiator: iqn.1994-05.com.redhat:f8c8fd7aac5c
             Connection: 0
                 IP Address: 192.168.254.129
         I_T nexus: 4
             Initiator: iqn.1994-05.com.redhat:67f04a9b2426
             Connection: 0
                 IP Address: 192.168.254.128
     LUN information:
         LUN: 0
             Type: controller
             SCSI ID: IET 00010000
             SCSI SN: beaf10
             Size: 0 MB, Block size: 1
             Online: Yes
             Removable media: No
             Prevent removal: No
             Readonly: No
             Backing store type: null
             Backing store path: None
             Backing store flags:
         LUN: 1
             Type: disk
             SCSI ID: IET 00010001
             SCSI SN: beaf11
             Size: 524 MB, Block size: 512
             Online: Yes
             Removable media: No
             Prevent removal: No
             Readonly: No
             Backing store type: rdwr
             Backing store path: /vdisk/vdisk.img
             Backing store flags:
     Account information:
     ACL information:
         ALL
[root@localhost ~]#
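
Note that LUN 0 (Type: controller) is created automatically by tgtd for every target; the backing store defined in targets.conf appears as LUN 1.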

2 Configure the iSCSI initiator (client)

Install the initiator utilities (already installed by default on this system):
[root@localhost ~]# yum install iscsi-initiator-utils
Package iscsi-initiator-utils-6.2.0.873-10.el6.x86_64 already installed and latest version

Discovery

[root@localhost ~]# iscsiadm -m discovery -t sendtargets -p 192.168.254.130:3260
 Starting iscsid: [ OK ]
 192.168.254.130:3260,1 iqn.2014-10.com.example.target1
[root@localhost ~]#

Log in

[root@localhost ~]# iscsiadm -m node -T iqn.2014-10.com.example.target1 --login
 Logging in to [iface: default, target: iqn.2014-10.com.example.target1, portal: 192.168.254.130,3260] (multiple)
 Login to [iface: default, target: iqn.2014-10.com.example.target1, portal: 192.168.254.130,3260] successful.
[root@localhost ~]#
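
For reference, the reverse operations: the same iscsiadm node mode can log the session out and delete the cached node record (not run in this setup):

[root@localhost ~]# iscsiadm -m node -T iqn.2014-10.com.example.target1 --logout
[root@localhost ~]# iscsiadm -m node -T iqn.2014-10.com.example.target1 -o delete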

Check the newly added device

[root@localhost ~]# fdisk -l
 Disk /dev/sdb: 524 MB, 524288000 bytes
 17 heads, 59 sectors/track, 1020 cylinders
 Units = cylinders of 1003 * 512 = 513536 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000
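
On a host with several disks it is not always obvious which /dev/sdX came from which session; iscsiadm's session mode with print level 3 lists the attached SCSI devices per target:

[root@localhost ~]# iscsiadm -m session -P 3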

Check the services

[root@localhost ~]# service iscsid status
 iscsid (pid 1300) is running...
[root@localhost ~]# chkconfig --list iscsid
 iscsid 0:off 1:off 2:off 3:on 4:on 5:on 6:off
[root@localhost ~]#

3 Configure multipath

[root@node1 ~]# rpm -q device-mapper-multipath
 device-mapper-multipath-0.4.9-72.el6.x86_64
[root@node1 ~]# service multipathd status
 multipathd is stopped
[root@node1 ~]# vi /etc/multipath.conf
[root@node1 ~]# service multipathd status
 multipathd is stopped
[root@node1 ~]# service multipathd start
 Starting multipathd daemon: [ OK ]
[root@node1 ~]# multipath -ll
 mpatha (1IET 00010001) dm-2 IET,VIRTUAL-DISK
 size=500M features='0' hwhandler='0' wp=rw
 `-+- policy='round-robin 0' prio=1 status=active
 `- 3:0:0:1 sdb 8:16 active ready running
[root@node1 ~]#
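
The vi step above does not show the file contents. A minimal /etc/multipath.conf that produces the friendly mpatha name seen in multipath -ll would be the sketch below; on RHEL 6, mpathconf --enable can also generate an equivalent default file:

 defaults {
     user_friendly_names yes
 }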

4 Install the cluster suite

[root@node1 ~]# yum install ricci
[root@node1 ~]# passwd ricci
[root@node1 ~]# chkconfig ricci on
[root@node1 ~]# service ricci start
[root@console ~]# yum install luci
[root@console ~]# chkconfig luci on
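
luci must also be started before its web UI is reachable; by default it listens on HTTPS port 8084, so the cluster is then created from https://console:8084 (console being this lab's management host):

[root@console ~]# service luci start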

Create the cluster, add the nodes, and check the services

[root@node2 ~]# service cman status
 cluster is running.
[root@node2 ~]# service clvmd status
 clvmd (pid 3353) is running...
 Clustered Volume Groups: (none)
 Active clustered Logical Volumes: (none)
[root@node2 ~]# service rgmanager status
 rgmanager (pid 3399) is running...
[root@node2 ~]# service gfs2 status
 GFS2: no entries found in /etc/fstab
[root@node2 ~]# service modclusterd status
 modclusterd (pid 3051) is running...
[root@node2 ~]# clustat
 Cluster Status for htcluster @ Wed Oct 15 23:38:26 2014
 Member Status: Quorate

 Member Name            ID   Status
 ------ ----            ---- ------
 node1                     1 Online
 node2                     2 Online, Local
[root@node2 ~]#

5 Format the filesystem

[root@node1 ~]# yum install gfs2-utils

[root@node1 ~]# mkfs.gfs2 -p lock_dlm -t htcluster:datastore -j 2 /dev/dm-2
This will destroy any data on /dev/dm-2.
It appears to contain: data

Are you sure you want to proceed? [y/n] y

Device: /dev/dm-2
Blocksize: 4096
Device Size 0.49 GB (128000 blocks)
Filesystem Size: 0.49 GB (127997 blocks)
Journals: 2
Resource Groups: 2
Locking Protocol: "lock_dlm"
Lock Table: "htcluster:datastore"
UUID: ea8bba63-df8c-9139-4e2e-e03a70f64891

[root@node1 ~]#
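
The -j 2 above creates one journal per cluster node; every node that mounts the filesystem simultaneously needs its own journal. If a third node were added later, an extra journal could be added to the mounted filesystem with gfs2_jadd (shown for reference, not run here):

[root@node1 ~]# gfs2_jadd -j 1 /mnt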
Mounting directly (without the cluster running) fails:
[root@node1 ~]# mount /dev/dm-2 /mnt/
gfs_controld join connect error: Connection refused
error mounting lockproto lock_dlm
[root@node1 ~]#

6 After starting the cluster, add an fstab entry and mount manually

Method 1
[root@node2 ~]# vi /etc/fstab
/dev/dm-2 /mnt gfs2 _netdev 0 0
[root@node2 ~]# mount /dev/dm-2 /mnt/
[root@node2 ~]# service gfs2 status
Configured GFS2 mountpoints:
/mnt
Active GFS2 mountpoints:
/mnt
[root@node2 ~]#
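
The _netdev option matters for an iSCSI-backed device: it defers the mount until the network (and therefore the iSCSI login) is up, instead of blocking the boot while /dev/dm-2 does not yet exist.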

Method 2
[root@node1 ~]# vi /etc/fstab
[root@node1 ~]# service gfs2 status
GFS2: service is not running
[root@node1 ~]# service gfs2 start
Mounting GFS2 filesystem (/mnt): [ OK ]
[root@node1 ~]# service gfs2 status
Configured GFS2 mountpoints:
/mnt
Active GFS2 mountpoints:
/mnt
[root@node1 ~]#

Check the current service runlevels
[root@node2 ~]# chkconfig --list cman
cman 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list rgmanager
rgmanager 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list clvmd
clvmd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list modclusterd
modclusterd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list gfs2
gfs2 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]#

Verify
[root@node2 ~]# less /var/log/messages
[root@node2 ~]# touch /mnt/frm-n2

[root@node1 ~]# touch /mnt/frm-n1
[root@node1 ~]# ls /mnt/
frm-n1 frm-n2
[root@node1 ~]#
[root@node2 ~]# mount
/dev/mapper/VolGroup-lv_root on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/mapper/mpatha on /mnt type gfs2 (rw,relatime,hostdata=jid=0)
[root@node2 ~]#
Restarting node2 manually and starting node1 through luci were both tried; both work.

Leaving and rejoining the cluster affects the configuration file as follows: after a node leaves, its cluster services are stopped, but the original configuration file is unchanged:
[root@node1 ~]# clustat
Could not connect to CMAN: No such file or directory
[root@node1 ~]# service cman status
corosync is stopped
[root@node1 ~]#

Add a resource, then confirm the configuration file

[root@node2 ~]# cat /etc/cluster/cluster.conf
 <?xml version="1.0"?>
 <cluster config_version="3" name="htcluster">
     <clusternodes>
         <clusternode name="node1" nodeid="1"/>
         <clusternode name="node2" nodeid="2"/>
     </clusternodes>
     <cman expected_votes="1" two_node="1"/>
     <rm>
         <resources>
             <clusterfs device="/dev/dm-2" force_unmount="1" fsid="26886" fstype="gfs2" mountpoint="/mnt" name="htgfs2" options="_netdev" self_fence="1"/>
         </resources>
     </rm>
 </cluster>
[root@node2 ~]#
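
If cluster.conf is ever edited by hand instead of through luci, the result can be validated and pushed to the other node with the standard tools (a reference sketch; bump config_version before propagating):

[root@node2 ~]# ccs_config_validate
[root@node2 ~]# cman_tool version -r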

Issues

luci may have browser-compatibility problems under IE.
After both nodes are shut down at the same time, clvmd times out while the first node boots, and startup only proceeds once the second node's boot also reaches clvmd.
