Welcome 微信登录

首页 / 操作系统 / Linux / Vertica 7.1安装最佳实践(RHEL6.4)

一、前期准备工作
  • 1.1各节点IP和主机名
  • 1.2上传脚本并设定环境变量
  • 1.3添加信任
  • 1.4前期准备检查并调整
二、Vertica安装
三、集群性能评估

一、前期准备工作:

1.1各节点IP和主机名

192.168.1.137 DB01
192.168.1.138 DB02
192.168.1.139 DB03
192.168.1.140 DB04
在节点1配置/etc/hosts,添加上面信息。

1.2上传脚本并设定环境变量

在节点1上传两个安装脚本到/usr/local/bin

cluster_copy_all_nodes:

#!/bin/bash
# Copy a file (or, with -r, a directory) to every node in NODE_LIST
# except the local host.
# Usage: cluster_copy_all_nodes SRC DST
#        cluster_copy_all_nodes -r SRC DST
# Requires: NODE_LIST set in .bash_profile (space-separated hostnames).
SELF=$(hostname)
if [ -z "$NODE_LIST" ]; then
  echo
  echo "Error: NODE_LIST environment variable must be set in .bash_profile"
  exit 1
fi
# NODE_LIST is intentionally unquoted: word-splitting yields one host per word.
for i in $NODE_LIST; do
  if [ "$i" != "$SELF" ]; then
    if [ "$1" = "-r" ]; then
      scp -oStrictHostKeyChecking=no -r "$2" "$i:$3"
    else
      scp -oStrictHostKeyChecking=no "$1" "$i:$2"
    fi
  fi
done
wait

cluster_run_all_nodes:

#!/bin/bash
# Run a command on every node in NODE_LIST via ssh.
# Usage: cluster_run_all_nodes [--background] COMMAND...
#        --background launches all nodes in parallel and waits for them.
# Requires: NODE_LIST set in .bash_profile (space-separated hostnames).
if [ -z "$NODE_LIST" ]; then
  echo
  echo "Error: NODE_LIST environment variable must be set in .bash_profile"
  exit 1
fi
if [[ "$1" = "--background" ]]; then
  shift
  for i in $NODE_LIST; do
    # -n: don't read stdin, so backgrounded ssh jobs don't compete for it.
    ssh -oStrictHostKeyChecking=no -n "$i" "$@" &
  done
else
  for i in $NODE_LIST; do
    ssh -oStrictHostKeyChecking=no "$i" "$@"
  done
fi
wait

配置节点1的环境变量:
vi /root/.bash_profile
export NODE_LIST="DB01 DB02 DB03 DB04"

1.3添加信任

ssh-keygen -q -t rsa -N "" -f ~/.ssh/id_rsa
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.137
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.138
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.139
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.140
cluster_run_all_nodes "hostname ; date"

1.4前期准备检查并调整

1.4.1 同步检查系统版本,运行级别,挂载目录结构,网卡信息

cluster_run_all_nodes "hostname; cat /etc/redhat-release"
cluster_run_all_nodes "hostname; /sbin/runlevel"
cluster_run_all_nodes "hostname; df -h"
cluster_run_all_nodes "hostname; ethtool eth0 | grep Speed"

1.4.2 同步脚本,校对主机名,同步/etc/hosts

cluster_copy_all_nodes /root/.bash_profile /root/
cluster_copy_all_nodes /usr/local/bin/cluster_run_all_nodes /usr/local/bin/
cluster_copy_all_nodes /usr/local/bin/cluster_copy_all_nodes /usr/local/bin/
cluster_run_all_nodes "hostname; /bin/hostname -f; grep HOSTNAME /etc/sysconfig/network"
cluster_copy_all_nodes /etc/hosts /etc/
cluster_run_all_nodes "hostname; cat /etc/hosts"

1.4.3 同步时间、时区、NTP服务状态

cluster_run_all_nodes "hostname;date"
cluster_run_all_nodes "date 032411082015.00"
cluster_run_all_nodes "hwclock -r"
cluster_run_all_nodes "hwclock -w"
cluster_run_all_nodes 'hostname; echo ${TZ}; echo ${LANG}'
cluster_run_all_nodes "hostname; cat /etc/sysconfig/clock"
cluster_run_all_nodes "hostname; /sbin/chkconfig --list ntpd"

1.4.4 同步Selinux配置,防火墙配置

cluster_run_all_nodes "hostname; grep 'SELINUX=' /etc/selinux/config"
cluster_run_all_nodes "hostname; setenforce 0"
vi /etc/selinux/config     # 设置 SELINUX=disabled
cluster_copy_all_nodes /etc/selinux/config /etc/selinux/
cluster_run_all_nodes "hostname; /sbin/chkconfig --list iptables"
cluster_run_all_nodes "hostname; /sbin/chkconfig --level 0123456 iptables off"
cluster_run_all_nodes "service iptables stop"

1.4.5 同步CPU、内存配置

cluster_run_all_nodes "hostname; grep processor /proc/cpuinfo | wc -l"
cluster_run_all_nodes "hostname; grep MHz /proc/cpuinfo | sort -u"
cluster_run_all_nodes "hostname; grep MemTotal /proc/meminfo"
cluster_run_all_nodes "hostname; /sbin/chkconfig --list cpuspeed"
cluster_run_all_nodes "/sbin/chkconfig --level 0123456 cpuspeed off;/sbin/service cpuspeed stop"

1.4.6 同步检查rsync、python版本

cluster_run_all_nodes "hostname; rsync --version | grep version"
cluster_run_all_nodes "hostname; /usr/bin/python -V"

1.4.7 同步IO配置

--选择deadline调度器
cluster_run_all_nodes "hostname; /sbin/modinfo cciss | grep version"
cluster_run_all_nodes "hostname; cat /sys/block/sda/queue/scheduler"
--cluster_run_all_nodes "hostname; cat /sys/block/sdb/queue/scheduler"
cluster_run_all_nodes "hostname; echo deadline > /sys/block/sda/queue/scheduler"
--cluster_run_all_nodes "hostname; echo deadline > /sys/block/sdb/queue/scheduler"
cluster_run_all_nodes "echo 'echo deadline > /sys/block/sda/queue/scheduler' >> /etc/rc.d/rc.local"
--cluster_run_all_nodes "echo 'echo deadline > /sys/block/sdb/queue/scheduler' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep scheduler /etc/rc.d/rc.local"
noop anticipatory [deadline] cfq
--修改/sys/kernel/mm/redhat_transparent_hugepage/enabled
cluster_run_all_nodes "hostname; cat /sys/kernel/mm/redhat_transparent_hugepage/enabled"
cluster_run_all_nodes "hostname; echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled"
cluster_run_all_nodes "echo 'echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep hugepage /etc/rc.d/rc.local"
always [never]
--修改/sys/kernel/mm/redhat_transparent_hugepage/defrag
cluster_run_all_nodes "hostname; cat /sys/kernel/mm/redhat_transparent_hugepage/defrag"
cluster_run_all_nodes "hostname; echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag"
cluster_run_all_nodes "echo 'echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep hugepage /etc/rc.d/rc.local"
always [never]
--修改/sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag
cluster_run_all_nodes "hostname; cat /sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag"
cluster_run_all_nodes "hostname; echo no > /sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag"
cluster_run_all_nodes "echo 'echo no > /sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep hugepage /etc/rc.d/rc.local"
yes [no]
--修改/proc/sys/vm/swappiness
cluster_run_all_nodes "hostname; cat /proc/sys/vm/swappiness"
cluster_run_all_nodes "hostname; echo 0 > /proc/sys/vm/swappiness"
cluster_run_all_nodes "echo 'echo 0 > /proc/sys/vm/swappiness' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep swappiness /etc/rc.d/rc.local"
--修改readahead
cluster_run_all_nodes "hostname; /sbin/blockdev --getra /dev/sda"
cluster_run_all_nodes "hostname; /sbin/blockdev --setra 8192 /dev/sda"
cluster_run_all_nodes "echo '/sbin/blockdev --setra 8192 /dev/sda' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep blockdev /etc/rc.d/rc.local"

1.4.8 同步系统配置

--同步/etc/security/limits.conf
vi /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
dbadmin - nice 0
dbadmin soft nproc 145209
dbadmin hard nproc 145209
cluster_run_all_nodes "hostname; ulimit -n 655360"
cluster_run_all_nodes "hostname; ulimit -n"
cluster_copy_all_nodes /etc/security/limits.conf /etc/security/
--同步/etc/sysctl.conf
vm.max_map_count=9293346
vm.min_free_kbytes=65535
fs.file-max=13226642
net.core.rmem_default=262144
net.core.rmem_max=262144
net.core.wmem_default=262144
net.core.wmem_max=262144
cluster_copy_all_nodes /etc/sysctl.conf /etc/
cluster_run_all_nodes "hostname;sysctl -p /etc/sysctl.conf"
--同步/etc/pam.d/su
session required pam_limits.so
cluster_copy_all_nodes /etc/pam.d/su /etc/pam.d/
cluster_run_all_nodes "hostname; grep session /etc/pam.d/su"

二、Vertica安装

rpm -ivh vertica-7.1.0-3.x86_64.RHEL5.rpm
/opt/vertica/sbin/install_vertica -s DB01,DB02,DB03,DB04 \
  -r /usr2/vertica-7.1.0-3.x86_64.RHEL5.rpm --failure-threshold=HALT -u dbadmin -p vertica
cluster_run_all_nodes "hostname;mkdir -p /data/verticadb"
cluster_run_all_nodes "hostname;chown -R dbadmin:verticadba /data/verticadb"

三、集群性能评估

cluster_run_all_nodes "hostname; /opt/vertica/bin/vcpuperf" > /tmp/vcpuperf.log
cluster_run_all_nodes "hostname; /opt/vertica/bin/vioperf /data" > /tmp/vioperf_data.log
su - dbadmin
/opt/vertica/bin/vnetperf > /tmp/vnetperf.log
本文永久更新链接地址