1. 机器详情如下:
   Master  192.168.11.100
   slave   192.168.11.101

1.1 下面在每台机器上执行
vi /etc/hosts
192.168.11.100 Master
192.168.11.101 slave
(注:hosts 文件格式为 "IP 主机名",原文顺序写反)

vi /etc/sysconfig/network
HOSTNAME=Master    # 如果是 slave 就写 slave
重启后生效

2. 在两台服务器上做相同的操作
2.1 解压所有包
apache-hive-0.14.0-bin.tar.gz
hadoop-2.5.2.tar.gz
hbase-0.99.2-bin.tar.gz
jdk-1.8.tar.gz
zookeeper-3.4.6.tar.gz
解压全部后:
mv hadoop-2.5.2 /home/hadoop/
mv apache-hive-0.14.0-bin /usr/local/
mv hbase-0.99.2 /usr/local/
mv zookeeper-3.4.6 /usr/local/
mv jdk-1.8 /usr/local/

2.2 环境变量
vi /etc/profile

### set java_env
export JAVA_HOME=/usr/local/jdk-1.8
export JRE_HOME=/usr/local/jdk-1.8/jre
export CLASS_PATH=.:$CLASS_PATH:$JAVA_HOME/lib:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin

### set hadoop_env
export HADOOP_HOME=/home/hadoop/hadoop-2.5.2
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"

### set hive_env
export HIVE_HOME=/usr/local/apache-hive-0.14.0-bin    # 原文 "exportHIVE_HOME" 缺少空格
export PATH=$PATH:$HIVE_HOME/bin

### set zookeeper_env
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.6
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf

### set hbase_env
export HBASE_MANAGES_ZK=false
export HBASE_HOME=/usr/local/hbase-0.99.2

使环境变量生效:
source /etc/profile

修改 hadoop 配置文件:
cd /home/hadoop/hadoop-2.5.2/etc/hadoop

vi hadoop-env.sh
export JAVA_HOME=/usr/local/jdk-1.8

vi core-site.xml
<property>
  <name>fs.default.name</name>
  <value>hdfs://Master:9000</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/home/hadoop/hadoop-2.5.2/tmp</value>    <!-- 原文误写为 hadoop-2.4.0,与本文安装的 2.5.2 不符 -->
</property>

vi hdfs-site.xml    <!-- 原文误写为 hdfs-site.xm -->
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>file:/home/hadoop/hadoop-2.5.2/dfs/name</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file:/home/hadoop/hadoop-2.5.2/dfs/data</value>
</property>

vi mapred-site.xml
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>Master:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>Master:19888</value>
</property>

vi yarn-site.xml
<!-- 注:原文此段 yarn 属性误放在 "hive-site.xml" 标题下,现归位 -->
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
  <name>yarn.resourcemanager.address</name>
  <value>Master:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address</name>
  <value>Master:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address</name>
  <value>Master:8031</value>
</property>
<property>
  <name>yarn.resourcemanager.admin.address</name>
  <value>Master:8033</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>Master:8088</value>
</property>

vi slaves
slave    # 如果有多个 slave,换行再写;主机名须与 /etc/hosts 一致(原文误写为 slave1)

3. 创建用户免密码登陆(这里用户是 root)
ssh-keygen -t rsa    # 一直回车,同时在另一台 slave 上也做同样操作
cd /root/.ssh/
cat id_rsa.pub >> authorized_keys
chmod 600 authorized_keys
scp -r authorized_keys root@192.168.11.101:/root/.ssh/
ssh root@192.168.11.101    # 看是否还需要密码登陆

4. 复制文件到另一台 slave
scp -r /home/hadoop/hadoop-2.5.2 root@192.168.11.101:/home/hadoop/

5. 启动
cd /home/hadoop/hadoop-2.5.2
# 格式化 HDFS 文件系统
./bin/hdfs namenode -format
# 启动 hadoop(原文 "/sbin/start-all.sh" 缺少前导点号)
./sbin/start-all.sh

验证页面:
http://192.168.11.100:50070/
http://192.168.11.100:8088/

# 查看进程
Master:
/usr/local/jdk-1.8/bin/jps
7744 Jps
3218 ResourceManager
3078 SecondaryNameNode
7049 QuorumPeerMain
2910 NameNode

slave:
/usr/local/jdk-1.8/bin/jps
1644 NodeManager
2573 QuorumPeerMain
2734 Jps
1551 DataNode

6. apache-hive-0.14.0 配置
6.1 安装 mysql
yum -y install mysql-server mysql-devel mysql-client
service mysqld start
mysqladmin -uroot password 'root'    # 原文 "mysql -uroot password 'root'" 命令有误,设置密码应使用 mysqladmin
mysql -uroot -proot
> create database hive;    # 原文误写为 "create databases hive;"

6.2 修改配置文件
vi /usr/local/apache-hive-0.14.0-bin/conf/hive-site.xml
<!-- 注:hive-site.xml 位于 $HIVE_HOME/conf 下;原文此段 JDBC 属性误放在 "hbase-site.xml" 标题下,现归位 -->
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://localhost:3306/hive?characterEncoding=UTF-8</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>root</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>linux</value>    <!-- 注意:与 6.1 中设置的 root 密码 'root' 不一致,请按实际密码填写 -->
</property>

7. zookeeper 配置
cd /usr/local/zookeeper-3.4.6/conf    # 原文误写为 .../etc,zoo_sample.cfg 位于 conf 目录
mv zoo_sample.cfg zoo.cfg
vi zoo.cfg
dataDir=/tmp/zookeeper
clientPort=2181
server.1=Master:2888:3888
server.2=slave:2888:3888

scp -r /usr/local/zookeeper-3.4.6 root@192.168.11.101:/usr/local/

mkdir -p /tmp/zookeeper    # 先确保 dataDir 存在
echo "1" > /tmp/zookeeper/myid    # 在 Master 上执行(myid 文件只应包含一个数字,用 > 而非 >>)
echo "2" > /tmp/zookeeper/myid    # 这个是在 slave 上执行的

bin/zkServer.sh start             # 两边服务器都需要执行
bin/zkCli.sh -server Master:2181  # 客户端连接 server

# 查看 zookeeper 服务器状态
Master:
[root@Master zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower

slave:
[root@slave zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: leader

7.1 hbase 配置
vi /usr/local/hbase-0.99.2/conf/hbase-site.xml
<!-- 注:hbase-site.xml 位于 $HBASE_HOME/conf 下,原文路径误指向 hadoop 配置目录 -->
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://192.168.11.100:9000/hbase</value>
</property>
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<property>
  <name>hbase.master</name>
  <value>hdfs://192.168.11.100:60000</value>
</property>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>Master,slave</value>    <!-- 原文为 "Master,slave1",与主机名 slave 不一致 -->
</property>

vi /usr/local/hbase-0.99.2/conf/regionservers    # 原文 "conf /regionservers" 多了空格
Master
slave

7.2 复制到 slave
scp -r /usr/local/hbase-0.99.2 root@192.168.11.101:/usr/local/

启动:
bin/start-hbase.sh    # 两边服务器都启动