Hadoop 2.6 and HBase 1.1.2 Single-Node Installation and Configuration

Overview

Part 1: Hadoop 2.6 single-node installation and configuration

Environment:

JDK version: 1.8 (already installed and configured; path: /usr/java/jdk1.8.0_65)

Hadoop version: 2.6.0

Spark version: 1.4.1

 

0. Create a directory

[root@localhost ~]# mkdir /usr/local/hadoop

1. Download Hadoop

hadoop-2.6.0.tar.gz
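For example, the release can be fetched from the Apache archive into the new directory (URL assumed from the standard archive layout):

[root@localhost ~]# cd /usr/local/hadoop

[root@localhost hadoop]# wget https://archive.apache.org/dist/hadoop/common/hadoop-2.6.0/hadoop-2.6.0.tar.gz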

2. Extract the tarball

[root@localhost hadoop]# tar -zxvf hadoop-2.6.0.tar.gz

3. Configure environment variables in /etc/profile

JDK location:

[root@localhost lib]# cd /usr/java/jdk1.8.0_65

 

[root@localhost jdk1.8.0_65]# vim /etc/profile

#HADOOP VARIABLES START

export JAVA_HOME=/usr/java/jdk1.8.0_65

export HADOOP_INSTALL=/usr/local/hadoop/hadoop-2.6.0

export PATH=$PATH:$HADOOP_INSTALL/bin

export PATH=$PATH:$HADOOP_INSTALL/sbin

export HADOOP_MAPRED_HOME=$HADOOP_INSTALL

export HADOOP_COMMON_HOME=$HADOOP_INSTALL

export HADOOP_HDFS_HOME=$HADOOP_INSTALL

export YARN_HOME=$HADOOP_INSTALL

#HADOOP VARIABLES END

[root@localhost hadoop-2.6.0]# source /etc/profile  # make the configuration take effect immediately

Alternatively, a more complete configuration:

#HADOOP VARIABLES START

export JAVA_HOME=/usr/java/jdk1.8.0_65

export HADOOP_HOME=/usr/local/hadoop/hadoop-2.6.0

export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

export HADOOP_MAPRED_HOME=$HADOOP_HOME

export HADOOP_COMMON_HOME=$HADOOP_HOME

export HADOOP_HDFS_HOME=$HADOOP_HOME

export YARN_HOME=$HADOOP_HOME

export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"

#HADOOP VARIABLES END
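Apply the changes and confirm that the hadoop binary resolves:

[root@localhost hadoop-2.6.0]# source /etc/profile

[root@localhost hadoop-2.6.0]# hadoop version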

 

 

4. Edit hadoop-env.sh

Location: [root@localhost hadoop]# pwd

/usr/local/hadoop/hadoop-2.6.0/etc/hadoop

[root@localhost hadoop]# vim hadoop-env.sh

#export JAVA_HOME=${JAVA_HOME}  # before

export JAVA_HOME=/usr/java/jdk1.8.0_65   # after
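Hard-coding JAVA_HOME here matters because Hadoop starts its daemons over ssh, where a non-interactive shell may not pick up the exports from /etc/profile, leaving ${JAVA_HOME} unset.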

5. Edit core-site.xml

<configuration>

        <property>

             <name>fs.defaultFS</name>

             <value>hdfs://localhost:9000</value>

        </property>

        <property>

              <name>hadoop.tmp.dir</name>

              <value>/usr/local/hadoop/hadoop-2.6.0/tmp</value>

        </property>

</configuration>

 

 

6. Copy mapred-site.xml.template to mapred-site.xml and edit it
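The directory ships only the template, so copy it before editing:

[root@localhost hadoop]# cp mapred-site.xml.template mapred-site.xml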

<configuration>

    <property>

        <name>mapreduce.framework.name</name>

        <value>yarn</value>

    </property>

</configuration>

 

7. Edit yarn-site.xml

<configuration>

<!-- Site specific YARN configuration properties -->

    <property>

          <name>yarn.nodemanager.aux-services</name>

          <value>mapreduce_shuffle</value>

    </property>

</configuration>

 

8. Edit hdfs-site.xml

<configuration>

     <property>

          <name>dfs.replication</name>

          <value>1</value>

     </property>

     <property>

          <name>dfs.namenode.name.dir</name>

          <value>/usr/local/hadoop/hadoop-2.6.0/dfs/name</value>

     </property>

     <property>

          <name>dfs.datanode.data.dir</name>

          <value>/usr/local/hadoop/hadoop-2.6.0/dfs/data</value>

     </property>

     <property>

            <name>dfs.permissions</name>

             <value>false</value>

     </property>

</configuration>
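dfs.replication is 1 because a single-node cluster has only one DataNode; the default of 3 would leave every block under-replicated. Setting dfs.permissions to false disables HDFS permission checks, which is convenient for a local test setup.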

 

9. Edit masters and slaves

The masters file may not exist; create it yourself.

Contents:

[root@localhost hadoop]# vim slaves

localhost

[root@localhost hadoop]# vim masters

localhost
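Note: in Hadoop 2.x the masters file is largely vestigial (the SecondaryNameNode location comes from hdfs-site.xml), but creating it does no harm; slaves lists the hosts that run DataNode and NodeManager.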

 

10. Create the temporary and data directories

[root@localhost hadoop]# cd /usr/local/hadoop/hadoop-2.6.0

[root@localhost hadoop-2.6.0]# mkdir tmp dfs dfs/name dfs/data

 

11. Format HDFS

[root@localhost hadoop-2.6.0]# hdfs namenode -format
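If the format succeeds, the output should contain a line like "Storage directory /usr/local/hadoop/hadoop-2.6.0/dfs/name has been successfully formatted." and exit with status 0.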

 

12. Start Hadoop

[root@localhost hadoop-2.6.0]# start-dfs.sh

[root@localhost hadoop-2.6.0]# start-yarn.sh

 

13. Verify with jps

[root@localhost hadoop-2.6.0]# jps

21008 DataNode

21152 SecondaryNameNode

20897 NameNode

21297 ResourceManager

21449 NodeManager

21690 Jps
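With all six daemons up, the web interfaces should also respond (Hadoop 2.6 default ports): the NameNode UI at http://localhost:50070 and the ResourceManager UI at http://localhost:8088.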

 

14. Spark environment configuration on top of Hadoop 2.6

[root@localhost hadoop]# pwd  # note: this layout changed significantly between versions; 1.x is different

/usr/local/hadoop/hadoop-2.6.0/etc/hadoop   # hadoop config location

 

[root@localhost conf]# pwd

/usr/local/spark/spark-1.4.1-bin-hadoop2.6/conf 

[root@localhost conf]# vim spark-env.sh   # configure environment variables

export SCALA_HOME=/usr/local/scala/scala-2.11.7

export JAVA_HOME=/usr/java/jdk1.8.0_65

export SPARK_MASTER_IP=192.168.31.157

export SPARK_WORKER_MEMORY=512m

export MASTER=spark://192.168.31.157:7077  # Spark's standalone master listens on port 7077 by default

export HADOOP_CONF_DIR=/usr/local/hadoop/hadoop-2.6.0/etc/hadoop  # newly added
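Pointing HADOOP_CONF_DIR at Hadoop's configuration directory lets Spark pick up the HDFS and YARN client settings, so Spark jobs can resolve hdfs:// paths against this cluster.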

 

 

15. Verification

[root@localhost sbin]# cd /usr/local/spark/spark-1.4.1-bin-hadoop2.6/sbin/

[root@localhost sbin]# pwd

/usr/local/spark/spark-1.4.1-bin-hadoop2.6/sbin

[root@localhost sbin]# ./start-all.sh  

[root@localhost sbin]# cd /usr/local/hadoop/hadoop-2.6.0/sbin/

[root@localhost sbin]# pwd

/usr/local/hadoop/hadoop-2.6.0/sbin

[root@localhost sbin]# ./start-all.sh  

 

[root@localhost sbin]# jps

5171 NameNode

6259 Worker

6071 Master

6328 Jps

5290 DataNode

5581 ResourceManager

5855 NodeManager

5439 SecondaryNameNode
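The Spark standalone master also serves a web UI, by default at http://192.168.31.157:8080, where the registered Worker should be listed.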

 

Part 2: HBase 1.1.2 single-node installation and configuration

0. Create a directory

[root@localhost ~]# mkdir /usr/local/hbase

1. Download HBase

hbase-1.1.2-bin.tar.gz

2. Extract the tarball

[root@localhost hbase]# tar -zxvf hbase-1.1.2-bin.tar.gz

3. Edit the environment settings

[root@localhost conf]# pwd

/usr/local/hbase/hbase-1.1.2/conf

[root@localhost conf]# vim hbase-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_65/

Set JAVA_HOME to your JDK installation directory; here it is /usr/java/jdk1.8.0_65/.

4. Edit hbase-site.xml

[root@localhost conf]# vim hbase-site.xml

Add:

<configuration>

      <property>

            <name>hbase.rootdir</name>

            <value>hdfs://localhost:9000/hbase</value>

      </property>

      <property>

             <name>hbase.cluster.distributed</name>

             <value>true</value>

      </property>

</configuration>
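The port in hbase.rootdir must match fs.defaultFS from core-site.xml (hdfs://localhost:9000 here); a mismatch leaves the HMaster unable to reach HDFS. hbase.cluster.distributed=true runs the HBase daemons in separate JVMs, which is what makes this setup pseudo-distributed rather than standalone.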

5. Start HBase

[root@localhost bin]# pwd

/usr/local/hbase/hbase-1.1.2/bin

[root@localhost bin]# ./start-hbase.sh

6. Enter the HBase shell

[root@localhost bin]# ./hbase shell
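A quick smoke test inside the shell, using standard HBase shell commands:

hbase(main):001:0> create 'test', 'cf'

hbase(main):002:0> put 'test', 'row1', 'cf:a', 'value1'

hbase(main):003:0> scan 'test'

hbase(main):004:0> exit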

7. Check the processes

[root@localhost bin]# jps

9298 HMaster

5171 NameNode

6259 Worker

8357 HQuorumPeer

6071 Master

9415 HRegionServer

5290 DataNode

9931 Jps

5581 ResourceManager

5855 NodeManager

5439 SecondaryNameNode

At this point both Hadoop and HBase are installed. This is only a single-node setup, which can also be described as a pseudo-distributed configuration.

 

 

Troubleshooting Spark and Hadoop

Problem 1: Spark startup issue

[root@B sbin]# pwd

/usr/local/spark/spark-1.4.1-bin-hadoop2.6/sbin

[root@B sbin]# ./start-all.sh

org.apache.spark.deploy.master.Master running as process 3941.  Stop it first.

B: ssh: connect to host B port 22: No route to host

[root@B sbin]# jps

4661 Jps

3941 Master

 

Cause: the machine's IP address changed.
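Confirm the machine's current address with ip addr (or ifconfig) first, so you know the value that all of the files in the checks below should agree on.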

Fix:

 

Check 1:

[root@B sbin]# vim /etc/sysconfig/network

NETWORKING=yes

HOSTNAME=B.localdomain

Check 2:

[root@B sbin]# vim /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.31.132 B B.localdomain

Check 3:

[root@B sbin]# vim /usr/local/spark/spark-1.4.1-bin-hadoop2.6/conf/slaves

B

Check 4:

[root@B sbin]# vim /usr/local/spark/spark-1.4.1-bin-hadoop2.6/conf/spark-env.sh

export SCALA_HOME=/usr/local/scala/scala-2.11.7

export JAVA_HOME=/usr/java/jdk1.8.0_65

export SPARK_MASTER_IP=192.168.31.132

export SPARK_WORKER_MEMORY=512m

export MASTER=spark://192.168.31.132:7077

 

Verify:

[root@B sbin]# ./start-all.sh

[root@B sbin]# jps

3941 Master

4984 Jps

4909 Worker

 

 

 

Problem 2: when HBase, Spark, and Hadoop are started together, processes such as HRegionServer, Worker, and SecondaryNameNode fail to start

 

Startup order:

One workable order is Spark -> Hadoop -> HBase. If some processes fail to come up, try stopping everything and restarting, as in the sketch below. Hadoop in particular may not bring up every daemon on the first attempt.
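A possible stop-and-restart sequence, using the stop scripts that ship alongside the start scripts used above:

[root@localhost ~]# /usr/local/hbase/hbase-1.1.2/bin/stop-hbase.sh

[root@localhost ~]# /usr/local/hadoop/hadoop-2.6.0/sbin/stop-all.sh

[root@localhost ~]# /usr/local/spark/spark-1.4.1-bin-hadoop2.6/sbin/stop-all.sh

[root@localhost ~]# /usr/local/spark/spark-1.4.1-bin-hadoop2.6/sbin/start-all.sh

[root@localhost ~]# /usr/local/hadoop/hadoop-2.6.0/sbin/start-all.sh

[root@localhost ~]# /usr/local/hbase/hbase-1.1.2/bin/start-hbase.sh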

 

 
