Big Data Cluster Deployment

1. ZooKeeper Deployment

wget http://apache.fayea.com/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz

tar xf zookeeper-3.4.10.tar.gz 

cd zookeeper-3.4.10

cp -a conf/zoo_sample.cfg conf/zoo.cfg

[root@node1 opt]# cat /opt/zookeeper-3.4.10/conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper-3.4.10/data
dataLogDir=/opt/zookeeper-3.4.10/logs
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=10.61.98.71:2888:3888
server.2=10.61.98.72:2888:3888
server.3=10.61.98.73:2888:3888

scp -rp zookeeper-3.4.10 node2:/opt/

scp -rp zookeeper-3.4.10 node3:/opt/
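Each server.N entry in zoo.cfg must be matched by a myid file in the dataDir of that node, and the data/log directories must exist before ZooKeeper will start. A minimal sketch, assuming the dataDir and dataLogDir configured above (write 1 on node1, 2 on node2, 3 on node3):

mkdir -p /opt/zookeeper-3.4.10/data /opt/zookeeper-3.4.10/logs
echo 1 > /opt/zookeeper-3.4.10/data/myid    # use 2 on node2, 3 on node3

/opt/zookeeper-3.4.10/bin/zkServer.sh start
/opt/zookeeper-3.4.10/bin/zkServer.sh status   # one node should report leader, the others follower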

2. Hadoop Deployment

wget http://apache.01link.hk/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz

groupadd hadoop

useradd -m -g hadoop hadoop

tar xf hadoop-2.6.5.tar.gz 

cd hadoop-2.6.5

cd /opt/hadoop-2.6.5/etc/hadoop
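Before editing the *-site.xml files below, hadoop-env.sh in this directory usually needs JAVA_HOME set explicitly, because the value inherited over SSH is often not visible to the daemons. A minimal sketch, assuming the JDK is installed under /opt/jdk (the same path used for HBase later):

# hadoop-env.sh (in this directory)
export JAVA_HOME=/opt/jdk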

[root@node1 hadoop]# egrep -v "^#|^$" core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- HDFS address; in HA mode clients connect to the nameservice -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- Default parent directory for NameNode, DataNode, JournalNode data; each can also be set individually -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop-2.6.5/tmp</value>
</property>
<!-- ZooKeeper quorum addresses and ports; use an odd number of nodes, at least three -->
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- Replication factor; must not exceed the number of DataNodes -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- Logical nameservice name for the NameNode pair -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- NameNodes that belong to this nameservice, and their IDs -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>node1,node2</value>
</property>
<!-- RPC address and port of the NameNode named node1; RPC is used to talk to the DataNodes -->
<property>
<name>dfs.namenode.rpc-address.ns1.node1</name>
<value>node1:9000</value>
</property>
<!-- RPC address and port of the NameNode named node2; RPC is used to talk to the DataNodes -->
<property>
<name>dfs.namenode.rpc-address.ns1.node2</name>
<value>node2:9000</value>
</property>
<!-- HTTP address and port of the NameNode named node1, used by web clients -->
<property>
<name>dfs.namenode.http-address.ns1.node1</name>
<value>node1:50070</value>
</property>
<!-- HTTP address and port of the NameNode named node2, used by web clients -->
<property>
<name>dfs.namenode.http-address.ns1.node2</name>
<value>node2:50070</value>
</property>

<!-- JournalNodes that share the edit log between the NameNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/ns1</value>
</property>
<!-- Automatically fail over to the other NameNode when the active one fails -->
<property>
<name>dfs.ha.automatic-failover.enabled.ns1</name>
<value>true</value>
</property>
<!-- Directory on each JournalNode where edit logs are stored -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/hadoop-2.6.5/tmp/data/dfs/journalnode</value>
</property>
<!-- Proxy class clients use to find the active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fence the old active NameNode over SSH during a failover -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- Private key used for SSH fencing -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- SSH connect timeout in milliseconds -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
</configuration>
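The sshfence method above assumes the hadoop user can SSH between node1 and node2 without a password, using the key at /home/hadoop/.ssh/id_rsa. A sketch of setting that up, run as the hadoop user on both NameNode machines (adjust to your environment):

ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
ssh-copy-id hadoop@node1
ssh-copy-id hadoop@node2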

[root@node1 hadoop]# egrep -v "^#|^$" mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<!-- Use YARN as the MapReduce resource scheduler -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Cluster ID for the ResourceManager pair -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yrc</value>
</property>
<!-- Two ResourceManagers; list their logical IDs -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>

<!-- Host for rm1 -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>node1</value>
</property>

<!-- Host for rm2 -->
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>node2</value>
</property>

<!-- The current machine (node1) acts as rm1 -->
<property>
<name>yarn.resourcemanager.ha.id</name>
<value>rm1</value>
</property>

<!-- ZooKeeper quorum used by the ResourceManagers -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>

<!-- Auxiliary service run by the NodeManagers; mapreduce_shuffle is required for MapReduce -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" slaves
node1
node2
node3

scp -r hadoop-2.6.5 node2:/opt/
scp -r hadoop-2.6.5 node3:/opt/

Modify yarn-site.xml

On node2, the standby ResourceManager node, change the following property so that the machine identifies itself as rm2:

  <property>
    <name>yarn.resourcemanager.ha.id</name>
    <value>rm2</value>
  </property>

Start Hadoop

cd hadoop-2.6.5/sbin/

./hadoop-daemons.sh start journalnode

The JournalNodes must be running before the NameNode can be formatted:
2.0 Log in to each JournalNode machine (node1, node2, node3) and run sbin/hadoop-daemon.sh start journalnode (or start them all at once with sbin/hadoop-daemons.sh start journalnode as above)
2.1 Format nn1: bin/hadoop namenode -format
2.2 Format the HA state znode in ZooKeeper: bin/hdfs zkfc -formatZK
2.3 On nn1, initialize the shared edits directory: bin/hdfs namenode -initializeSharedEdits
2.4 Start nn1: sbin/hadoop-daemon.sh start namenode
2.5 Log in to nn2 and pull nn1's metadata (nn2 does not need to be formatted): bin/hdfs namenode -bootstrapStandby
2.6 Start nn2: sbin/hadoop-daemon.sh start namenode

5) Start HDFS, YARN, and the ZKFailoverControllers
./start-dfs.sh

./start-yarn.sh

./hadoop-daemon.sh start zkfc

On node2, start the ResourceManager manually; the standby ResourceManager is not started automatically by start-yarn.sh:

yarn-daemon.sh start resourcemanager
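At this point jps should list roughly the following daemons on node1 (a sketch; node3 should show only DataNode, NodeManager, JournalNode and QuorumPeerMain):

jps
# expected on node1: NameNode, DataNode, JournalNode, DFSZKFailoverController,
#                    ResourceManager, NodeManager, QuorumPeerMain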

6) Check NameNode and ResourceManager state

hdfs haadmin -getServiceState node1
yarn rmadmin -getServiceState rm1

hdfs haadmin -getServiceState node2
yarn rmadmin -getServiceState rm2

You can also check through the web UIs: open ip:50070 in a browser for HDFS, and ip:8088/cluster/cluster for YARN.

7) Test HA failover
a. Active node ---> standby node

Kill the NameNode process on the active node and check whether the standby NameNode switches to active;

Kill the ResourceManager process on the active node and check whether the standby ResourceManager switches to active;
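A minimal sketch of the NameNode check, assuming node1 is currently the active NameNode:

jps                                     # find the NameNode pid on node1
kill -9 <NameNode-pid>                  # <NameNode-pid> is the pid printed by jps
hdfs haadmin -getServiceState node2     # should now report "active"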

b. Standby node ---> active node

If the steps above succeed, test automatic failover in the other direction.

First restart the NameNode and ResourceManager that were killed on the original active node:

hadoop-daemon.sh start namenode 

yarn-daemon.sh start resourcemanager

Then kill the NameNode and ResourceManager on the standby node and check the state of the original node; if it switches back to active, the Hadoop HA cluster is working.

V. Install and Configure HBase

1. Download and install

Download: http://mirrors.hust.edu.cn/ap...

On the master188 machine, extract to /home/hadoop/:

tar -zxvf hbase-1.3.1-bin.tar.gz

2. Configuration

Go to hbase-1.3.1/conf/ and edit the configuration files:

cd hbase-1.3.1/conf/
1)vi hbase-env.sh
// configure the JDK
export JAVA_HOME=/opt/jdk

// directory where HBase keeps its pid files
export HBASE_PID_DIR=/home/hadoop/data/hbase/pids

// set HBASE_MANAGES_ZK to false to disable HBase's bundled ZooKeeper, since a standalone ZooKeeper cluster is used
export HBASE_MANAGES_ZK=false
2)vi hbase-site.xml
<configuration>
  <!-- HDFS directory shared by the HRegionServers; include the port -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://master188:9000/hbase</value>
  </property>

  <!-- HMaster host -->
  <property>
    <name>hbase.master</name>
    <value>hdfs://master188:60000</value>
  </property>

  <!-- Enable distributed mode -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>

  <!-- ZooKeeper quorum -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>master188:2181,master189:2181,slave190:2181</value>
  </property>

  <!-- Path of the standalone ZooKeeper installation -->
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/hadoop/zookeeper-3.4.11</value>
  </property>

  <!-- ZooKeeper client port -->
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
</configuration>
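Note that hbase.rootdir above points at a single NameNode. With an HA HDFS like the one configured earlier it is more common to point it at the nameservice, so HBase keeps working across a NameNode failover; a sketch assuming the ns1 nameservice (HBase must then be able to see core-site.xml and hdfs-site.xml, for example by copying them into hbase-1.3.1/conf/):

  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://ns1/hbase</value>
  </property>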
3)vi regionservers

Edit the regionservers file to list the machines that will run a RegionServer:


master188
master189
slave190
4) Create the directory for pid files

Under /home/hadoop/:

mkdir data/hbase/pids -p

3. Copy HBase to the other machines

cd /home/hadoop/

scp -r hbase-1.3.1 hadoop@master189:/home/hadoop/

scp -r hbase-1.3.1 hadoop@slave190:/home/hadoop/

4. Start HBase

Start HBase on the master node (here "master" means the node whose NameNode is currently active, not the machine labels declared earlier in this article):

cd hbase-1.3.1/bin

./start-hbase.sh

// check that the HMaster and HRegionServer processes are running
jps
Note: the Hadoop cluster must already be running, and start-hbase.sh must be executed on the active master node, otherwise the HMaster process will disappear a few seconds after starting. The standby HMaster has to be started separately on the standby node with ./hbase-daemon.sh start master.

Start an HMaster process on the standby node to act as the backup HMaster:

cd hbase-1.3.1/bin

./hbase-daemon.sh start master

5. HA failover test

Open ip:16010 in a browser to check the HMaster status on both the master and the standby node. In the standby node's web UI you should see "Current Active Master: master188", meaning the current HBase master is the master188 machine;

Master node ---> standby node
Here "master node" means the machine on which start-hbase.sh was run.

Kill the HMaster process on the master node and check in the browser whether HBase on the standby node switches to active;

If that succeeds, restart the killed HMaster process on the master node:

cd hbase-1.3.1/bin/

./hbase-daemon.sh start master

Then kill the HMaster process on the standby node and check in the browser whether HBase on the master node switches to active; if so, the HBase HA cluster is complete;

6. Basic HBase operations

// start HBase
[root@vnet ~] start-hbase.sh

// enter the HBase shell
[root@vnet ~] hbase shell

// list the tables that currently exist
hbase(main):> list

// create table t_user with column families cf1 and cf2 (usually no more than 3 column families per table)
hbase(main):> create 't_user','cf1','cf2'

// describe table t_user
hbase(main):> describe 't_user'

// disable a table
hbase(main):> disable 't_user'

// drop a table (it must be disabled first)
hbase(main):> drop 't_user'

// check whether a table exists
hbase(main):> exists 't_user'

// scan the whole table
hbase(main):> scan 't_user'

// insert data: table name, row key, column (column family:qualifier), value. HBase is column-oriented, so columns can be added freely
hbase(main):> put 't_user','001','cf1:name','chenxj'
hbase(main):> put 't_user','001','cf1:age','18'
hbase(main):> put 't_user','001','cf2:sex','man'
hbase(main):> put 't_user','002','cf1:name','chenxj'
hbase(main):> put 't_user','002','cf1:address','fuzhou'
hbase(main):> put 't_user','002','cf2:sex','man'

// read data by row key, optionally narrowed by column family or column
hbase(main):> get 't_user','001'
hbase(main):> get 't_user','002','cf1'
hbase(main):> get 't_user','001','cf1:age'
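Deleting data follows the same pattern; a short sketch:

// delete a single cell, then an entire row
hbase(main):> delete 't_user','001','cf1:age'
hbase(main):> deleteall 't_user','001'

// count the rows in a table
hbase(main):> count 't_user'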

VI. Cluster Startup Result

After the Hadoop + Zookeeper + HBase HA cluster is started, the processes on each node are:

Role                      node1                      node2                      node3
HDFS (master)             NameNode                   NameNode
HDFS (worker)             DataNode                   DataNode                   DataNode
YARN (master)             ResourceManager            ResourceManager
YARN (worker)             NodeManager                NodeManager                NodeManager
HBase (master)            HMaster                    HMaster
HBase (worker)            HRegionServer              HRegionServer              HRegionServer
ZooKeeper (standalone)    QuorumPeerMain             QuorumPeerMain             QuorumPeerMain
NameNode edit log sync    JournalNode                JournalNode                JournalNode
Active/standby failover   DFSZKFailoverController    DFSZKFailoverController

VII. Summary

Points to note:

1) The NameNode, ResourceManager, and HMaster on the standby node each need to be started manually:
hadoop-daemon.sh start namenode

yarn-daemon.sh start resourcemanager

hbase-daemon.sh start master 
2) The --forcemanual flag can be used to force a switch between the active and standby nodes, but after a forced switch the cluster's automatic failover no longer works and the ZKFC state must be reformatted with hdfs zkfc -formatZK;
# hdfs haadmin -transitionToActive/-transitionToStandby --forcemanual <namenode-id>, for example:
hdfs haadmin -transitionToActive --forceactive --forcemanual node2
yarn rmadmin -transitionToActive/-transitionToStandby --forcemanual rm2
3) When the standby NameNode pulls the active node's metadata (-bootstrapStandby), HDFS on the active node must already be running;

4) HDFS cannot be browsed through a NameNode that is in standby state;

5) Before formatting the NameNode, start the journalnode process on every JournalNode machine: hadoop-daemon.sh start journalnode

6) When something goes wrong, first work out which component is failing and read that component's logs (and those of the components it depends on); if a web UI cannot be reached, or there are other connection problems, check whether the firewall is disabled, whether the port is already in use, whether SSH works, and whether all cluster machines are on the same network segment;

3. Spark Cluster Installation
Spark on YARN mode

Spark installation

Download and extract

Download the latest Spark from the official download page. The version used here is spark-1.3.0-bin-hadoop2.4.tgz.

Extract it under ~/workspace:

tar -zxvf spark-1.3.0-bin-hadoop2.4.tgz
mv spark-1.3.0-bin-hadoop2.4 spark-1.3.0   # rename: the original directory name is too long

Configure Spark

cd ~/workspace/spark-1.3.0/conf   # enter the Spark configuration directory
cp spark-env.sh.template spark-env.sh   # copy from the template
vi spark-env.sh   # add the configuration below

Append the following to the end of spark-env.sh (this is my configuration; adjust as needed):

export SCALA_HOME=/home/spark/workspace/scala-2.10.4
export JAVA_HOME=/home/spark/workspace/jdk1.7.0_75
export HADOOP_HOME=/home/spark/workspace/hadoop-2.6.0
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
SPARK_MASTER_IP=master
SPARK_LOCAL_DIRS=/home/spark/workspace/spark-1.3.0
SPARK_DRIVER_MEMORY=1G

Note: when setting the number of CPU cores and the amount of memory for the Worker process, stay within the machine's actual hardware; if the configuration exceeds what the Worker node has, the Worker process will fail to start.
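For example, the corresponding spark-env.sh variables might look like this (the values are illustrative and must fit the Worker machine's real hardware):

export SPARK_WORKER_CORES=2
export SPARK_WORKER_MEMORY=2g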

vi slaves, and add the slave hostnames to the slaves file:

slave1
slave2

Distribute the configured spark-1.3.0 directory to all slaves:

scp -r ~/workspace/spark-1.3.0 spark@slave1:~/workspace/
scp -r ~/workspace/spark-1.3.0 spark@slave2:~/workspace/

Start Spark

sbin/start-all.sh

Verify that Spark is installed correctly

Check with jps; on master you should see the following processes:

$ jps
7949 Jps
7328 SecondaryNameNode
7805 Master
7137 NameNode
7475 ResourceManager

On each slave you should see the following processes:

$jps
3132 DataNode
3759 Worker
3858 Jps
3231 NodeManager

Open the Spark web UI at http://master:8080

Run the examples

# local mode with two threads
./bin/run-example SparkPi 10 --master local[2]

# Spark Standalone cluster mode
./bin/spark-submit \
  --class org.apache.spark.examples.SparkPi \
  --master spark://master:7077 \
  lib/spark-examples-1.3.0-hadoop2.4.0.jar \
  100

# Spark on YARN, yarn-cluster mode
./bin/spark-submit \
  --class org.apache.spark.examples.SparkPi \
  --master yarn-cluster \
  lib/spark-examples*.jar \
  10

Note that Spark on YARN supports two deployment modes, yarn-cluster and yarn-client. Broadly speaking, yarn-cluster is suited to production jobs, while yarn-client is suited to interactive use and debugging, where you want to see the application's output quickly.
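A minimal sketch of the same example in yarn-client mode, where the driver runs locally and the output of SparkPi shows up directly in the console:

./bin/spark-submit \
  --class org.apache.spark.examples.SparkPi \
  --master yarn-client \
  lib/spark-examples*.jar \
  10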



hive

Hive Overview

Hive is an open-source data warehouse tool built on Hadoop for storing and processing massive amounts of structured data. It keeps the data in the Hadoop file system rather than in a database, but provides a database-like mechanism for storing and querying it, using HQL (a SQL-like language) to manage and process the data automatically. Structured data in Hive can be thought of as tables, while physically the data is distributed across HDFS. Hive parses and translates HQL statements into a series of Hadoop MapReduce jobs, and data processing is carried out by running those jobs.

Hive grew out of Facebook's log analysis needs. Faced with huge volumes of structured data, Hive accomplished at low cost work that previously required large-scale databases, with a relatively low learning curve and flexible, efficient application development.

Hive released its first official stable version, 0.3.0, on 2009-04-29, and has been steadily maturing since. Related material online is still fairly scarce, especially in Chinese. This article explores how Hive is applied to real business workloads and summarizes that experience, in the hope of saving readers some detours.

Preparation

Environment

JDK:1.8  
Hadoop Release:2.7.4  
centos:7.3  

node1 (master)  192.168.252.121
node2 (slave1)  192.168.252.122
node3 (slave2)  192.168.252.123

node4 (mysql)   192.168.252.124

Prerequisites

Apache Hive requires a working Hadoop cluster, and Hive itself only needs to be installed on a NameNode machine of that cluster (one NameNode is enough); it does not have to be installed on the DataNode machines. Also note that although editing the configuration files does not require Hadoop to be running, this article uses HDFS commands, and both those commands and starting Hive require Hadoop to be up, so it is best to start the Hadoop cluster first.

Install MySQL to store Hive's metadata (Hive's embedded Derby database could be used instead, but Derby is generally not used in production). A single MySQL instance is enough; for high availability you can also set up MySQL replication;

Hadoop

Hadoop-2.7.4 cluster quick setup

MySQL (pick either of the following)

Installing the MySQL 5.7.19 binary distribution on CentOS 7.3

Setting up MySQL 5.7.19 master-slave replication, with an analysis of how replication works

Installation

Download and extract

su hadoop
cd /home/hadoop/
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hive/hive-2.3.0/apache-hive-2.3.0-bin.tar.gz
tar -zxvf apache-hive-2.3.0-bin.tar.gz
mv apache-hive-2.3.0-bin hive-2.3.0

Environment variables

To apply to all users, edit /etc/profile;
to apply only to the current user, edit ~/.bashrc.

sudo vi /etc/profile
#hive
export HIVE_HOME=/home/hadoop/hive-2.3.0/
export PATH=${HIVE_HOME}/bin:$PATH

Apply the change by running source /etc/profile

Configure HDFS directories for Hive

Create hive-site.xml

cd /home/hadoop/hive-2.3.0/conf
cp hive-default.xml.template hive-site.xml

Create HDFS directories

Create the HDFS directories that hive-site.xml expects; the default configuration contains:

<property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
    <description>location of default database for the warehouse</description>
  </property>

Go to the Hadoop installation directory, create the /user/hive/warehouse directory (plus the tmp and log directories) with the hadoop command, and set permissions so they can be used for storage:

cd /home/hadoop/hadoop-2.7.4

bin/hadoop fs -mkdir -p /user/hive/warehouse  
bin/hadoop fs -mkdir -p /user/hive/tmp  
bin/hadoop fs -mkdir -p /user/hive/log  
bin/hadoop fs -chmod -R 777 /user/hive/warehouse  
bin/hadoop fs -chmod -R 777 /user/hive/tmp  
bin/hadoop fs -chmod -R 777 /user/hive/log  

Check that the directories were created:

bin/hadoop fs -ls /user/hive

Edit hive-site.xml

Search for hive.exec.scratchdir and change its value to /user/hive/tmp

<property>  
    <name>hive.exec.scratchdir</name>  
    <value>/user/hive/tmp</value>  
</property>  

Search for hive.querylog.location and change its value to /user/hive/log/hadoop

<property>
    <name>hive.querylog.location</name>
    <value>/user/hive/log/hadoop</value>
    <description>Location of Hive run time structured log file</description>
</property>

Search for javax.jdo.option.ConnectionURL and change its value to the MySQL address

<property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://192.168.252.124:3306/hive?createDatabaseIfNotExist=true</value>
    <description>
      JDBC connect string for a JDBC metastore.
      To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.
      For example, jdbc:postgresql://myhost/db?ssl=true for postgres database.
    </description>
  </property>

Search for javax.jdo.option.ConnectionDriverName and change its value to the MySQL driver class name

<property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
</property>

Search for javax.jdo.option.ConnectionUserName and change its value to the MySQL username

<property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>Username to use against metastore database</description>
</property>

Search for javax.jdo.option.ConnectionPassword and change its value to the MySQL password

<property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>mima</value>
    <description>password to use against metastore database</description>
</property>

Create the tmp directory

mkdir /home/hadoop/hive-2.3.0/tmp

Then, in hive-site.xml:

replace every ${system:java.io.tmpdir} with /home/hadoop/hive-2.3.0/tmp

replace every ${system:user.name} with ${user.name}
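A sketch of doing that replacement in one go with sed (assuming the paths above):

cd /home/hadoop/hive-2.3.0/conf
sed -i 's#${system:java.io.tmpdir}#/home/hadoop/hive-2.3.0/tmp#g' hive-site.xml
sed -i 's#${system:user.name}#${user.name}#g' hive-site.xml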

Create hive-env.sh

cp hive-env.sh.template hive-env.sh

vi hive-env.sh

HADOOP_HOME=/home/hadoop/hadoop-2.7.4/
export HIVE_CONF_DIR=/home/hadoop/hive-2.3.0/conf
export HIVE_AUX_JARS_PATH=/home/hadoop/hive-2.3.0/lib

Download the MySQL driver jar

cd /home/hadoop/hive-2.3.0/lib

wget http://central.maven.org/maven2/mysql/mysql-connector-java/5.1.38/mysql-connector-java-5.1.38.jar

Initialize the metastore in MySQL

First make sure the hive database exists in MySQL; a sketch of creating it is shown below.
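Run on the MySQL host (192.168.252.124); the user, host pattern and password are illustrative and should match what is configured in hive-site.xml:

mysql> CREATE DATABASE IF NOT EXISTS hive;
mysql> CREATE USER IF NOT EXISTS 'root'@'%' IDENTIFIED BY 'mima';
mysql> GRANT ALL PRIVILEGES ON hive.* TO 'root'@'%';
mysql> FLUSH PRIVILEGES;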

cd /home/hadoop/hive-2.3.0/bin
./schematool -initSchema -dbType mysql

If you see output like the following, initialization succeeded:

Starting metastore schema initialization to 2.3.0
Initialization script hive-schema-2.3.0.mysql.sql
Initialization script completed
schemaTool completed

Check the MySQL database

/usr/local/mysql/bin/mysql -uroot -p
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| hive               |
| mysql              |
| performance_schema |
| sys                |
+--------------------+
5 rows in set (0.00 sec)
mysql> use hive;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+---------------------------+
| Tables_in_hive            |
+---------------------------+
| AUX_TABLE                 |
| BUCKETING_COLS            |
| CDS                       |
| COLUMNS_V2                |
| COMPACTION_QUEUE          |
| COMPLETED_COMPACTIONS     |
| COMPLETED_TXN_COMPONENTS  |
| DATABASE_PARAMS           |
| DBS                       |
| DB_PRIVS                  |
| DELEGATION_TOKENS         |
| FUNCS                     |
| FUNC_RU                   |
| GLOBAL_PRIVS              |
| HIVE_LOCKS                |
| IDXS                      |
| INDEX_PARAMS              |
| KEY_CONSTRAINTS           |
| MASTER_KEYS               |
| NEXT_COMPACTION_QUEUE_ID  |
| NEXT_LOCK_ID              |
| NEXT_TXN_ID               |
| NOTIFICATION_LOG          |
| NOTIFICATION_SEQUENCE     |
| NUCLEUS_TABLES            |
| PARTITIONS                |
| PARTITION_EVENTS          |
| PARTITION_KEYS            |
| PARTITION_KEY_VALS        |
| PARTITION_PARAMS          |
| PART_COL_PRIVS            |
| PART_COL_STATS            |
| PART_PRIVS                |
| ROLES                     |
| ROLE_MAP                  |
| SDS                       |
| SD_PARAMS                 |
| SEQUENCE_TABLE            |
| SERDES                    |
| SERDE_PARAMS              |
| SKEWED_COL_NAMES          |
| SKEWED_COL_VALUE_LOC_MAP  |
| SKEWED_STRING_LIST        |
| SKEWED_STRING_LIST_VALUES |
| SKEWED_VALUES             |
| SORT_COLS                 |
| TABLE_PARAMS              |
| TAB_COL_STATS             |
| TBLS                      |
| TBL_COL_PRIVS             |
| TBL_PRIVS                 |
| TXNS                      |
| TXN_COMPONENTS            |
| TYPES                     |
| TYPE_FIELDS               |
| VERSION                   |
| WRITE_SET                 |
+---------------------------+
57 rows in set (0.00 sec)

Start Hive

Quick test

Start Hive

cd /home/hadoop/hive-2.3.0/bin

./hive

Create a Hive database

hive>  create database ymq;
OK
Time taken: 0.742 seconds

Use the database

hive> use ymq;
OK
Time taken: 0.036 seconds

Create a table

hive> create table test (mykey string,myval string);
OK
Time taken: 0.569 seconds

Insert data

hive> insert into test values("1","www.ymq.io");

WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Query ID = hadoop_20170922011126_abadfa44-8ebe-4ffc-9615-4241707b3c03
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1506006892375_0001, Tracking URL = http://node1:8088/proxy/application_1506006892375_0001/
Kill Command = /home/hadoop/hadoop-2.7.4//bin/hadoop job  -kill job_1506006892375_0001
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2017-09-22 01:12:12,763 Stage-1 map = 0%,  reduce = 0%
2017-09-22 01:12:20,751 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 1.24 sec
MapReduce Total cumulative CPU time: 1 seconds 240 msec
Ended Job = job_1506006892375_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to directory hdfs://node1:9000/user/hive/warehouse/ymq.db/test/.hive-staging_hive_2017-09-22_01-11-26_242_8022847052615616955-1/-ext-10000
Loading data to table ymq.test
MapReduce Jobs Launched: 
Stage-Stage-1: Map: 1   Cumulative CPU: 1.24 sec   HDFS Read: 4056 HDFS Write: 77 SUCCESS
Total MapReduce CPU Time Spent: 1 seconds 240 msec
OK
Time taken: 56.642 seconds

Query the data

hive> select * from test;
OK
1    www.ymq.io
Time taken: 0.253 seconds, Fetched: 1 row(s)

HDFS web view

The newly written data can also be seen in the HDFS web UI.


 
Original article: https://www.cnblogs.com/mylovelulu/p/10334600.html