先电云计算BigData
先电云计算BigData
基础平台搭建
master#
hostnamectl set-hostname master
//修改主机名
hostname
logout
vi /etc/hosts
//添加本地域名解析
192.168.200.131 master.hadoop master
192.168.200.133 slave1.hadoop
ping slave1.hadoop
//检测
slave1#
hostnamectl set-hostname slave1
hostname
logout
vi /etc/hosts
192.168.200.131 master.hadoop
192.168.200.133 slave1.hadoop slave1
ping master.hadoop
master#
mv /etc/yum.repos.d/* /home
vi /etc/yum.repos.d/ambari.repo
//新建yum源配置文件
[centos]
name=centos
baseurl=ftp://192.168.100.10/centos/
enabled=1
gpgcheck=0
[ambari]
name=ambari
baseurl=file:///opt/ambari/ambari-2.6.0.0
enabled=1
gpgcheck=0
mkdir /opt/ambari
mount -o loop XianDian-BigData-v2.2.iso /opt/ambari/
yum clean all
yum list
yum install -y vsftpd
//搭建ftp服务器
vi /etc/vsftpd/vsftpd.conf
//修改ftp根目录
anon_root=/opt
vi /etc/selinux/config
SELINUX=enforcing=>SELINUX=disabled //disabled需要修改文件后重启生效,permissive也可
setenforce 0
//设为Permissive,1为enforcing
getenforce
//获得Selinux的状态
Permissive
systemctl disable firewalld
//关闭防火墙和Selinux,先电镜像是关闭的无需操作
systemctl stop firewalld
systemctl enable vsftpd
systemctl start vsftpd
slave1#
mv /etc/yum.repos.d/* /home
vi /etc/yum.repos.d/ambari.repo
[centos]
name=centos
baseurl=ftp://192.168.100.10/centos/
enabled=1
gpgcheck=0
[ambari]
name=ambari
baseurl=ftp://192.168.200.131/ambari/ambari-2.6.0.0
enabled=1
gpgcheck=0
yum clean all
yum list
vi /etc/selinux/config
SELINUX=enforcing => SELINUX=disabled //disabled需要修改文件后重启生效,permissive也可
setenforce 0
//设为Permissive,1为enforcing
getenforce
//获得Selinux的状态
Permissive
systemctl disable firewalld
//关闭防火墙和Selinux,先电镜像是关闭的无需操作
systemctl stop firewalld
master#
yum -y install httpd
cp -r /opt/ambari/HDP-* /var/www/html/
systemctl enable httpd
systemctl start httpd
yum -y install ntp
vi /etc/ntp.conf
server 0.centos.pool.ntp.org iburst //将原有的四条注释掉
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
server 127.127.1.0 //添加两行
fudge 127.127.1.0 stratum 10
systemctl enable ntpd
systemctl start ntpd
slave1#
yum install -y ntpdate
ntpdate master.hadoop
systemctl enable ntpdate
master#
ssh slave1.hadoop
//检测能否用无密码公钥认证访问(第一次需要用确认使用公钥访问),若不能则需要配置无密码公钥访问 使用exit退出连接
ssh-keygen
//生成公钥
ssh-copy-id slave1.hadoop
//将公钥复制到目标主机
slave1#
ssh master.hadoop
ssh-keygen
ssh-copy-id master.hadoop
master&slave1#
cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never //Transparent Huge Pages状态为开启的,需要关闭,该文件不可编辑,不能永久修改所有修改==重启后失效==
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
cat /sys/kernel/mm/transparent_hugepage/enabled
always madvise [never] //关闭状态
master#
mkdir /usr/jdk64/
tar -zxvf /opt/ambari/jdk-8u77-linux-x64.tar.gz -C /usr/jdk64/
vi /etc/profile
export JAVA_HOME=/usr/jdk64/jdk1.8.0_77
export PATH=$JAVA_HOME/bin:$PATH
source /etc/profile
java -version
java version "1.8.0_77"
Java(TM) SE Runtime Environment (build 1.8.0_77-b03)
Java HotSpot(TM) 64-Bit Server VM (build 25.77-b03, mixed mode)
scp /opt/ambari/jdk-8u77-linux-x64.tar.gz 192.168.200.133:/opt
//通过ssh的scp将文件发到目标机
slave1#
mkdir /usr/jdk64/
tar -zxvf /opt/jdk-8u77-linux-x64.tar.gz -C /usr/jdk64/
vi /etc/profile
export JAVA_HOME=/usr/jdk64/jdk1.8.0_77
export PATH=$JAVA_HOME/bin:$PATH
source /etc/profile
java -version
java version "1.8.0_77"
Java(TM) SE Runtime Environment (build 1.8.0_77-b03)
Java HotSpot(TM) 64-Bit Server VM (build 25.77-b03, mixed mode)
配置ambari
master#
yum -y install ambari-server
yum -y install mariadb mariadb-server mysql-connector-java
systemctl enable mariadb
systemctl start mariadb
mysql_secure_installation
Enter current password for root (enter for none): //初始时无密码自接回车
Set root password? [Y/n]
y
//设置root密码为“bigdata”
New password: //填入密码是不显示的
Re-enter new password:
Remove anonymous users? [Y/n]
y
//删除匿名用户
Disallow root login remotely? [Y/n]
n
//是否禁用root远程登录
Remove test database and access to it? [Y/n]
y
//删除测试数据库
Reload privilege tables now? [Y/n]
y
//重新加载权限表
mysql -uroot -pbigdata
MariaDB [(none)]>
create database ambari;
MariaDB [(none)]>
grant all privileges on ambari.* to 'ambari'@'localhost' identified by 'bigdata';
MariaDB [(none)]>
grant all privileges on ambari.* to 'ambari'@'%' identified by 'bigdata';
MariaDB [(none)]>
use ambari;
MariaDB [ambari]>
source /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql
MariaDB [ambari]>quit
vi /etc/profile
export buildNumber=2.6.0.0
ambari-server setup
Customize user account for ambari-server daemon [y/n] (n)?
n
Checking JDK...
[1] Oracle JDK 1.8 + Java Cryptography Extension (JCE) Policy Files 8
[2] Oracle JDK 1.7 + Java Cryptography Extension (JCE) Policy Files 7
[3] Custom JDK
================================
Enter choice (1):
3
Path to JAVA_HOME:
/usr/jdk64/jdk1.8.0_77
Enter advanced database configuration [y/n] (n)?
y
Configuring database...
================================
Choose one of the following options:
[1] - PostgreSQL (Embedded)
[2] - Oracle
[3] - MySQL / MariaDB
[4] - PostgreSQL
[5] - Microsoft SQL Server (Tech Preview)
[6] - SQL Anywhere
[7] - BDB
================================
Enter choice (1):
3
Hostname (localhost):
Port (3306):
Database name (ambari):
Username (ambari):
Enter Database Password (bigdata):
Proceed with configuring remote database connection properties [y/n] (y)?
ambari-server start
admin/admin //用户名和密码
master&slave1#
yum install -y ambari-agent
vi /etc/ambari-agent/conf/ambari-agent.ini
[server]
hostname=localhost => hostname=master.hadoop
systemctl start ambari-agent
ambari-agent restart
tail -f /var/log/ambari-agent/ambari-agent.log
在python 2.7.5及以上版本时,增加了certificate verification,正是因为这个特性导致ambari-agent无法连接server
//解决方法:修改/etc/python/cert-verification.cfg配置文件:verify=platform_default ###(这是默认配置)
修改为verify=disable,然后重启ambari agent,就可以正常注册
HDFS
YARN+MapReduce2
ZooKeeper
Ambari Metrics
添加Hive服务前需要先对数据库进行操作
mysql -uroot -pbigdata
MariaDB [(none)]> create database hive;
MariaDB [(none)]> grant all privileges on hive.* to 'hive'@'localhost' identified by 'bigdata';
MariaDB [(none)]> grant all privileges on hive.* to 'hive'@'%' identified by 'bigdata';
Hive
HBase
Mahout
Pig
Flume
Spark2