# Give each node a unique hostname (run the matching command ON that node),
# then start a fresh shell so the prompt picks up the new name.
hostnamectl set-hostname master    # on the master node
hostnamectl set-hostname slave1    # on slave1
hostnamectl set-hostname slave2    # on slave2
hostnamectl set-hostname slave3    # on slave3
bash
# Append the cluster host map to /etc/hosts on EVERY node so the nodes can
# resolve each other by name.
cat >> /etc/hosts <<'EOF'
172.18.32.165 master
172.18.32.167 slave1
172.18.32.169 slave2
172.18.32.171 slave3
EOF
# Stop the firewall (all nodes) and verify it is down.
systemctl stop firewalld
systemctl status firewalld
# Set the timezone. tzselect is interactive; the digits "5 9 1 1" in the
# original notes are the menu answers (Asia -> China -> Beijing Time -> yes).
tzselect
echo "TZ='Asia/Shanghai'; export TZ" >> /etc/profile && source /etc/profile
# Make the master serve time from its own local clock: edit /etc/ntp.conf
# and add the two lines below.
# BUG FIX: "stratum10" in the notes is invalid — the keyword and the value
# must be separated: "stratum 10".
vim /etc/ntp.conf
# server 127.127.1.0
# fudge 127.127.1.0 stratum 10
# Restart the NTP daemon on the master so the config takes effect.
/bin/systemctl restart ntpd.service

# On each slave: sync the clock once from master, then schedule a re-sync
# every 30 minutes between 10:00 and 17:00 via cron, and list the crontab
# to confirm.
ntpdate master
crontab -e
# */30 10-17 * * * /usr/sbin/ntpdate master
crontab -l
# On master: generate an SSH key pair and push the public key to every slave
# so master can log in without a password. "Hongta@123" in the original notes
# is the root password typed at each ssh-copy-id prompt, not a command.
ssh-keygen
cd /root/.ssh
ssh-copy-id -i id_rsa.pub slave1
ssh-copy-id -i id_rsa.pub slave2
ssh-copy-id -i id_rsa.pub slave3

# Verify passwordless login works.
ssh master
ssh slave1
# Install the JDK under /usr/java on master, then distribute it to the slaves.
mkdir -p /usr/java
tar -zxvf jdk-8u171-linux-x64.tar.gz -C /usr/java
cd /usr/java
# Add the JDK to the environment, e.g.:
#   export JAVA_HOME=/usr/java/jdk1.8.0_171
#   export PATH=$JAVA_HOME/bin:$PATH
vim /etc/profile
source /etc/profile
# "java]#" in the original notes was shell-prompt residue; the command is:
java -version
scp -r /usr/java root@slave1:/usr/
scp -r /usr/java root@slave2:/usr/
scp -r /usr/java root@slave3:/usr/
# Repeat the /etc/profile edit and reload on each slave.
vim /etc/profile
source /etc/profile
# Install ZooKeeper 3.4.10 under /usr/zookeeper.
mkdir -p /usr/zookeeper
tar -zxvf zookeeper-3.4.10.tar.gz -C /usr/zookeeper
vim /etc/profile
# Create conf/zoo.cfg (dataDir=.../zkdata, dataLogDir=.../zkdatalog and the
# server.N=host:2888:3888 entries).
vim zoo.cfg
source /etc/profile
mkdir zkdata zkdatalog
# NOTE(review): each node also needs a unique id in zkdata/myid
# (e.g. echo 1 > zkdata/myid on master, 2 on slave1, ...) matching the
# server.N entries in zoo.cfg — the original notes omit this step; confirm.

# Distribute to the slaves, set their environment, then start and check
# the service on every node.
scp -r /usr/zookeeper root@slave1:/usr/
scp -r /usr/zookeeper root@slave2:/usr/
scp -r /usr/zookeeper root@slave3:/usr/
vim /etc/profile
source /etc/profile
zkServer.sh start
zkServer.sh status
# Install Hadoop 2.7.3 under /usr/hadoop.
mkdir -p /usr/hadoop
tar -zxvf hadoop-2.7.3.tar.gz -C /usr/hadoop/
vim /etc/profile
source /etc/profile

# Point Hadoop at the JDK.
cd hadoop-2.7.3/etc/hadoop/
vim hadoop-env.sh
# export JAVA_HOME=/usr/java/jdk1.8.0_171

# Edit the remaining configuration files.
vim core-site.xml
vim hdfs-site.xml
vim yarn-env.sh
vim yarn-site.xml
cp mapred-site.xml.template mapred-site.xml && vim mapred-site.xml

# Declare the master node and the worker (slaves) list.
echo master > master && echo slave1 > slaves && echo slave2 >> slaves

# Distribute the configured tree to the slaves and set their environment.
scp -r /usr/hadoop root@slave1:/usr/
scp -r /usr/hadoop root@slave2:/usr/
vim /etc/profile
source /etc/profile
# Confirm the installation, then — on the MASTER node — format HDFS
# (first run only; reformatting wipes the namespace) and start the cluster.
hadoop version
hadoop namenode -format
start-all.sh
jps
# Also run jps on the slave nodes (the notes single out slave2) to confirm
# the DataNode / NodeManager processes are up.
# Reload systemd unit files, start MySQL, and confirm it is running.
systemctl daemon-reload
systemctl start mysqld
systemctl status mysqld
# mysqld generates a temporary root password on first start; recover it
# from the log.
grep "temporary password" /var/log/mysqld.log
# Log in as root using that temporary password.
mysql -u root -p
-- Relax the password policy so a short password like '123456' is accepted.
set global validate_password_policy=0;
set global validate_password_length=4;
-- Replace the temporary root password, then quit.
alter user 'root'@'localhost' identified by '123456';
\q
# Reconnect with the new password to verify it works.
mysql -uroot -p123456
-- Allow root to connect from any host (required for the remote Hive metastore).
create user 'root'@'%' identified by '123456';
-- BUG FIX: the grant target in the notes read "on . to" — the asterisks were
-- swallowed by formatting; the correct target is *.* (all databases/tables).
grant all privileges on *.* to 'root'@'%' with grant option;
flush privileges;
-- Create the application database with utf8 as its default character set.
CREATE DATABASE hongyaa /*!40100 DEFAULT CHARACTER SET utf8 */;
# --- Master node: install Hive 2.1.1 under /usr/hive ---
mkdir -p /usr/hive && cd /usr/package/
tar -zxvf apache-hive-2.1.1-bin.tar.gz -C /usr/hive/
vim /etc/profile
source /etc/profile
vim hive-env.sh
# BUG FIX: the variable reference needs '$', and the cp command in the notes
# was truncated — presumably it copies Hadoop's jline jar into Hive's lib to
# resolve the jline version clash; confirm the intended file name.
cp $HADOOP_HOME/share/hadoop/yarn/lib/jline-2.12.jar /usr/hive/apache-hive-2.1.1-bin/lib/
scp -r /usr/hive root@slave1:/usr/hive/

# --- slave1 node: add the MySQL JDBC driver and configure hive-site.xml ---
cp mysql-connector-java-5.1.47-bin.jar /usr/hive/apache-hive-2.1.1-bin/lib/
cp hive-default.xml.template hive-site.xml
vim hive-site.xml
vim /etc/profile
source /etc/profile

# --- Master node: configure the client-side hive-site.xml ---
cp hive-default.xml.template hive-site.xml
vim hive-site.xml

# --- slave1 node: initialise the metastore schema, then run the service ---
schematool -dbType mysql -initSchema
hive --service metastore
# --- Add node slave3 ---
# On slave3: first scp over all the packages (JDK, Hadoop, ...; the garbled
# "SCR" in the notes means scp), then clear any stale HDFS data.
# The :? guard aborts rather than running "rm -rf /hdfs/*" if HADOOP_HOME
# is unset.
rm -rf "${HADOOP_HOME:?}"/hdfs/*
# BUG FIX: the notes read "hadoop-2-4-3"; the installed version is 2.7.3.
cd /usr/hadoop/hadoop-2.7.3/etc/hadoop/
echo slave3 >> slaves
/usr/hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start datanode
/usr/hadoop/hadoop-2.7.3/sbin/yarn-daemon.sh start nodemanager

# On master: push the authorized_keys so passwordless SSH to slave3 works,
# then refresh the node list and verify slave3 appears in the report.
scp /root/.ssh/authorized_keys root@slave3:/root/.ssh/
hdfs dfsadmin -refreshNodes
hdfs dfsadmin -report
# --- Permanently decommission node slave2 ---
# On master: add the dfs.hosts.exclude property to hdfs-site.xml and list
# slave2 in the excludes file it points at.
vim hdfs-site.xml
vim excludes
hdfs dfsadmin -refreshNodes
hdfs dfsadmin -report   # wait until slave2 shows as Decommissioned

# On slave2: stop its daemons.
hadoop-daemon.sh stop datanode
yarn-daemon.sh stop nodemanager
# On the master node: download the sample loan dataset.
wget http://47.92.249.178/bigdata/data/loan.csv
# 7. Spark installation
# 7.1 Install the Scala environment: create /usr/scala, download and unpack
# the archive there.
mkdir -p /usr/scala
cd /usr/scala
wget http://172.16.47.240/bigdata/bigdata_tar/scala-2.11.12.tgz
tar -zxvf scala-2.11.12.tgz -C /usr/scala
# BUG FIX: the notes deleted an HBase tarball here ("/usr/hbase/hbase-1.2.4-
# bin.tar.gz") — a paste error from another section; clean up the Scala
# archive that was just unpacked instead.
rm -rf /usr/scala/scala-2.11.12.tgz
# 2. Configure and activate the Scala environment variables.
vim /etc/profile
# Append:
#   ##scala
#   export SCALA_HOME=/usr/scala/scala-2.11.12
#   export PATH=$SCALA_HOME/bin:$PATH
# BUG FIX: the notes read "export PATH=PATH", which destroys PATH; the
# expansion must be $SCALA_HOME/bin:$PATH.
source /etc/profile
scala -version   # verify the installation

# Copy Scala to the other nodes.
scp -r /usr/scala root@slave1:/usr/
scp -r /usr/scala root@slave2:/usr/
# 7.2 Install Spark: create /usr/spark, download and unpack the archive
# there (the original note said "/usr/scala" — a copy-paste slip).
mkdir -p /usr/spark
cd /usr/spark
wget http://172.16.47.240/bigdata/bigdata_tar/spark-2.4.0-bin-hadoop2.7.tgz
tar -zxvf spark-2.4.0-bin-hadoop2.7.tgz -C /usr/spark
rm -rf /usr/spark/spark-2.4.0-bin-hadoop2.7.tgz

# 2. In the conf/ directory, create spark-env.sh from the template ...
cp spark-env.sh.template spark-env.sh
# ... and append:
#   export SPARK_MASTER_IP=master
#   export SCALA_HOME=/usr/scala/scala-2.11.12
#   export SPARK_WORKER_MEMORY=8g
#   export JAVA_HOME=/usr/java/jdk1.8.0_171
#   export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
#   export HADOOP_CONF_DIR=/usr/hadoop/hadoop-2.7.3/etc/hadoop
# 3. Configure the Spark worker nodes. The slaves file must contain ONLY
# node names — delete the template's comment lines.
cp slaves.template slaves
vim slaves
# slave1
# slave2

# Ship the configured Spark tree to the workers.
scp -r /usr/spark root@slave1:/usr/
scp -r /usr/spark root@slave2:/usr/
# 4. Configure the Spark environment variables.
vim /etc/profile
# Append:
#   export SPARK_HOME=/usr/spark/spark-2.4.0-bin-hadoop2.7
#   export PATH=$SPARK_HOME/bin:$PATH
# BUG FIX: the notes read "export PATH=PATH", which destroys PATH; the
# expansion must be $SPARK_HOME/bin:$PATH.
source /etc/profile

# Start the Spark cluster from the master node.
/usr/spark/spark-2.4.0-bin-hadoop2.7/sbin/start-all.sh