hadoop部署记录3--部署hive

275 阅读4分钟

一起养成写作习惯!这是我参与「掘金日新计划 · 4 月更文挑战」的第3天,点击查看活动详情

此版本为安装Hive3的安装步骤,hive2 和hive3 安装是差不多的,只有少部分的配置文件有区别.

18 安装Mysql

创建一个mysql用户 sudo groupadd -g 1001 mysql sudo useradd -u 1001 -g mysql mysql 设置密码 sudo passwd mysql newpasswd 登录mysql用户 并创建目录 mkdir -p ~/3306/{data,tmp,log,undo,binlog/mysql-bin}

scp tar包文件到 mysql 家目录 解压mysql tar 包

tar zxvf apache-hive-2.3.8-bin.tar.gz

初始化 mysql

bin/mysqld --initialize --user=mysql --basedir=/home/mysql/mysql --datadir=/data/mysql/data

编辑mysql 配置文件 ~/3306/etc/my.cnf 005 和006 的配置文件不完全一样,主要差别是offset 一个是从1开始一个是从2开始每次递增2

005 my.cnf

[client]
port            = 3306
socket          = /home/mysql/3306/tmp/mysql.sock
[mysql]
prompt="\\u@\\h:\\p  [\\d]> 
#pager="less -i -n -S"
#tee=/home/mysql/query.log
no-auto-rehash
[mysqld]
skip-ssl
port                           =3306
log-error                      = /home/mysql/3306/log/mysqld_err.log
basedir                        = /home/mysql/mysql
datadir                        = /data/mysql/data 
tmpdir                         = /home/mysql/3306/tmp
socket                         = /home/mysql/3306/tmp/mysql.sock
pid-file                       =/home/mysql/3306/tmp/mysqld.pid
user                           = mysql
server_id                      = 943306
auto_increment_offset          = 1
auto_increment_increment       = 2 
log-bin                        = /home/mysql/3306/binlog/mysql-bin
wait_timeout                   = 28800
interactive_timeout            = 28800
lower_case_table_names         = 1
character-set-server = utf8
open_files_limit = 65535
max_connections = 1000
max_connect_errors = 6
explicit_defaults_for_timestamp
sql_mode                       = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION
binlog_format = row
binlog_cache_size              = 1081344
binlog_stmt_cache_size         = 1081344
innodb_undo_directory              =/home/mysql/3306/undo
innodb_undo_logs=128
innodb_undo_tablespaces=8
max_relay_log_size = 500M
relay_log_purge = 1
log_slave_updates
innodb_support_xa=1
#5.7
slave-parallel-type=LOGICAL_CLOCK
slave-parallel-workers=16
master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log_recovery=ON
#gtid
gtid_mode=on
enforce_gtid_consistency=on


#buffers & cache

table_open_cache_instances = 64
thread_stack = 512K
external-locking = FALSE
max_allowed_packet = 32M
sort_buffer_size = 4M
join_buffer_size = 4M
table_open_cache = 2048
table_definition_cache = 2048
table_open_cache = 2048
max_heap_table_size = 96M
thread_cache_size = 800
query_cache_size = 0
query_cache_type = 0
query_cache_limit = 256K
query_cache_min_res_unit = 512
tmp_table_size = 96M
key_buffer_size = 8M
read_buffer_size = 4M
read_rnd_buffer_size = 16M
bulk_insert_buffer_size = 32M
#slow log
slow_query_log = 0
log_timestamps = SYSTEM
slow_query_log_file = /home/mysql/3306/log/dataslow.log
long_query_time = 0.1
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
sync_binlog = 1
expire_logs_days = 7
slave-rows-search-algorithms = 'INDEX_SCAN,HASH_SCAN'
binlog_checksum = 1
relay-log-purge = 1
key_buffer_size = 32M
read_buffer_size = 8M
read_rnd_buffer_size = 4M
lock_wait_timeout = 3600
explicit_defaults_for_timestamp = 1
#myisam
myisam_sort_buffer_size = 128M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
#innodb
#innodb_additional_mem_pool_size = 16M
innodb_buffer_pool_size = 16384M
innodb_buffer_pool_instances = 4
innodb_buffer_pool_load_at_startup = 1
innodb_buffer_pool_dump_at_shutdown = 1
innodb_data_home_dir    =/home/mysql/3306/data
innodb_data_file_path = ibdata1:1024M:autoextend
innodb_flush_log_at_trx_commit = 1
innodb_log_buffer_size = 64M
innodb_log_file_size = 256M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 50
innodb_file_per_table = 1
innodb_status_file = 1
innodb_io_capacity = 2000
innodb_io_capacity_max = 8000
transaction_isolation = READ-COMMITTED
innodb_flush_method = O_DIRECT
innodb_purge_threads=4
innodb_page_cleaners = 4
innodb_write_io_threads=16
innodb_read_io_threads=16
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_open_files = 65535
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_online_alter_log_max_size = 4G
internal_tmp_disk_storage_engine = InnoDB
#innodb_stats_on_metadata = 0
# some var for MySQL 5.7
innodb_checksums = 1
innodb_file_format = Barracuda
innodb_file_format_max = Barracuda
query_cache_size = 0
query_cache_type = 0

# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links                 = 0

[mysqld_safe]
log-error                      = /home/mysql/3306/log/mysqld_err.log
pid-file                       = /home/mysql/3306/tmp/mysqld.pid
[mysqldump]
quick
max_allowed_packet = 32M

006my.cnf

[mysql@hadoopHD006 ~]$ cat 3306/etc/my.cnf 
[client]
port            = 3306
socket          = /home/mysql/3306/tmp/mysql.sock
[mysql]
prompt="\\u@\\h:\\p  [\\d]> 
#pager="less -i -n -S"
#tee=/home/mysql/query.log
no-auto-rehash
[mysqld]
skip-ssl
port                           =3306
log-error                      = /home/mysql/3306/log/mysqld_err.log
basedir                        = /home/mysql/mysql
datadir                        = /data/mysql/data 
tmpdir                         = /home/mysql/3306/tmp
socket                         = /home/mysql/3306/tmp/mysql.sock
pid-file                       =/home/mysql/3306/tmp/mysqld.pid
user                           = mysql
server_id                      = 953306
auto_increment_offset          = 2
auto_increment_increment       = 2 
log-bin                        = /home/mysql/3306/binlog/mysql-bin
wait_timeout                   = 2880
interactive_timeout            = 2880
lower_case_table_names         = 1
character-set-server = utf8
open_files_limit = 65535
max_connections = 1000
max_connect_errors = 6
explicit_defaults_for_timestamp
sql_mode                       = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION
binlog_format = row
binlog_cache_size              = 1081344
binlog_stmt_cache_size         = 1081344
innodb_undo_directory              =/home/mysql/3306/undo
innodb_undo_logs=128
innodb_undo_tablespaces=8
max_relay_log_size = 500M
relay_log_purge = 1
log_slave_updates
innodb_support_xa=1
#5.7
slave-parallel-type=LOGICAL_CLOCK
slave-parallel-workers=16
master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log_recovery=ON
#gtid
gtid_mode=on
enforce_gtid_consistency=on


#buffers & cache

table_open_cache_instances = 64
thread_stack = 512K
external-locking = FALSE
max_allowed_packet = 32M
sort_buffer_size = 4M
join_buffer_size = 4M
table_open_cache = 2048
table_definition_cache = 2048
table_open_cache = 2048
max_heap_table_size = 96M
thread_cache_size = 800
query_cache_size = 0
query_cache_type = 0
query_cache_limit = 256K
query_cache_min_res_unit = 512
tmp_table_size = 96M
key_buffer_size = 8M
read_buffer_size = 4M
read_rnd_buffer_size = 16M
bulk_insert_buffer_size = 32M
#slow log
slow_query_log = 0
log_timestamps = SYSTEM
slow_query_log_file = /home/mysql/3306/log/dataslow.log
long_query_time = 0.1
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
sync_binlog = 1
expire_logs_days = 7
slave-rows-search-algorithms = 'INDEX_SCAN,HASH_SCAN'
binlog_checksum = 1
relay-log-purge = 1
key_buffer_size = 32M
read_buffer_size = 8M
read_rnd_buffer_size = 4M
lock_wait_timeout = 3600
explicit_defaults_for_timestamp = 1
#myisam
myisam_sort_buffer_size = 128M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
#innodb
#innodb_additional_mem_pool_size = 16M
innodb_buffer_pool_size = 16384M
innodb_buffer_pool_instances = 4
innodb_buffer_pool_load_at_startup = 1
innodb_buffer_pool_dump_at_shutdown = 1
innodb_data_home_dir    =/home/mysql/3306/data
innodb_data_file_path = ibdata1:1024M:autoextend
innodb_flush_log_at_trx_commit = 1
innodb_log_buffer_size = 64M
innodb_log_file_size = 256M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 50
innodb_file_per_table = 1
innodb_status_file = 1
innodb_io_capacity = 2000
innodb_io_capacity_max = 8000
transaction_isolation = READ-COMMITTED
innodb_flush_method = O_DIRECT
innodb_purge_threads=4
innodb_page_cleaners = 4
innodb_write_io_threads=16
innodb_read_io_threads=16
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_open_files = 65535
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_online_alter_log_max_size = 4G
internal_tmp_disk_storage_engine = InnoDB
#innodb_stats_on_metadata = 0
# some var for MySQL 5.7
innodb_checksums = 1
innodb_file_format = Barracuda
innodb_file_format_max = Barracuda
query_cache_size = 0
query_cache_type = 0

# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links                 = 0

[mysqld_safe]
log-error                      = /home/mysql/3306/log/mysqld_err.log
pid-file                       = /home/mysql/3306/tmp/mysqld.pid
[mysqldump]
quick
max_allowed_packet = 32M

启动mysql 服务

numactl --interleave=all /home/mysql/mysql/bin/mysqld_safe --defaults-file=/home/mysql/3306/etc/my.cnf --user=mysql&

数据库停止命令

 /home/mysql/mysql/bin/mysqladmin -uroot -p'password' -S /home/mysql/3306/tmp/mysql.sock --default-character-set=utf8mb4 shutdown

链接数据库 修改root 密码

set password for root@localhost=password('****');

005 mysql中执行

GRANT ALL PRIVILEGES ON *.* TO 'synchrouser'@'10.10.10.1' IDENTIFIED BY 'password';
flush privileges;  //创建同步用户,登录的主机

006 mysql中执行

GRANT ALL PRIVILEGES ON *.* TO 'synchrouser'@'10.10.10.5' IDENTIFIED BY 'password';//创建同步用户,登录的主机

查看每个二进制文件偏移量

show master status;
mysql> show master status;
+------------------+----------+--------------+------------------+--------------------------------------------------------------------------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set                                                                    |
+------------------+----------+--------------+------------------+--------------------------------------------------------------------------------------+
| mysql-bin.000004 |    18450 |              |                  | 7a7f7914-9c3c-11eb-b0cd-f03f952d9e30:1-11,
cedc84e5-9c3e-11eb-9cea-30e98e5557a5:6-48 |
+------------------+----------+--

在两台主机上 增加同步更新点 需要先停slave; 在更新 在启动

MySQL> stop slave;
 MySQL> change master to master_host='对端ip ', master_port=3306, master_user='synchrouser',master_password='password',master_log_file='mysql-bin.000004',master_log_pos=194;
 MySQL> start slave ;

查看两台主机同步状态 主要看Slave_IO_Running与Slave_SQL_Running这两个字段都显示yes就行了。


MySQL> show slave status\G;
查看同步线程

MySQL>  show processlist\G

若命令窗口有下图的三个线程,则证明配置成功,若缺少任何一个线程,请重启主、备两台机器的MySql服务,然后再在MySql命令窗口中输入上述命令查看是否有下图标注的三个线程。若没有这三个线程,请重新检查配置是否正确。

[参考文档] (blog.csdn.net/oguro/artic…)

注意:主备服务器的MySql进程必须都有这三个线程才证明配置成功!
参考



alter database hive character set latin1;

19 安装hive 因为hive 配置两台主机都是一样的所以这里只写一台的 另一台只是mysql IP配置的不一样

将文件复制到apps/下 解压文件

scp  text@10.1.1.1:/home/text/senmao/user/apache-hive-2.3.8-bin.tar.gz ./ 
tar zxvf apache-hive-2.3.8-bin.tar.gz
ln -s apache-hive-2.3.8-bin hive

编辑文件 conf/hive-env.sh

export HADOOP_HOME=/home/hadoop/apps/hadoop
export HIVE_HOME=/home/hadoop/apps/hive
export HADOOP_HEAPSIZE=50240
export HIVE_CONF_DIR=$HIVE_HOME/conf
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export JAVA_HOME=/home/hadoop/apps/jdk1.8.0_201

编辑文件 beeline-log4j2.properties

export HADOOP_HOME=/home/hadoop/apps/hadoop
export HIVE_HOME=/home/hadoop/apps/hive
export HADOOP_HEAPSIZE=50240
export HIVE_CONF_DIR=$HIVE_HOME/conf
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export JAVA_HOME=/home/hadoop/apps/jdk1.8.0_201
[hadoop@hadoopHD005 bin]$ cat beeline-log4j2.properties
cat: beeline-log4j2.properties: No such file or directory
[hadoop@hadoopHD005 bin]$ cd ../conf/
[hadoop@hadoopHD005 conf]$ cat beeline-log4j2.properties 
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

status = INFO
name = BeelineLog4j2
packages = org.apache.hadoop.hive.ql.log

# list of properties
property.hive.log.level = WARN
property.hive.root.logger = console
property.hive.log.dir = /home/hadoop/apps/apache-hive-2.3.8-bin/logs
# list of all appenders
appenders = console

# console appender
appender.console.type = Console
appender.console.name = console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n

# list of all loggers
loggers = HiveConnection

# HiveConnection logs useful info for dynamic service discovery
logger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection
logger.HiveConnection.level = INFO

# root logger
rootLogger.level = ${sys:hive.log.level}
rootLogger.appenderRefs = root
rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}

因为这个配置文件中写了日志目录 所以需要创建目录

编辑文件hive-site.xml

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->
<configuration>
<property>
  <name>javax.jdo.option.ConnectionURL</name> #local
  <value>jdbc:mysql://10.10.10.1:3306/hive?createDatabaseIfNotExist=true&amp;useUnicode=true&amp;characterEncoding=UTF-8</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
  <description>Driver class name for a JDBC metastore</description>
</property>
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>hive</value>
  <description>username to use against metastore database</description>
</property>
<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>Neirongjifeihive_123</value>
  <description>password to use against metastore database</description>
</property>
<property>
  <name>hive.stats.dbclass</name>
  <value>jdbc:mysql</value>
  <description>The default database that stores temporary hive statistics.</description>
</property>
<property>
  <name>hive.stats.jdbcdriver</name>
  <value>com.mysql.jdbc.Driver</value>
  <description>The JDBC driver for the database that stores temporary hive statistics.</description>
</property>
<property>
  <name>hive.stats.dbconnectionstring</name>
  <value>jdbc:mysql://10.10.10.1:3306/hivestats?useUnicode=true&amp;characterEncoding=UTF-8&amp;user=hive&amp;password=hive_123&amp;createDatabaseIfNotExist=true</value>
  <description>The default connection string for the database that stores temporary hive statistics.</description>
</property>
<property>
  <name>hive.metastore.uris</name>
  <value>thrift://10.10.10.1:9085</value>
  <description>Thrift uri for the remote metastore. Used by metastore client to connect to remote metastore.</description>
</property>
<property>
  <name>hive.exec.compress.intermediate</name>
  <value>true</value>
  <description> This controls whether intermediate files produced by hive between multiple map-reduce jobs are compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* </description>
</property>
<property>
  <name>hive.exec.compress.output</name>
  <value>true</value>
  <description> This controls whether the final outputs of a query (to a local/hdfs file or a hive table) is compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* </description>
</property>
<property>
  <name>hive.mapred.reduce.tasks.speculative.execution</name>
  <value>false</value>
  <description>Whether speculative execution for reducers should be turned on. </description>
</property>
<property>
  <name>hive.server2.thrift.port</name>
  <value>10000</value>
  <description>Port number of HiveServer2 Thrift interface.
  Can be overridden by setting $HIVE_SERVER2_THRIFT_PORT</description>
</property>
<property>
  <name>hive.stats.autogather</name>
  <value>true</value>
  <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
</property>
<property>
  <name>mapreduce.job.queuename</name>
  <value>interactive-query</value>
</property>
<property>
  <name>hive.metastore.authorization.storage.checks</name>
  <value>false</value>
  <description>Should the metastore do authorization checks against the underlying storage
  for operations like drop-partition (disallow the drop-partition if the user in
  question doesn't have permissions to delete the corresponding directory
  on the storage).</description>
</property>
<property>
  <name>hive.server2.authentication</name>
  <value>NOSASL</value>
</property>
<property>
  <name>hive.metastore.schema.verification</name>
  <value>false</value>
</property>
<property>
  <name>datanucleus.schema.autoCreateTables</name>
  <value>true</value>
</property>
<property>
  <name>hive.metastore.local</name>
  <value>false</value>
</property>
<property>
  <name>hive.warehouse.subdir.inherit.perms</name>
  <value>true</value>
  <description>Set this to true if the the table directories should inherit the
    permission of the warehouse or database directory instead of being created
    with the permissions derived from dfs umask</description>
</property>

<property>
  <name>javax.jdo.option.Multithreaded</name>
  <value>true</value>
  <description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
</property>

<property>
  <name>datanucleus.cache.level2</name>
  <value>false</value>
  <description>Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server</description>
</property>

<property>
  <name>datanucleus.cache.level2.type</name>
  <value>SOFT</value>
  <description>SOFT=soft reference based cache, WEAK=weak reference based cache.</description>
</property>

<property>
  <name>hive.metastore.connect.retries</name>
  <value>50</value>
  <description>Number of retries while opening a connection to metastore</description>
</property>

<property>
  <name>hive.metastore.failure.retries</name>
  <value>30</value>
  <description>Number of retries upon failure of Thrift metastore calls</description>
</property>

<property>
  <name>hive.metastore.client.connect.retry.delay</name>
  <value>5</value>
  <description>Number of seconds for the client to wait between consecutive connection attempts</description>
</property>

<property>
  <name>hive.metastore.client.socket.timeout</name>
  <value>1000</value>
  <description>MetaStore Client socket timeout in seconds</description>
</property>

<property>
  <name>hive.metastore.execute.setugi</name>
  <value>false</value>
  <description>In</description>
</property>

<property>
  <name>hive.server2.enable.impersonation</name>
  <value>false</value>
</property>

<property>
  <name>hive.server2.enable.doAs</name>
  <value>false</value>
</property>

    <!-- 分区表 -->
    <property>
        <name>hive.stats.autogather</name>
        <value>true</value>
    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
    </property>
    <property>
        <name>hive.exec.dynamic.partition</name>
        <value>true</value>
    </property>
    <property>
        <name>hive.exec.dynamic.partition.mode</name>
        <value>nonstrict</value>
    </property>
    <property>
        <name>hive.exec.max.dynamic.partitions</name>
        <value>100000</value>
    </property>
    <property>
        <name>hive.exec.max.dynamic.partitions.pernode</name>
        <value>100000</value>
    </property>
<!--设置hiveserver2队列选择不受公平调度器影响 -->
<property>
    <name>hive.server2.map.fair.scheduler.queue</name>
    <value>false</value>
</property>
<!--设置hive合并小文件-->
<property>
    <name>hive.input.format</name>
    <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
</property>
<!--对小于100M的文件进行合并-->
<property>
    <name>mapred.min.split.size</name>
    <value>100000000</value>
</property>
<property>
    <name>mapred.min.split.size.per.node</name>
    <value>100000000</value>
</property>
<property>
    <name>mapred.min.split.size.per.rack</name>
    <value>100000000</value>
</property>

</configuration>

基于元数据存储在mysql中的安装 将连接mysql的jar包拷贝到hive目录下

scp  text@10.1.1.1:/home/text/senmao/user/mysql-connector-java-5.1.38-bin.jar ~/hive/lib/

编写启动脚本

[hadoop@hadoopHD005 bin]$ cat  start-hivemetastore 
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
nohup "$bin"/hive --service metastore -p 9085 "$@" > "$bin"/metastore.log 2>&1 &
[hadoop@hadoopHD005 bin]$ cat   start-hiveserver2
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
nohup "$bin"/hive --service hiveserver2 "$@" > "$bin"/server2.log 2>&1 &
 
 

启动程序 时有报错 故修改了hive-site.xml 所以hive3 和hive2的这个文件是不一样的