Hadoop install:修订间差异

来自牛奶河Wiki
跳到导航 跳到搜索
无编辑摘要
无编辑摘要
第7行: 第7行:


==== Java ====
==== Java ====
/usr/bin/java -> /usr/java/jdk1.8.0_361/jre/bin/java
ln -s /usr/java/jdk1.8.0_361/jre/bin/java /usr/bin/java


==== hadoop ====
<nowiki>#</nowiki> Java Error
 
java 8 及更早版本只支持用 -version 查看版本;--version 是 java 9 以后才新增的选项。
 
$ java --version
Unrecognized option: --version
Error: Could not create the Java Virtual Machine.
Error: A fatal exception has occurred. Program will exit.
 
$  java -version
java version "1.8.0_361"
Java(TM) SE Runtime Environment (build 1.8.0_361-b09)
Java HotSpot(TM) 64-Bit Server VM (build 25.361-b09, mixed mode)
 
====hadoop====
ln -s /opt/hadoop-3.3.0 /opt/hadoop
ln -s /opt/hadoop-3.3.0 /opt/hadoop


==== profile ====
====profile====
<nowiki>#</nowiki> Java, 20201010, Adam
<nowiki>#</nowiki> Java, 20201010, Adam


第29行: 第43行:
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop


=== Hadoop 配置 ===
===Hadoop 配置 ===


==== 配置 Hadoop 环境脚本文件中的 JAVA_HOME 参数 ====
====配置 Hadoop 环境脚本文件中的 JAVA_HOME 参数====
<nowiki>#</nowiki> hadoop是守护线程 读取不到 /etc/profile 里面配置的JAVA_HOME路径
<nowiki>#</nowiki> hadoop是守护线程 读取不到 /etc/profile 里面配置的JAVA_HOME路径


第63行: 第77行:
   </property>
   </property>
   <property>
   <property>
          <!-- 保存临时文件目录 -->
            <!-- 保存临时文件目录 -->
     <name>hadoop.tmp.dir</name>
     <name>hadoop.tmp.dir</name>
     <value>/u01/hdfs/tmp</value>
     <value>/u01/hdfs/tmp</value>
   </property>
   </property>
  </configuration>
  </configuration>
=====hdfs-site.xml (HDFS组件)=====
=====hdfs-site.xml (HDFS组件) =====
  <configuration>
  <configuration>
   <property>
   <property>
第76行: 第90行:
   </property>
   </property>
   <property>
   <property>
          <!-- 第二节点地址 -->
            <!-- 第二节点地址 -->
     <name>dfs.namenode.secondary.http-address</name>
     <name>dfs.namenode.secondary.http-address</name>
     <value>g2-hdfs-02:9870</value>
     <value>g2-hdfs-02:9870</value>
第93行: 第107行:
   </property>
   </property>
   <property>
   <property>
          <!-- 配置false后,无需权限即可生成dfs上的文件 -->
            <!-- 配置false后,无需权限即可生成dfs上的文件 -->
     <name>dfs.permissions</name>
     <name>dfs.permissions</name>
     <value>false</value>
     <value>false</value>
第101行: 第115行:
  -- del
  -- del
   <property>
   <property>
          <!-- 备份数为默认值3 -->
            <!-- 备份数为默认值3 -->
     <name>dfs.replication</name>
     <name>dfs.replication</name>
     <value>3</value>
     <value>3</value>
第134行: 第148行:
       <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
       <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
   </property>
   </property>
=====yarn-site.xml=====
===== yarn-site.xml=====
  <configuration>
  <configuration>
   <property>
   <property>
第187行: 第201行:
..
..


=== INIT===
===INIT===
  # chown
  # chown
  chown -R hdfs:hadoop /opt/hadoop*
  chown -R hdfs:hadoop /opt/hadoop*

2023年2月17日 (五) 21:14的版本

ENV

USER

groupadd hadoop -g 1001

useradd hdfs -g hadoop -u 1001

Java

ln -s /usr/java/jdk1.8.0_361/jre/bin/java /usr/bin/java

# Java Error

java 8 及更早版本只支持用 -version 查看版本;--version 是 java 9 以后才新增的选项。

$ java --version
Unrecognized option: --version
Error: Could not create the Java Virtual Machine.
Error: A fatal exception has occurred. Program will exit.
$  java -version
java version "1.8.0_361"
Java(TM) SE Runtime Environment (build 1.8.0_361-b09)
Java HotSpot(TM) 64-Bit Server VM (build 25.361-b09, mixed mode)

hadoop

ln -s /opt/hadoop-3.3.0 /opt/hadoop

profile

# Java, 20201010, Adam

export JAVA_HOME=/usr/java/jdk1.8.0_361

export PATH=$PATH:$JAVA_HOME/bin

# hadoop, 20201010, Adam

export HADOOP_HOME=/opt/hadoop

export PATH=$PATH:$HADOOP_HOME/bin

export PATH=$PATH:$HADOOP_HOME/sbin

export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop

Hadoop 配置

配置 Hadoop 环境脚本文件中的 JAVA_HOME 参数

# hadoop是守护线程 读取不到 /etc/profile 里面配置的JAVA_HOME路径

# /opt/hadoop/etc/hadoop/

# hadoop-env.sh, mapred-env.sh, yarn-env.sh

cp hadoop-env.sh hadoop-env.sh.20210409

cp mapred-env.sh mapred-env.sh.20210409

cp yarn-env.sh yarn-env.sh.20210409

echo '

# hdfs, 20210409, Adam

export JAVA_HOME=/usr/java/jdk1.8.0_361' >> hadoop-env.sh   # 同样追加到 mapred-env.sh、yarn-env.sh

Setup

core-site.xml (Common组件)
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://g2-hdfs-01:9000</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/u01/hdfs/tmp</value>
  </property>
</configuration>
hdfs-site.xml (HDFS组件)
<configuration>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>g2-hdfs-01:9870</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>g2-hdfs-02:9870</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/u01/hdfs/dfs/nn</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/u01/hdfs/dfs/dn</value>
  </property>
  <property> 
    <name>dfs.webhdfs.enabled</name> 
    <value>true</value> 
  </property>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
</configuration>

-- del
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
  </property>
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>
mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value> 
  </property>
</configuration>

-- del
  <property>
     <name>mapreduce.jobhistory.address</name>
     <value>g2-hdfs-01:10020</value>
  </property>
  <property>
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>g2-hdfs-01:19888</value>
  </property>
  <property>
     <name>mapreduce.application.classpath</name>
     <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
  </property>
yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>g2-hdfs-01</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>  
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>g2-hdfs-01:8088</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>32768</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>
</configuration>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>hadoop01/192.168.44.5:8088</value>
    <description>配置外网只需要替换外网ip为真实ip,否则默认为 localhost:8088</description>
  </property>

yarn.resourcemanager.hostname
指定yarn的ResourceManager管理界面的地址,不配的话,Active Node始终为0
yarn.scheduler.maximum-allocation-mb
每个节点可用内存,单位MB,默认8192MB
yarn.nodemanager.aux-services
reducer获取数据的方式
yarn.nodemanager.vmem-check-enabled
false = 忽略虚拟内存的检查


# workers

g2-hdfs-01

g2-hdfs-02

..

INIT

# chown
chown -R hdfs:hadoop /opt/hadoop*
# nn
/opt/hadoop/bin/hdfs namenode -format

Start

## Start : root(发现需要用 hdfs 停服务,root不可停)
/opt/hadoop/sbin/start-dfs.sh
/opt/hadoop/sbin/start-yarn.sh

/opt/hadoop/sbin/stop-dfs.sh
/opt/hadoop/sbin/stop-yarn.sh

http://mc0:9870   # hdfs
http://mc0:8088   # yarn

## 单节点启停
# /opt/hadoop/bin
hdfs --daemon start datanode
hdfs --daemon start namenode

yarn --daemon start nodemanager
yarn --daemon stop resourcemanager