Pull the image

# on the host
docker pull ubuntu:16.04
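
To double-check that the image was pulled, you can list it:

# on the host
docker images ubuntu:16.04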

Build the image

  1. Start a container

    # on the host
    docker run -i -t --name master ubuntu:16.04
  2. Install and configure inside the container

    # inside the container
    apt update
    apt install -y openjdk-8-jdk ssh net-tools iputils-ping
    echo 'export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/' >> ~/.bashrc
    echo '/usr/sbin/sshd' >> ~/.bashrc
    mkdir /var/run/sshd
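
    To confirm the installation, a quick sanity check can be run in the same container (this assumes the ~/.bashrc changes above have been applied):

    # inside the container
    source ~/.bashrc
    java -version
    echo $JAVA_HOME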
  3. Configure SSH

    # on the host
    docker cp ~/.ssh/id_rsa.pub master:/root/
    # inside the master container
    ssh-keygen -t rsa
    cd ~/.ssh
    cp id_rsa.pub authorized_keys
    cat ~/id_rsa.pub >> authorized_keys
    chown root:root authorized_keys
    chmod 600 authorized_keys
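
    A simple way to verify the key setup is to start sshd and try a password-less login to localhost (the first connection will ask to accept the host key):

    # inside the master container
    /usr/sbin/sshd
    ssh localhost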
  4. Install Hadoop

    # in a new terminal on the host
    docker cp ./hadoop-2.7.5.tar.gz master:/root
    # inside the container
    mkdir -p ~/Program
    tar -zxvf ~/hadoop-2.7.5.tar.gz -C ~/Program/
    rm ~/hadoop-2.7.5.tar.gz
    echo 'export HADOOP_HOME=/root/Program/hadoop-2.7.5' >> ~/.bashrc
    echo 'export HADOOP_CONFIG_HOME=$HADOOP_HOME/etc/hadoop' >> ~/.bashrc
    echo 'export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin' >> ~/.bashrc
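
    After reloading the shell configuration, hadoop should be on the PATH; a quick sanity check:

    # inside the container
    source ~/.bashrc
    hadoop version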
  5. Edit the Hadoop configuration files

    #hadoop-env.sh
    export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
    #core-site.xml
    <configuration>
      <property>
        <name>hadoop.tmp.dir</name>
        <value>/root/Program/hadoop-2.7.5/tmp</value>
      </property>
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
      </property>
    </configuration>
    
    #hdfs-site.xml
    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>2</value>
      </property>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/root/Program/hadoop-2.7.5/tmp/dfs/namenode</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/root/Program/hadoop-2.7.5/tmp/dfs/datanode</value>
      </property>
    </configuration>
    
    #mapred-site.xml (copy from mapred-site.xml.template if it does not exist)
    <configuration>
      <property>
        <name>mapred.job.tracker</name>
        <value>master:9001</value>
      </property>
    </configuration>
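
    All three files live under $HADOOP_CONFIG_HOME. As an optional sketch, the JAVA_HOME line in hadoop-env.sh can also be set non-interactively (assuming the variables from step 4 are loaded):

    # inside the container
    cd $HADOOP_CONFIG_HOME
    sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64|' hadoop-env.sh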
  6. Commit the image

    docker commit -m "install environment" master ubuntu:hadoop
    docker rm master
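
    The committed image should now show up next to the base image:

    # on the host
    docker images ubuntu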

Configure the network

  1. Create a subnet and assign an IP to each container

    # on the host
    docker network create --subnet=10.0.0.0/16 hadoopnetwork
    docker run -i -t --name master -h master --network hadoopnetwork --ip 10.0.0.2 ubuntu:hadoop
    docker run -i -t --name slave1 -h slave1 --network hadoopnetwork --ip 10.0.0.3 ubuntu:hadoop
    docker run -i -t --name slave2 -h slave2 --network hadoopnetwork --ip 10.0.0.4 ubuntu:hadoop
    # after exiting each container's shell, restart the containers in the background
    docker start master
    docker start slave1
    docker start slave2
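
    To double-check the subnet and the addresses that were assigned, inspect the network:

    # on the host
    docker network inspect hadoopnetwork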
    
  2. Test that the IPs are reachable

    # from the host or from another container
    ping 10.0.0.2
    ssh root@10.0.0.2
  3. Edit the hosts file on each node

    # /etc/hosts
    10.0.0.2        master
    10.0.0.3        slave1
    10.0.0.4        slave2
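
    One way to append these entries on each container (Docker manages /etc/hosts, so redo this if a container is recreated):

    # on master, slave1 and slave2
    echo '10.0.0.2        master' >> /etc/hosts
    echo '10.0.0.3        slave1' >> /etc/hosts
    echo '10.0.0.4        slave2' >> /etc/hosts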
  4. Confirm that the master node can reach the slave nodes

    # on the master node
    ssh root@slave1
    ssh root@slave2
  5. Edit the slaves file on the master node

    # $HADOOP_CONFIG_HOME/slaves
    localhost
    slave1
    slave2
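
    As a sketch, the file can be written in one go (assuming the environment variables from the image are loaded):

    # on the master node
    printf 'localhost\nslave1\nslave2\n' > $HADOOP_CONFIG_HOME/slaves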
  6. Start the Hadoop cluster

    # on the master node
    hdfs namenode -format
    start-all.sh
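
    Once start-all.sh finishes, the DataNodes and NodeManagers should register with the master; a quick check:

    # on the master node
    hdfs dfsadmin -report
    yarn node -list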
  7. Check that everything is running

    # on the master node
    jps
    963 Jps
    469 SecondaryNameNode
    758 NodeManager
    295 DataNode
    634 ResourceManager
    157 NameNode
    # on the slave1 node
    jps
    292 Jps
    41 DataNode
    155 NodeManager
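
    Besides jps, the web interfaces give a fuller picture. In Hadoop 2.7.x the NameNode UI listens on port 50070 and the ResourceManager UI on port 8088 by default, so something like the following should respond (assuming curl is installed on the host):

    # on the host
    curl http://10.0.0.2:50070
    curl http://10.0.0.2:8088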

