002_How to Install the JDK and Set Up a Hadoop Cluster_Both Machines Must Be Configured Identically


1. Install JDK 1.6 on Linux and Configure the Environment Variables

# Install JDK 1.6

cd /usr/local/
./jdk-6u45-linux-x64.bin
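
Two details worth noting here: a freshly downloaded .bin installer usually lacks execute permission, and it unpacks into a versioned directory (jdk1.6.0_45 by default) rather than the /usr/local/jdk6 path used below, so a symlink or rename is presumably needed. A minimal sketch, assuming the default directory name:

chmod +x jdk-6u45-linux-x64.bin                  # make the self-extracting installer runnable
ln -s /usr/local/jdk1.6.0_45 /usr/local/jdk6     # so JAVA_HOME=/usr/local/jdk6 below resolves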

# Configure the environment variables and apply them

[root@master local]# vim /etc/profile.d/jdk.sh

export JAVA_HOME=/usr/local/jdk6
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

[root@master local]# source /etc/profile
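
To confirm the variables took effect in the current shell:

[root@master local]# java -version    # should now report java version "1.6.0_45"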

2. Extract Hadoop 1.2.1 and Create the tmp Directory

tar -zxvf hadoop-1.2.1-bin.tar.gz

[root@master local]# cd hadoop-1.2.1/
[root@master hadoop-1.2.1]# mkdir tmp
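# tmp will back hadoop.tmp.dir in core-site.xml below; without it, Hadoop defaults to /tmp, which is usually cleared on reboot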

3. Modify Hadoop's Configuration Files (6 files)

[root@master hadoop-1.2.1]# cd /usr/local/hadoop-1.2.1/conf/

    masters

[root@master conf]# cat masters
master

    slaves

[root@master conf]# cat slaves
slave1
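
Note that in Hadoop 1.x the masters file does not name the NameNode: it lists the hosts where start-all.sh launches the SecondaryNameNode. The slaves file lists the hosts that run a DataNode and TaskTracker.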

    core-site.xml

[root@master conf]# cat core-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/usr/local/hadoop-1.2.1/tmp</value>
        </property>
        <property>
                <name>fs.default.name</name>
                <value>hdfs://192.168.1.60:9000</value>
        </property>
</configuration>
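
Note: fs.default.name must point at the master's actual address, and the value must be identical on both machines. The config here uses 192.168.1.60, while the startup log later reports master/192.168.1.34; substitute whichever IP your master really has.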

    mapred-site.xml

[root@master conf]# cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
        <property>
                <name>mapred.job.tracker</name>
                <value>192.168.1.60:9001</value>
        </property>
</configuration>
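
mapred.job.tracker is the JobTracker's RPC address and takes a plain host:port pair; it is not an HTTP URL, which is why no scheme appears above.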

    hdfs-site.xml

[root@master conf]# cat hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
        <property>
                <name>dfs.replication</name>
                <value>3</value>
        </property>
</configuration>
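
Note: dfs.replication is left at 3 here, but this cluster has only one DataNode (slave1), so every block will remain under-replicated; for a two-machine setup like this, a value of 1 is more appropriate.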

    hadoop-env.sh

# Append the JAVA_HOME setting at the end of the script

export JAVA_HOME=/usr/local/jdk6

# Copy the configuration files to slave1 via SCP

scp -rp masters slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp slaves slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp core-site.xml slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp mapred-site.xml slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp hdfs-site.xml slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp hadoop-env.sh slave1:/usr/local/hadoop-1.2.1/conf/
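
Equivalently, the whole conf directory can be pushed in one command (this assumes the hadoop-1.2.1 tree already exists on slave1 and that passwordless SSH is configured, which the start scripts also require):

scp -rp /usr/local/hadoop-1.2.1/conf/* slave1:/usr/local/hadoop-1.2.1/conf/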

4. Start Hadoop

    (1) The NameNode must be formatted before the first start

[root@master bin]# ./hadoop namenode -format
17/04/11 01:41:04 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master/192.168.1.34
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 1.2.1
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2 -r 1503152; compiled by 'mattf' on Mon Jul 22 15:23:09 PDT 2013
STARTUP_MSG:   java = 1.6.0_45
************************************************************/
17/04/11 01:41:04 INFO util.GSet: Computing capacity for map BlocksMap
17/04/11 01:41:04 INFO util.GSet: VM type       = 64-bit
17/04/11 01:41:04 INFO util.GSet: 2.0% max memory = 1013645312
17/04/11 01:41:04 INFO util.GSet: capacity      = 2^21 = 2097152 entries
17/04/11 01:41:04 INFO util.GSet: recommended=2097152, actual=2097152
17/04/11 01:41:04 INFO namenode.FSNamesystem: fsOwner=root
17/04/11 01:41:04 INFO namenode.FSNamesystem: supergroup=supergroup
17/04/11 01:41:04 INFO namenode.FSNamesystem: isPermissionEnabled=true
17/04/11 01:41:04 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100
17/04/11 01:41:04 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
17/04/11 01:41:04 INFO namenode.FSEditLog: dfs.namenode.edits.toleration.length = 0
17/04/11 01:41:04 INFO namenode.NameNode: Caching file names occuring more than 10 times 
17/04/11 01:41:04 INFO common.Storage: Image file /usr/local/hadoop-1.2.1/tmp/dfs/name/current/fsimage of size 110 bytes saved in 0 seconds.
17/04/11 01:41:04 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/usr/local/hadoop-1.2.1/tmp/dfs/name/current/edits
17/04/11 01:41:04 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/usr/local/hadoop-1.2.1/tmp/dfs/name/current/edits
17/04/11 01:41:04 INFO common.Storage: Storage directory /usr/local/hadoop-1.2.1/tmp/dfs/name has been successfully formatted.
17/04/11 01:41:04 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.1.34
************************************************************/
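
Format the NameNode only once. Reformatting generates a new namespaceID, and a DataNode still holding the old ID in its data directory will refuse to register until that directory is cleared.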

    (2) Start Hadoop

[root@master bin]# ./start-all.sh 
starting namenode, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-namenode-master.out
slave1: starting datanode, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-datanode-slave1.out
The authenticity of host 'master (192.168.1.34)' can't be established.
RSA key fingerprint is e3:c2:f6:71:e2:e7:97:00:f1:b7:c2:86:42:7f:5d:2c.
Are you sure you want to continue connecting (yes/no)? yes
master: Warning: Permanently added 'master,192.168.1.34' (RSA) to the list of known hosts.
master: starting secondarynamenode, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-secondarynamenode-master.out
starting jobtracker, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-jobtracker-master.out
slave1: starting tasktracker, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-tasktracker-slave1.out
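
The host-key prompt above appears because start-all.sh also SSHes from the master to itself (to launch the SecondaryNameNode). The scripts assume passwordless SSH to every node; if that is not yet in place, a typical sequence is:

ssh-keygen -t rsa          # accept the defaults; creates ~/.ssh/id_rsa and id_rsa.pub
ssh-copy-id root@master    # authorize the key for the master itself
ssh-copy-id root@slave1    # and for the slave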

5. Verify That the Hadoop Cluster Started Successfully

    (1) Check the master with jps

[root@master bin]# jps
2461 JobTracker
2227 NameNode
2383 SecondaryNameNode
2603 Jps

    (2) Check slave1 with jps

[root@slave1 local]# jps
2068 Jps
1982 TaskTracker
1908 DataNode

    (3) Run HDFS commands on the master

[root@master bin]# ./hadoop fs -ls /
Found 1 items
drwxr-xr-x   - root supergroup          0 2017-04-11 01:43 /usr

[root@master bin]# ./hadoop fs -put /etc/passwd /
[root@master bin]# ./hadoop fs -ls /
Found 2 items
-rw-r--r--   3 root supergroup       1380 2017-04-11 01:48 /passwd
drwxr-xr-x   - root supergroup          0 2017-04-11 01:43 /usr
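
For a fuller health check, dfsadmin reports live DataNodes and capacity; with this setup it should list exactly one live node, slave1:

[root@master bin]# ./hadoop dfsadmin -report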

# Game Over
