1. Install JDK 1.6 on Linux and configure environment variables
# Install JDK 1.6 (the self-extracting .bin unpacks into the current directory)
cd /usr/local/
./jdk-6u45-linux-x64.bin
# The installer creates /usr/local/jdk1.6.0_45; link it to the JAVA_HOME path used below
ln -s jdk1.6.0_45 jdk6
# Configure the environment variables and activate them
[root@master local]# vim /etc/profile.d/jdk.sh
export JAVA_HOME=/usr/local/jdk6
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
[root@master local]# source /etc/profile
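Verify that the new variables are picked up (the version check below assumes the jdk6 link created above):
[root@master local]# java -version   # should report 1.6.0_45
[root@master local]# which java      # expected: /usr/local/jdk6/bin/java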
2. Extract Hadoop 1.2.1 and create the tmp directory
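The steps below assume hadoop-1.2.1-bin.tar.gz is already sitting in /usr/local; if not, it can be fetched from the Apache archive first (URL is an assumption, adjust the mirror as needed):
[root@master local]# wget https://archive.apache.org/dist/hadoop/core/hadoop-1.2.1/hadoop-1.2.1-bin.tar.gz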
[root@master local]# tar -zxvf hadoop-1.2.1-bin.tar.gz
[root@master local]# cd hadoop-1.2.1/
[root@master hadoop-1.2.1]# mkdir tmp
3. Edit the Hadoop configuration files (6 files)
[root@master hadoop-1.2.1]# cd /usr/local/hadoop-1.2.1/conf/
masters
[root@master conf]# cat masters
master
slaves
[root@master conf]# cat slaves
slave1
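start-all.sh launches the slave daemons over SSH, so the names listed in masters/slaves must resolve (e.g. via /etc/hosts) and the master needs passwordless root SSH to itself and to slave1. A minimal sketch, assuming no key pair exists yet:
[root@master conf]# ssh-keygen -t rsa          # accept the defaults, empty passphrase
[root@master conf]# ssh-copy-id root@master    # authorize the key on the master itself
[root@master conf]# ssh-copy-id root@slave1    # authorize the key on slave1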
core-site.xml
[root@master conf]# cat core-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop-1.2.1/tmp</value>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://192.168.1.60:9000</value>
</property>
</configuration>
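hadoop.tmp.dir is the base path the HDFS storage directories default under (dfs/name for the NameNode, dfs/data for the DataNodes), which is why the tmp directory was created in step 2; fs.default.name tells every daemon and client where the NameNode listens.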
mapred-site.xml
[root@master conf]# cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>192.168.1.60:9001</value>
</property>
</configuration>
hdfs-site.xml
[root@master conf]# cat hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
</configuration>
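Note that dfs.replication is 3 while this cluster has only one DataNode (slave1), so every block will be reported as under-replicated; 1 would be the value consistent with this two-node layout. Once the cluster is up, replication health can be checked with fsck:
[root@master bin]# ./hadoop fsck / -files -blocks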
hadoop-env.sh
# Append the JAVA_HOME setting at the bottom of the script
export JAVA_HOME=/usr/local/jdk6
# Push the modified files to slave1 via scp
scp -rp masters slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp slaves slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp core-site.xml slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp mapred-site.xml slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp hdfs-site.xml slave1:/usr/local/hadoop-1.2.1/conf/
scp -rp hadoop-env.sh slave1:/usr/local/hadoop-1.2.1/conf/
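Since all six files go to the same destination, a loop is an equivalent, shorter alternative (a sketch, assuming the shell is still in the conf directory):
for f in masters slaves core-site.xml mapred-site.xml hdfs-site.xml hadoop-env.sh; do
  scp -rp "$f" slave1:/usr/local/hadoop-1.2.1/conf/
done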
4. Start Hadoop
(1) The first start requires formatting the NameNode
[root@master bin]# ./hadoop namenode -format
17/04/11 01:41:04 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = master/192.168.1.34
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 1.2.1
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2 -r 1503152; compiled by 'mattf' on Mon Jul 22 15:23:09 PDT 2013
STARTUP_MSG: java = 1.6.0_45
************************************************************/
17/04/11 01:41:04 INFO util.GSet: Computing capacity for map BlocksMap
17/04/11 01:41:04 INFO util.GSet: VM type = 64-bit
17/04/11 01:41:04 INFO util.GSet: 2.0% max memory = 1013645312
17/04/11 01:41:04 INFO util.GSet: capacity = 2^21 = 2097152 entries
17/04/11 01:41:04 INFO util.GSet: recommended=2097152, actual=2097152
17/04/11 01:41:04 INFO namenode.FSNamesystem: fsOwner=root
17/04/11 01:41:04 INFO namenode.FSNamesystem: supergroup=supergroup
17/04/11 01:41:04 INFO namenode.FSNamesystem: isPermissionEnabled=true
17/04/11 01:41:04 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100
17/04/11 01:41:04 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
17/04/11 01:41:04 INFO namenode.FSEditLog: dfs.namenode.edits.toleration.length = 0
17/04/11 01:41:04 INFO namenode.NameNode: Caching file names occuring more than 10 times
17/04/11 01:41:04 INFO common.Storage: Image file /usr/local/hadoop-1.2.1/tmp/dfs/name/current/fsimage of size 110 bytes saved in 0 seconds.
17/04/11 01:41:04 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/usr/local/hadoop-1.2.1/tmp/dfs/name/current/edits
17/04/11 01:41:04 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/usr/local/hadoop-1.2.1/tmp/dfs/name/current/edits
17/04/11 01:41:04 INFO common.Storage: Storage directory /usr/local/hadoop-1.2.1/tmp/dfs/name has been successfully formatted.
17/04/11 01:41:04 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.1.34
************************************************************/
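Formatting is a one-time step: rerunning it later leaves the DataNodes with a stale namespaceID and they will refuse to start. If a reformat is ever genuinely needed, clear hadoop.tmp.dir on every node first (this destroys all HDFS data):
[root@master bin]# rm -rf /usr/local/hadoop-1.2.1/tmp/*   # DANGER: wipes all HDFS data
[root@master bin]# ./hadoop namenode -format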
(2) Start Hadoop
[root@master bin]# ./start-all.sh
starting namenode, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-namenode-master.out
slave1: starting datanode, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-datanode-slave1.out
The authenticity of host 'master (192.168.1.34)' can't be established.
RSA key fingerprint is e3:c2:f6:71:e2:e7:97:00:f1:b7:c2:86:42:7f:5d:2c.
Are you sure you want to continue connecting (yes/no)? yes
master: Warning: Permanently added 'master,192.168.1.34' (RSA) to the list of known hosts.
master: starting secondarynamenode, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-secondarynamenode-master.out
starting jobtracker, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-jobtracker-master.out
slave1: starting tasktracker, logging to /usr/local/hadoop-1.2.1/libexec/../logs/hadoop-root-tasktracker-slave1.out
5. Verify that the Hadoop cluster started successfully
(1) Check the daemons on master with jps
[root@master bin]# jps
2461 JobTracker
2227 NameNode
2383 SecondaryNameNode
2603 Jps
(2) Check the daemons on slave1 with jps
[root@slave1 local]# jps
2068 Jps
1982 TaskTracker
1908 DataNode
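Besides jps, the web UIs confirm the daemons are actually serving (ports are the Hadoop 1.x defaults):
[root@master bin]# curl -s http://master:50070/   # NameNode web UI
[root@master bin]# curl -s http://master:50030/   # JobTracker web UI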
(3) Exercise HDFS from master
[root@master bin]# ./hadoop fs -ls /
Found 1 items
drwxr-xr-x - root supergroup 0 2017-04-11 01:43 /usr
[root@master bin]# ./hadoop fs -put /etc/passwd /
[root@master bin]# ./hadoop fs -ls /
Found 2 items
-rw-r--r-- 3 root supergroup 1380 2017-04-11 01:48 /passwd
drwxr-xr-x - root supergroup 0 2017-04-11 01:43 /usr
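To confirm the upload round-trips, read the file back out of HDFS; stop-all.sh shuts the cluster down when finished:
[root@master bin]# ./hadoop fs -cat /passwd | head -3   # should match the local /etc/passwd
[root@master bin]# ./stop-all.sh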
#Game Over