taskkill /f /im java.exe
/opt/hadoop/bin/hadoop-daemon.sh start namenode
/opt/hadoop/bin/hadoop-daemon.sh start secondarynamenode
/opt/hadoop/bin/hadoop-daemon.sh start datanode
/opt/hadoop/bin/hadoop-daemon.sh start jobtracker
/opt/hadoop/bin/hadoop-daemon.sh start tasktracker
/opt/hadoop/bin/hadoop-daemon.sh stop tasktracker
/opt/hadoop/bin/hadoop-daemon.sh stop datanode
/opt/hadoop/bin/hadoop-daemon.sh stop secondarynamenode
/opt/hadoop/bin/hadoop-daemon.sh stop jobtracker
/opt/hadoop/bin/hadoop-daemon.sh stop namenode
HDFS operation examples
>bin/hadoop namenode -format
>bin/hadoop fs -mkdir /user/root/tmp
>bin/hadoop fs -ls /user/root/tmp
>bin/hadoop fs -put conf/* /user/root/tmp
>bin/hadoop fs -ls /user/root/tmp
>bin/hadoop fs -cat /user/root/tmp/core-site.xml
>bin/hadoop fs -get /user/root/tmp/core-site.xml /opt/hadoop/
>bin/hadoop fs -rm /user/root/tmp/core-site.xml
>bin/hadoop fs -rmr /user/root/tmp
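For reference, a minimal Java sketch (my own example, not part of the lab material) of the FileSystem API calls behind the fs -mkdir / -put / -cat / -rmr commands above; the class name and paths are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class HdfsOpsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();          // reads core-site.xml from the classpath
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/root/tmp");

    fs.mkdirs(tmp);                                              // fs -mkdir
    fs.copyFromLocalFile(new Path("conf/core-site.xml"),         // fs -put
                         new Path(tmp, "core-site.xml"));
    FSDataInputStream in = fs.open(new Path(tmp, "core-site.xml"));  // fs -cat
    IOUtils.copyBytes(in, System.out, conf, true);
    fs.delete(tmp, true);                                        // fs -rmr
  }
}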
MapReduce operation examples
>bin/hadoop job -list all
>bin/hadoop job -status job_??????_????
>bin/hadoop job -history /user/root/output
>bin/hadoop jar hadoop-0.20.2-examples.jar grep input output 'config[a-z.]+'
JobTracker http://localhost:50030/jobtracker.jsp
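As a hedged sketch (assuming the old org.apache.hadoop.mapred API shipped with Hadoop 0.20.2), the following illustrative class does roughly what bin/hadoop job -list all does; the class name ListJobsDemo is my own:

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;

public class ListJobsDemo {
  public static void main(String[] args) throws Exception {
    // JobConf picks up the JobTracker address from the conf/ directory on the classpath.
    JobClient client = new JobClient(new JobConf());
    for (JobStatus status : client.getAllJobs()) {
      System.out.println(status.getJobID() + " state=" + status.getRunState());
    }
  }
}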
HBase operation examples
(hadoop4win) start-hbase
>cd /opt/hbase
>bin/hbase shell
hbase(main):001:0>create 'scores', 'studentid', 'course'
hbase(main):001:0>list
hbase(main):001:0>describe 'scores'
hbase(main):001:0>put 'scores', 'jdwang', 'studentid:', '1'
hbase(main):001:0>put 'scores','jdwang','course:math','80'
hbase(main):001:0>put 'scores','jdwang','course:history','85'
hbase(main):001:0>put 'scores','lucy','studentid:','2'
hbase(main):001:0>put 'scores','lucy','course:math','75'
hbase(main):001:0>put 'scores','lucy','course:history','90'
hbase(main):001:0>get 'scores','jdwang'
hbase(main):001:0>get 'scores','lucy'
hbase(main):001:0>scan 'scores'
hbase(main):001:0>scan 'scores',{COLUMNS=>'course:'}
hbase(main):001:0>scan 'scores',{COLUMNS=>['studentid:','course:']}
hbase(main):001:0>disable 'scores'
hbase(main):001:0>drop 'scores'
HBase Master web UI (includes ZooKeeper status) http://localhost:60010/
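A hedged Java sketch (my own example, assuming the HBase 0.20 client API) of the put/get/scan operations shown in the shell session above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScoresClientDemo {
  public static void main(String[] args) throws Exception {
    // Connect to the 'scores' table created in the shell session above.
    HTable table = new HTable(new HBaseConfiguration(), "scores");

    // put 'scores','jdwang','course:math','80'
    Put put = new Put(Bytes.toBytes("jdwang"));
    put.add(Bytes.toBytes("course"), Bytes.toBytes("math"), Bytes.toBytes("80"));
    table.put(put);

    // get 'scores','jdwang'
    Result row = table.get(new Get(Bytes.toBytes("jdwang")));
    System.out.println(Bytes.toString(
        row.getValue(Bytes.toBytes("course"), Bytes.toBytes("math"))));

    // scan 'scores'
    ResultScanner scanner = table.getScanner(new Scan());
    for (Result r : scanner) {
      System.out.println(r);
    }
    scanner.close();
  }
}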
cd $HADOOP_HOME
=> cd /opt/hadoop
$ mkdir inputwc
$ bin/hadoop dfs -mkdir input
$ echo "This is a test" > inputwc/input1
$ echo "That is also a test" > inputwc/input2
$ bin/hadoop dfs -put inputwc input
$ bin/hadoop dfs -ls input
http://trac.nchc.org.tw/cloud/attachment/wiki/jazz/Hadoop_Lab6/WordCount.java?format=raw
WordCount.java => WordCount_jdwang1.java
line10=>public class WordCount_jdwang1 {
line37=>JobConf conf = new JobConf(WordCount_jdwang1.class);
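A hedged sketch of what the renamed driver roughly looks like around those two lines (the authoritative source is the WordCount.java attachment linked above; only the class name and the JobConf line change):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class WordCount_jdwang1 {                          // line 10: renamed class
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(WordCount_jdwang1.class);  // line 37: renamed JobConf
    conf.setJobName("wordcount");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    // The Map and Reduce inner classes from WordCount.java are unchanged and omitted here.
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
  }
}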
$ mkdir MyJava
$ javac -classpath hadoop-0.20.2-core.jar -d MyJava WordCount_jdwang1.java
$ jar -cvf wordcount_jdwang1.jar -C MyJava .
$ bin/hadoop jar wordcount_jdwang1.jar WordCount_jdwang1 input/ output/
=> /opt/hadoop/bin/hadoop fs -rmr output (the old output must be removed first in order to get new results)
$ bin/hadoop dfs -ls output
$ bin/hadoop dfs -cat output/part-00000
$ bin/hadoop dfs -copyToLocal output/part-00000 output/part-00000_jdwang.txt
=================================================================
http://trac.nchc.org.tw/cloud/raw-attachment/wiki/jazz/Hadoop_Lab6/WordCount2.java
line11=>public class WordCount2_jdwang2 extends Configured implements Tool {
line86=>JobConf conf = new JobConf(getConf(), WordCount2_jdwang2.class);
line117=>int res = ToolRunner.run(new Configuration(), new WordCount2_jdwang2(), args);
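A hedged skeleton of the Tool/ToolRunner driver referenced by those line numbers; see the WordCount2.java link above for the real code (the Map/Reduce classes and the -skip pattern handling via DistributedCache are omitted in this sketch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCount2_jdwang2 extends Configured implements Tool {   // line 11
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(getConf(), WordCount2_jdwang2.class);   // line 86
    conf.setJobName("wordcount2");
    // Map/Reduce classes and the -skip pattern file (DistributedCache)
    // from WordCount2.java are omitted in this skeleton.
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(), new WordCount2_jdwang2(), args);  // line 117
    System.exit(res);
  }
}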
$ echo "\." > pattern.txt
$ echo "\," >> pattern.txt
$ mkdir MyJava2
$ javac -classpath hadoop-0.20.2-core.jar -d MyJava2 WordCount2_jdwang2.java
$ jar -cvf wordcount2_jdwang2.jar -C MyJava2 .
$ bin/hadoop jar wordcount2_jdwang2.jar WordCount2_jdwang2 input output2 -skip pattern.txt
$ bin/hadoop dfs -cat output2/part-00000
$ bin/hadoop dfs -copyToLocal output2 output2