conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(new Scan())); // before
conf.set(TableInputFormat.SCAN, convertScanToString(new Scan())); // after
Change the first line to the second. Why? Because that method of TableMapReduceUtil is private (perhaps it used to be public; I am using hbase-0.90.3), so its code has to be copied into your own job class.
OK, here is the code:
/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package hbase.test;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.GenericOptionsParser;

/**
 * Example map/reduce job to construct index tables that can be used to quickly
 * find a row based on the value of a column. It demonstrates:
 * <ul>
 * <li>Using TableInputFormat and TableMapReduceUtil to use an HTable as input
 * to a map/reduce job.</li>
 * <li>Passing values from main method to children via the configuration.</li>
 * <li>Using MultiTableOutputFormat to output to multiple tables from a
 * map/reduce job.</li>
 * <li>A real use case of building a secondary index over a table.</li>
 * </ul>
 *
 * <h3>Usage</h3>
 *
 * <p>
 * Modify ${HADOOP_HOME}/conf/hadoop-env.sh to include the hbase jar, the
 * zookeeper jar, the examples output directory, and the hbase conf directory in
 * HADOOP_CLASSPATH, and then run
 * <tt><strong>bin/hadoop org.apache.hadoop.hbase.mapreduce.IndexBuilder TABLE_NAME COLUMN_FAMILY ATTR [ATTR ...]</strong></tt>
 * </p>
 *
 * <p>
 * To run with the sample data provided in index-builder-setup.rb, use the
 * arguments <strong><tt>people attributes name email phone</tt></strong>.
 * </p>
 *
 * <p>
 * This code was written against HBase 0.21 trunk.
 * </p>
 */
public class IndexBuilder {
  /** the column family containing the indexed row key */
  public static final byte[] INDEX_COLUMN = Bytes.toBytes("INDEX");
  /** the qualifier containing the indexed row key */
  public static final byte[] INDEX_QUALIFIER = Bytes.toBytes("ROW");

  /**
   * Internal Mapper to be run by Hadoop.
   */
  public static class Map extends
      Mapper<ImmutableBytesWritable, Result, ImmutableBytesWritable, Writable> {
    private byte[] family;
    private HashMap<byte[], ImmutableBytesWritable> indexes;

    @Override
    protected void map(ImmutableBytesWritable rowKey, Result result, Context context)
        throws IOException, InterruptedException {
      for (java.util.Map.Entry<byte[], ImmutableBytesWritable> index : indexes.entrySet()) {
        byte[] qualifier = index.getKey();
        ImmutableBytesWritable tableName = index.getValue();
        byte[] value = result.getValue(family, qualifier);
        if (value != null) {
          // original: row 123 attribute:phone 555-1212
          // index:    row 555-1212 INDEX:ROW 123
          Put put = new Put(value);
          put.add(INDEX_COLUMN, INDEX_QUALIFIER, rowKey.get());
          context.write(tableName, put);
        }
      }
    }

    @Override
    protected void setup(Context context) throws IOException,
        InterruptedException {
      Configuration configuration = context.getConfiguration();
      String tableName = configuration.get("index.tablename");
      String[] fields = configuration.getStrings("index.fields");
      String familyName = configuration.get("index.familyname");
      family = Bytes.toBytes(familyName);
      indexes = new HashMap<byte[], ImmutableBytesWritable>();
      for (String field : fields) {
        // if the table is "people" and the field to index is "email", then the
        // index table will be called "people-email"
        indexes.put(Bytes.toBytes(field),
            new ImmutableBytesWritable(Bytes.toBytes(tableName + "-" + field)));
      }
    }
  }

  /**
   * Job configuration.
   */
  public static Job configureJob(Configuration conf, String[] args)
      throws IOException {
    String tableName = args[0];
    String columnFamily = args[1];
    System.out.println("****" + tableName);
    conf.set(TableInputFormat.SCAN, convertScanToString(new Scan()));
    conf.set(TableInputFormat.INPUT_TABLE, tableName);
    conf.set("index.tablename", tableName);
    conf.set("index.familyname", columnFamily);
    String[] fields = new String[args.length - 2];
    for (int i = 0; i < fields.length; i++) {
      fields[i] = args[i + 2];
    }
    conf.setStrings("index.fields", fields);
    //conf.set("index.familyname", "attributes");
    Job job = new Job(conf, tableName);
    job.setJarByClass(IndexBuilder.class);
    job.setMapperClass(Map.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(TableInputFormat.class);
    job.setOutputFormatClass(MultiTableOutputFormat.class);
    return job;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "node2,node4,node3");
    //conf.set("fs.default.name", "hdfs://node1");
    //conf.set("mapred.job.tracker", "node1:54311");
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 3) {
      System.err.println("Only " + otherArgs.length + " arguments supplied, required: 3");
      System.err.println("Usage: IndexBuilder <TABLE_NAME> <COLUMN_FAMILY> <ATTR> [<ATTR> ...]");
      System.exit(-1);
    }
    Job job = configureJob(conf, otherArgs);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
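  /**
   * Copied from TableMapReduceUtil, where it is private in hbase-0.90.3:
   * serializes the Scan into a Base64 string so that it can travel to the
   * map tasks inside the job configuration (under TableInputFormat.SCAN).
   */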
  private static String convertScanToString(Scan scan) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(out);
    scan.write(dos);
    return Base64.encodeBytes(out.toByteArray());
  }
}
As you can see, I put the class in the hbase.test package. I exported that package as a jar, copied it to the Hadoop cluster, and ran: hadoop jar mapreduceindexbuilder.jar it f1 q1
No class name is needed on the command line because I selected the main class when exporting the jar. The three arguments are the table name, the column family, and the qualifier.
It was meant as a simple test, but it kept failing with the exceptions shown further below, essentially ClassNotFoundException.
Most problems of this kind come from a misconfigured HADOOP_CLASSPATH: adding the path of the HBase jar (hbase-0.90.3.jar) and of the ZooKeeper jar (whichever version you happen to use) normally fixes them.
But today I went over HADOOP_CLASSPATH several times, and both the HBase and ZooKeeper jars were already there.
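For reference, a minimal sketch of such an entry in ${HADOOP_HOME}/conf/hadoop-env.sh (the paths here are illustrative; substitute your own installation's):
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/path/to/hbase-0.90.3.jar:/path/to/zookeeper.jar:/path/to/hbase/conf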
Two more things to watch out for:
job.setJarByClass(IndexBuilder.class); must be present; without it the job's classes definitely will not be found!
If the Mapper class is nested inside the class whose main() submits the job, it must be declared static, as the sketch below shows.
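Here is a minimal sketch (my own illustration, not part of the job above) of why the nested Mapper must be static: Hadoop instantiates the Mapper by reflection through a no-argument constructor, and a non-static inner class does not have one, because its constructor implicitly takes the enclosing instance.

public class StaticNestedDemo {
    class InnerMapper {}          // non-static: constructor needs the outer instance
    static class NestedMapper {}  // static: has a usable no-arg constructor

    public static void main(String[] args) throws Exception {
        // Hadoop does roughly this when it spins up a map task:
        Object ok = NestedMapper.class.newInstance();     // works
        System.out.println("static nested class: " + ok);
        try {
            Object bad = InnerMapper.class.newInstance(); // throws
        } catch (InstantiationException e) {
            System.out.println("non-static inner class fails: " + e);
        }
    }
}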
Finally it dawned on me: maybe the jar I had built was incomplete. So I exported the entire project as a single jar, and that fixed it.
Argh!!!
Remember: package the whole project into the jar! Anything less will not work. (My guess as to why: the map tasks run in child JVMs on the worker nodes, which see the job jar but not necessarily the HADOOP_CLASSPATH of the machine that submitted the job; note that both failures below are task attempts, not client-side errors.)
12/01/09 16:06:23 INFO mapred.JobClient: Task Id : attempt_201201091352_0009_m_000000_1, Status : FAILED
java.lang.RuntimeException: java.lang.ClassNotFoundException: org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:996)
at org.apache.hadoop.mapreduce.JobContext.getOutputFormatClass(JobContext.java:248)
at org.apache.hadoop.mapred.Task.initialize(Task.java:501)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:308)
at org.apache.hadoop.mapred.Child$4.run(Child.java:270)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1127)
at org.apache.hadoop.mapred.Child.main(Child.java:264)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat
at java.net.URLClassLoader$1.run(URLClassLoader.java:200)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:188)
at java.lang.ClassLoader.loadClass(ClassLoader.java:307)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
at java.lang.ClassLoader.loadClass(ClassLoader.java:252)
at java.lang.ClassLoader.loadClassInternal(ClassLoader.java:320)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:247)
at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:943)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:994)
... 8 more
12/01/09 16:06:25 INFO mapred.JobClient: Task Id : attempt_201201091352_0009_m_000000_2, Status : FAILED
Error: java.lang.ClassNotFoundException: org.apache.zookeeper.KeeperException
at java.net.URLClassLoader$1.run(URLClassLoader.java:200)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:188)
at java.lang.ClassLoader.loadClass(ClassLoader.java:307)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
at java.lang.ClassLoader.loadClass(ClassLoader.java:252)
at java.lang.ClassLoader.loadClassInternal(ClassLoader.java:320)
at org.apache.hadoop.hbase.mapreduce.TableInputFormat.setConf(TableInputFormat.java:91)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:62)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:615)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:325)
at org.apache.hadoop.mapred.Child$4.run(Child.java:270)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1127)
at org.apache.hadoop.mapred.Child.main(Child.java:264)
I used two tables. The original table 'it' has column families f1 and f2; the indexed column is f1:q1.
The index table 'it-q1' has column family INDEX and qualifier INDEX:ROW.
Before the run, 'it-q1' existed but was empty, and 'it' contained:
hbase(main):031:0> scan 'it'
ROW COLUMN+CELL
001 column=f1:q1, timestamp=1326076812129, value=009
001 column=f1:q2, timestamp=1326076847867, value=123
002 column=f1:q1, timestamp=1326076862300, value=008
003 column=f1:q1, timestamp=1326076870450, value=007
004 column=f1:q1, timestamp=1326076884825, value=006
005 column=f1:q1, timestamp=1326076890460, value=005
006 column=f1:q1, timestamp=1326076895971, value=004
007 column=f1:q1, timestamp=1326076901697, value=003
008 column=f1:q1, timestamp=1326076906960, value=002
009 column=f1:q1, timestamp=1326076913531, value=001
010 column=f1:q1, timestamp=1326076921461, value=000
020 column=f1:q1, timestamp=1326109481769, value=20
030 column=f1:q1, timestamp=1326109530945, value=10
12 row(s) in 0.0590 seconds
After the run, the 'it-q1' table contained:
hbase(main):032:0> scan 'it-q1'
ROW COLUMN+CELL
000 column=INDEX:ROW, timestamp=1326109548266, value=010
001 column=INDEX:ROW, timestamp=1326109548266, value=009
002 column=INDEX:ROW, timestamp=1326109548266, value=008
003 column=INDEX:ROW, timestamp=1326109548266, value=007
004 column=INDEX:ROW, timestamp=1326109548266, value=006
005 column=INDEX:ROW, timestamp=1326109548266, value=005
006 column=INDEX:ROW, timestamp=1326109548266, value=004
007 column=INDEX:ROW, timestamp=1326109548266, value=003
008 column=INDEX:ROW, timestamp=1326109548266, value=002
009 column=INDEX:ROW, timestamp=1326109548266, value=001
10 column=INDEX:ROW, timestamp=1326109548266, value=030
20 column=INDEX:ROW, timestamp=1326109548266, value=020
12 row(s) in 0.0600 seconds
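To show what the index buys you, here is a minimal sketch (my own addition, not part of the job) of a lookup: finding which row of 'it' holds the f1:q1 value 007 becomes a point Get on 'it-q1' instead of a full scan of 'it'.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class IndexLookup {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node2,node4,node3");
        HTable index = new HTable(conf, "it-q1");
        // The index row key is the indexed value; INDEX:ROW holds the
        // original row key.
        Result r = index.get(new Get(Bytes.toBytes("007")));
        byte[] row = r.getValue(Bytes.toBytes("INDEX"), Bytes.toBytes("ROW"));
        System.out.println("row with f1:q1=007 is: " + Bytes.toString(row)); // prints 003
        index.close();
    }
}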
The console output of the run:
hadoop jar mapindexbuilder.jar it f1 q1
****it
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:host.name=node1
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.version=1.6.0_16
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.vendor=Sun Microsystems Inc.
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.home=/usr/java/jdk1.6.0_16/jre
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.class.path=(omitted here, far too long; the lines below containing IPs have been trimmed as well)
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/usr/lib/hadoop-0.20/lib/native/Linux-amd64-64
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:java.compiler=<NA>
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd64
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:os.version=2.6.18-194.el5
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:user.name=root
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:user.home=/root
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Client environment:user.dir=/root/workspace/wang
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=node3:2181,node4:2181,node2:2181 sessionTimeout=180000 watcher=hconnection
12/01/09 19:45:44 INFO zookeeper.ClientCnxn: Opening socket connection to server node4/
12/01/09 19:45:44 INFO zookeeper.ClientCnxn: Socket connection established to node4/, initiating session
12/01/09 19:45:44 INFO zookeeper.ClientCnxn: Session establishment complete on server node4/, sessionid = 0x334c10a876b00b5, negotiated timeout = 180000
12/01/09 19:45:44 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=node3:2181,node4:2181,node2:2181 sessionTimeout=180000 watcher=hconnection
12/01/09 19:45:44 INFO zookeeper.ClientCnxn: Opening socket connection to server node4/
12/01/09 19:45:44 INFO zookeeper.ClientCnxn: Socket connection established to node4/, initiating session
12/01/09 19:45:44 INFO zookeeper.ClientCnxn: Session establishment complete on server node4/, sessionid = 0x334c10a876b00b6, negotiated timeout = 180000
12/01/09 19:45:44 INFO mapreduce.TableInputFormatBase: running server=6, map range low=0, map range high=-6
12/01/09 19:45:45 INFO mapred.JobClient: Running job: job_201201091352_0019
12/01/09 19:45:46 INFO mapred.JobClient: map 0% reduce 0%
12/01/09 19:45:50 INFO mapred.JobClient: map 100% reduce 0%
12/01/09 19:45:50 INFO mapred.JobClient: Job complete: job_201201091352_0019
12/01/09 19:45:50 INFO mapred.JobClient: Counters: 12
12/01/09 19:45:50 INFO mapred.JobClient: Job Counters
12/01/09 19:45:50 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=3061
12/01/09 19:45:50 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
12/01/09 19:45:50 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
12/01/09 19:45:50 INFO mapred.JobClient: Rack-local map tasks=1
12/01/09 19:45:50 INFO mapred.JobClient: Launched map tasks=1
12/01/09 19:45:50 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=0
12/01/09 19:45:50 INFO mapred.JobClient: FileSystemCounters
12/01/09 19:45:50 INFO mapred.JobClient: HDFS_BYTES_READ=56
12/01/09 19:45:50 INFO mapred.JobClient: FILE_BYTES_WRITTEN=56750
12/01/09 19:45:50 INFO mapred.JobClient: Map-Reduce Framework
12/01/09 19:45:50 INFO mapred.JobClient: Map input records=12
12/01/09 19:45:50 INFO mapred.JobClient: Spilled Records=0
12/01/09 19:45:50 INFO mapred.JobClient: Map output records=12
12/01/09 19:45:50 INFO mapred.JobClient: SPLIT_RAW_BYTES=56