It seems nobody around here has written up the Java client code for Sqoop 1.99.4 yet, so I'll be the first to give it a try.


If your project uses Maven:

<dependency>
    <groupId>org.apache.sqoop</groupId>
    <artifactId>sqoop-client</artifactId>
    <version>1.99.4</version>
</dependency>

If your project is a plain Java project:

Just import all the jars under the lib directory of the shell folder in the Sqoop 1.99.4 distribution (the ones under server are not needed).
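One thing to note before the two examples below: the connector IDs they hard-code (1 for the HDFS connector and 2 for the generic JDBC connector) are the IDs assigned on my server and may differ on yours. Here is a minimal sketch for listing the connectors the server actually registers, assuming the same server URL as in the examples; the class name ListConnectors is just for illustration:

package org.admln.sqoopOperate;

import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.MConnector;

// Prints every connector registered on the Sqoop 2 server, so the
// hard-coded connector IDs in the examples can be checked for your installation.
public class ListConnectors {
    public static void main(String[] args) {
        SqoopClient client = new SqoopClient("http://hadoop:12000/sqoop/");
        for (MConnector connector : client.getConnectors()) {
            System.out.println(connector.getPersistenceId() + " : " + connector.getUniqueName());
        }
    }
}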


HDFS -> MySQL

package org.admln.sqoopOperate;

import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.MFromConfig;
import org.apache.sqoop.model.MJob;
import org.apache.sqoop.model.MLink;
import org.apache.sqoop.model.MLinkConfig;
import org.apache.sqoop.model.MSubmission;
import org.apache.sqoop.model.MToConfig;
import org.apache.sqoop.submission.counter.Counter;
import org.apache.sqoop.submission.counter.CounterGroup;
import org.apache.sqoop.submission.counter.Counters;
import org.apache.sqoop.validation.Status;

public class HDFSToMysql {
    public static void main(String[] args) {
        sqoopTransfer();
    }

    public static void sqoopTransfer() {
        // Initialization: point the client at the Sqoop 2 server
        String url = "http://hadoop:12000/sqoop/";
        SqoopClient client = new SqoopClient(url);

        // Create the source link (HDFS connector)
        long fromConnectorId = 1;
        MLink fromLink = client.createLink(fromConnectorId);
        fromLink.setName("HDFS connector");
        fromLink.setCreationUser("admln");
        MLinkConfig fromLinkConfig = fromLink.getConnectorLinkConfig();
        fromLinkConfig.getStringInput("linkConfig.uri").setValue("hdfs://hadoop:8020/");
        Status fromStatus = client.saveLink(fromLink);
        if (fromStatus.canProceed()) {
            System.out.println("HDFS link created, ID: " + fromLink.getPersistenceId());
        } else {
            System.out.println("Failed to create HDFS link");
        }

        // Create the destination link (JDBC connector)
        long toConnectorId = 2;
        MLink toLink = client.createLink(toConnectorId);
        toLink.setName("JDBC connector");
        toLink.setCreationUser("admln");
        MLinkConfig toLinkConfig = toLink.getConnectorLinkConfig();
        toLinkConfig.getStringInput("linkConfig.connectionString").setValue("jdbc:mysql://hadoop:3306/hive");
        toLinkConfig.getStringInput("linkConfig.jdbcDriver").setValue("com.mysql.jdbc.Driver");
        toLinkConfig.getStringInput("linkConfig.username").setValue("hive");
        toLinkConfig.getStringInput("linkConfig.password").setValue("hive");
        Status toStatus = client.saveLink(toLink);
        if (toStatus.canProceed()) {
            System.out.println("JDBC link created, ID: " + toLink.getPersistenceId());
        } else {
            System.out.println("Failed to create JDBC link");
        }

        // Create a job that reads from the HDFS link and writes to the JDBC link
        long fromLinkId = fromLink.getPersistenceId();
        long toLinkId = toLink.getPersistenceId();
        MJob job = client.createJob(fromLinkId, toLinkId);
        job.setName("HDFS to MySQL job");
        job.setCreationUser("admln");

        // "From" side of the job: the HDFS input directory to export
        MFromConfig fromJobConfig = job.getFromJobConfig();
        fromJobConfig.getStringInput("fromJobConfig.inputDirectory").setValue("/out/aboutyunLog/HiveExport/ipstatistical/data");

        // "To" side of the job: the target MySQL schema and table
        MToConfig toJobConfig = job.getToJobConfig();
        toJobConfig.getStringInput("toJobConfig.schemaName").setValue("aboutyunlog");
        toJobConfig.getStringInput("toJobConfig.tableName").setValue("ipstatistical");
        //toJobConfig.getStringInput("fromJobConfig.partitionColumn").setValue("id");

        // Optional driver config, e.g. the number of extractors (I have not fully worked this part out yet)
        //MDriverConfig driverConfig = job.getDriverConfig();
        //driverConfig.getStringInput("throttlingConfig.numExtractors").setValue("3");

        Status status = client.saveJob(job);
        if (status.canProceed()) {
            System.out.println("Job created, ID: " + job.getPersistenceId());
        } else {
            System.out.println("Failed to create job");
        }

        // Start the job
        long jobId = job.getPersistenceId();
        MSubmission submission = client.startJob(jobId);
        System.out.println("Job submission status: " + submission.getStatus());
        while (submission.getStatus().isRunning() && submission.getProgress() != -1) {
            System.out.println("Progress: " + String.format("%.2f %%", submission.getProgress() * 100));
            // Report progress every three seconds
            try {
                Thread.sleep(3000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            // Re-fetch the submission, otherwise the loop keeps checking a stale status
            submission = client.getJobStatus(jobId);
        }
        System.out.println("Job finished.");
        System.out.println("Hadoop job ID: " + submission.getExternalId());
        Counters counters = submission.getCounters();
        if (counters != null) {
            System.out.println("Counters:");
            for (CounterGroup group : counters) {
                System.out.print("\t");
                System.out.println(group.getName());
                for (Counter counter : group) {
                    System.out.print("\t\t");
                    System.out.print(counter.getName());
                    System.out.print(": ");
                    System.out.println(counter.getValue());
                }
            }
        }
        if (submission.getExceptionInfo() != null) {
            System.out.println("Job failed, exception info: " + submission.getExceptionInfo());
        }
        System.out.println("HDFS-to-MySQL transfer via Sqoop completed");
    }
}
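A note on the string keys passed to getStringInput ("linkConfig.uri", "toJobConfig.tableName", and so on): they are connector-specific, and a wrong key makes saveLink or saveJob fail validation. The sketch below dumps the input names a connector actually exposes, using the connector metadata classes shipped in the sqoop-client jar; the class name and the connector ID 2 are my own placeholders:

package org.admln.sqoopOperate;

import java.util.List;

import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.MConfig;
import org.apache.sqoop.model.MConnector;
import org.apache.sqoop.model.MInput;

// Lists the config input names of a connector, e.g. "linkConfig.connectionString"
// or "fromJobConfig.tableName", so you can see which keys getStringInput accepts.
public class DescribeConnectorConfigs {
    public static void main(String[] args) {
        SqoopClient client = new SqoopClient("http://hadoop:12000/sqoop/");
        MConnector connector = client.getConnector(2L); // 2 = generic JDBC connector on my server

        System.out.println("Link config inputs:");
        describeConfigs(connector.getLinkConfig().getConfigs());
        System.out.println("From-job config inputs:");
        describeConfigs(connector.getFromConfig().getConfigs());
        System.out.println("To-job config inputs:");
        describeConfigs(connector.getToConfig().getConfigs());
    }

    // Print the fully qualified name of every input in the given configs
    private static void describeConfigs(List<MConfig> configs) {
        for (MConfig config : configs) {
            for (MInput<?> input : config.getInputs()) {
                System.out.println("  " + input.getName());
            }
        }
    }
}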

MySQL -> HDFS

package org.admln.sqoopOperate;

import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.MDriverConfig;
import org.apache.sqoop.model.MFromConfig;
import org.apache.sqoop.model.MJob;
import org.apache.sqoop.model.MLink;
import org.apache.sqoop.model.MLinkConfig;
import org.apache.sqoop.model.MSubmission;
import org.apache.sqoop.model.MToConfig;
import org.apache.sqoop.submission.counter.Counter;
import org.apache.sqoop.submission.counter.CounterGroup;
import org.apache.sqoop.submission.counter.Counters;
import org.apache.sqoop.validation.Status;

public class MysqlToHDFS {
    public static void main(String[] args) {
        sqoopTransfer();
    }

    public static void sqoopTransfer() {
        // Initialization: point the client at the Sqoop 2 server
        String url = "http://hadoop:12000/sqoop/";
        SqoopClient client = new SqoopClient(url);

        // Create the source link (JDBC connector)
        long fromConnectorId = 2;
        MLink fromLink = client.createLink(fromConnectorId);
        fromLink.setName("JDBC connector");
        fromLink.setCreationUser("admln");
        MLinkConfig fromLinkConfig = fromLink.getConnectorLinkConfig();
        fromLinkConfig.getStringInput("linkConfig.connectionString").setValue("jdbc:mysql://hadoop:3306/hive");
        fromLinkConfig.getStringInput("linkConfig.jdbcDriver").setValue("com.mysql.jdbc.Driver");
        fromLinkConfig.getStringInput("linkConfig.username").setValue("hive");
        fromLinkConfig.getStringInput("linkConfig.password").setValue("hive");
        Status fromStatus = client.saveLink(fromLink);
        if (fromStatus.canProceed()) {
            System.out.println("JDBC link created, ID: " + fromLink.getPersistenceId());
        } else {
            System.out.println("Failed to create JDBC link");
        }

        // Create the destination link (HDFS connector)
        long toConnectorId = 1;
        MLink toLink = client.createLink(toConnectorId);
        toLink.setName("HDFS connector");
        toLink.setCreationUser("admln");
        MLinkConfig toLinkConfig = toLink.getConnectorLinkConfig();
        toLinkConfig.getStringInput("linkConfig.uri").setValue("hdfs://hadoop:8020/");
        Status toStatus = client.saveLink(toLink);
        if (toStatus.canProceed()) {
            System.out.println("HDFS link created, ID: " + toLink.getPersistenceId());
        } else {
            System.out.println("Failed to create HDFS link");
        }

        // Create a job that reads from the JDBC link and writes to the HDFS link
        long fromLinkId = fromLink.getPersistenceId();
        long toLinkId = toLink.getPersistenceId();
        MJob job = client.createJob(fromLinkId, toLinkId);
        job.setName("MySQL to HDFS job");
        job.setCreationUser("admln");

        // "From" side of the job: the MySQL schema, table, and partition column
        MFromConfig fromJobConfig = job.getFromJobConfig();
        fromJobConfig.getStringInput("fromJobConfig.schemaName").setValue("sqoop");
        fromJobConfig.getStringInput("fromJobConfig.tableName").setValue("sqoop");
        fromJobConfig.getStringInput("fromJobConfig.partitionColumn").setValue("id");

        // "To" side of the job: the HDFS output directory
        MToConfig toJobConfig = job.getToJobConfig();
        toJobConfig.getStringInput("toJobConfig.outputDirectory").setValue("/usr/tmp");

        // Driver config: number of extractors (parallel map tasks)
        MDriverConfig driverConfig = job.getDriverConfig();
        driverConfig.getStringInput("throttlingConfig.numExtractors").setValue("3");

        Status status = client.saveJob(job);
        if (status.canProceed()) {
            System.out.println("Job created, ID: " + job.getPersistenceId());
        } else {
            System.out.println("Failed to create job");
        }

        // Start the job
        long jobId = job.getPersistenceId();
        MSubmission submission = client.startJob(jobId);
        System.out.println("Job submission status: " + submission.getStatus());
        while (submission.getStatus().isRunning() && submission.getProgress() != -1) {
            System.out.println("Progress: " + String.format("%.2f %%", submission.getProgress() * 100));
            // Report progress every three seconds
            try {
                Thread.sleep(3000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            // Re-fetch the submission, otherwise the loop keeps checking a stale status
            submission = client.getJobStatus(jobId);
        }
        System.out.println("Job finished.");
        System.out.println("Hadoop job ID: " + submission.getExternalId());
        Counters counters = submission.getCounters();
        if (counters != null) {
            System.out.println("Counters:");
            for (CounterGroup group : counters) {
                System.out.print("\t");
                System.out.println(group.getName());
                for (Counter counter : group) {
                    System.out.print("\t\t");
                    System.out.print(counter.getName());
                    System.out.print(": ");
                    System.out.println(counter.getValue());
                }
            }
        }
        if (submission.getExceptionInfo() != null) {
            System.out.println("Job failed, exception info: " + submission.getExceptionInfo());
        }
        System.out.println("MySQL-to-HDFS transfer via Sqoop completed");
    }
}
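Both programs leave their links and jobs registered on the Sqoop server, so re-running them creates duplicates. Below is a rough cleanup sketch using the corresponding client calls (stopJob, deleteJob, deleteLink); the IDs are placeholders and should be replaced with the ones printed when the link and job were created:

package org.admln.sqoopOperate;

import org.apache.sqoop.client.SqoopClient;

// Removes a previously created job and link from the Sqoop 2 server.
public class Cleanup {
    public static void main(String[] args) {
        SqoopClient client = new SqoopClient("http://hadoop:12000/sqoop/");
        long jobId = 1;   // placeholder: the ID printed when the job was created
        long linkId = 1;  // placeholder: the ID printed when the link was created

        // Only call stopJob while the job is actually running; on a finished job it fails
        client.stopJob(jobId);

        // Delete the job first, then the link it references
        client.deleteJob(jobId);
        client.deleteLink(linkId);
    }
}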

Don't ask why there is no code for transferring between MySQL and HBase or Hive.


20150102
