Our company recently decided to integrate Ceph storage. After some research, I chose to implement it through Amazon's S3 API. The implementation class is as follows:

/**
 * Title: S3Manager
 * Description: S3 API implementation for Ceph storage. Reference docs:
 * https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/dev/RetrievingObjectUsingJava.html
 * http://docs.ceph.org.cn/radosgw/s3/
 * author: xu jun
 * date: 2018/10/22
 */
@Slf4j
@Service
public class S3Manager extends StorageManagerBase implements StorageManager {
    private final UKID ukid;
    private final S3ClientConfig s3ClientConfig;
    private final RedisManage redisManage;
    private AmazonS3 amazonClient;

    @Autowired
    public S3Manager(UKID ukid, S3ClientConfig s3ClientConfig, RedisManage redisManage) {
        this.ukid = ukid;
        this.s3ClientConfig = s3ClientConfig;
        this.redisManage = redisManage;
    }

    private AmazonS3 getAmazonClient() {
        if (amazonClient == null) {
            String accessKey = s3ClientConfig.getAccessKey();
            String secretKey = s3ClientConfig.getSecretKey();
            String endpoint = s3ClientConfig.getEndPoint();

            AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
            ClientConfiguration clientConfig = new ClientConfiguration();
            clientConfig.setProtocol(Protocol.HTTP);

            AmazonS3 conn = AmazonS3ClientBuilder.standard()
                    .withClientConfiguration(clientConfig)
                    .withCredentials(new AWSStaticCredentialsProvider(credentials))
                    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, ""))
                    .withPathStyleAccessEnabled(true)
                    .build();
            // Check whether the bucket has been created
            checkBucket(conn);
            amazonClient = conn;
        }
        return amazonClient;
    }

    @Override
    public String uploadFile(byte[] fileData, String extension) {
        log.info("Storage s3 api, upload file start");
        // Generate a random ID for the uploaded file
        long fileId = ukid.getGeneratorID();
        String fileName = Long.toString(fileId);
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        // ObjectMetadata is omitted (null); acceptable for small in-memory uploads
        PutObjectResult result = conn.putObject(bucketName, fileName, new ByteArrayInputStream(fileData), null);
        log.info("Storage s3 api, put object result :{}", result);
        log.info("Storage s3 api, upload file end, file name: {}", fileName);
        return fileName;
    }

    @Override
    public String uploadAppenderFile(byte[] fileData, String extension) {
        log.info("Storage s3 api, upload appender file start");
        // Generate a random ID for the uploaded file
        long ukId = ukid.getGeneratorID();
        String fileName = Long.toString(ukId);
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        List<PartETag> partETags = new ArrayList<>();
        // Initiate the multipart upload
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, fileName);
        InitiateMultipartUploadResult initResponse = conn.initiateMultipartUpload(initRequest);
        String uploadId = initResponse.getUploadId();

        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(fileData);
        int contentLength = fileData.length;
        // Upload the first part (S3 part numbers start at 1)
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(fileName)
                .withUploadId(uploadId)
                .withPartNumber(1)
                .withPartSize(contentLength)
                .withInputStream(byteArrayInputStream);
        UploadPartResult uploadPartResult = conn.uploadPart(uploadPartRequest);
        try {
            byteArrayInputStream.close();
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        }
        partETags.add(uploadPartResult.getPartETag());
        Integer partNumber = uploadPartResult.getPartNumber();

        // Cache the upload state in Redis so later chunks can resume this upload
        S3CacheMode cacheMode = new S3CacheMode();
        cacheMode.setPartETags(partETags);
        cacheMode.setPartNumber(partNumber);
        cacheMode.setUploadId(uploadId);
        redisManage.set(fileName, cacheMode);

        log.info("Storage s3 api, upload appender file end, fileName: {}", fileName);
        return fileName;
    }

    @Override
    public void uploadChunkFile(ChunkFileSaveParams chunkFileSaveParams) {
        log.info("Storage s3 api, upload chunk file start");
        String fileName = chunkFileSaveParams.getFileAddress();
        Result result = redisManage.get(fileName);
        JSONObject jsonObject = (JSONObject) result.getData();
        if (jsonObject == null) {
            throw FileCenterExceptionConstants.CACHE_DATA_NOT_EXIST;
        }
        S3CacheMode cacheMode = jsonObject.toJavaObject(S3CacheMode.class);
        Integer partNumber = cacheMode.partNumber;
        String uploadId = cacheMode.getUploadId();
        List<PartETag> partETags = cacheMode.partETags;
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();

        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(chunkFileSaveParams.getBytes());
        int contentLength = chunkFileSaveParams.getBytes().length;
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(fileName)
                .withUploadId(uploadId)
                .withPartNumber(partNumber + 1)
                .withPartSize(contentLength)
                .withInputStream(byteArrayInputStream);
        UploadPartResult uploadPartResult = conn.uploadPart(uploadPartRequest);
        partETags.add(uploadPartResult.getPartETag());
        partNumber = uploadPartResult.getPartNumber();
        try {
            byteArrayInputStream.close();
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        }

        // Update the cached upload state
        S3CacheMode cacheModeUpdate = new S3CacheMode();
        cacheModeUpdate.setPartETags(partETags);
        cacheModeUpdate.setPartNumber(partNumber);
        cacheModeUpdate.setUploadId(uploadId);
        redisManage.set(fileName, cacheModeUpdate);

        if (chunkFileSaveParams.getChunk().equals(chunkFileSaveParams.getChunks() - 1)) {
            // Last chunk: complete the multipart upload and materialize the object
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, fileName,
                    uploadId, partETags);
            conn.completeMultipartUpload(compRequest);
        }
        log.info("Storage s3 api, upload chunk file end");
    }

    @Override
    public byte[] downloadFile(String fileName) {
        log.info("Storage s3 api, download file start");
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        S3Object object;
        if (conn.doesObjectExist(bucketName, fileName)) {
            object = conn.getObject(bucketName, fileName);
        } else {
            throw FileCenterExceptionConstants.OBJECT_NOT_EXIST;
        }
        log.debug("Storage s3 api, get object result :{}", object);

        byte[] fileByte;
        InputStream inputStream = object.getObjectContent();
        try {
            fileByte = IOUtils.toByteArray(inputStream);
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        } finally {
            // Close exactly once, in finally
            try {
                inputStream.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        log.info("Storage s3 api, download file end");
        return fileByte;
    }

    @Override
    public byte[] downloadFile(String fileName, long fileOffset, long fileSize) {
        log.info("Storage s3 api, download file by block start");
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        S3Object object;
        if (conn.doesObjectExist(bucketName, fileName)) {
            // Ranged download; the HTTP Range header is inclusive on both ends,
            // so the end offset is fileOffset + fileSize - 1
            GetObjectRequest getObjectRequest = new GetObjectRequest(bucketName, fileName)
                    .withRange(fileOffset, fileOffset + fileSize - 1);
            object = conn.getObject(getObjectRequest);
        } else {
            throw FileCenterExceptionConstants.OBJECT_NOT_EXIST;
        }
        log.info("Storage s3 api, get object result :{}", object);

        // Read the data
        byte[] buf;
        InputStream in = object.getObjectContent();
        try {
            buf = inputToByte(in, (int) fileSize);
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        } finally {
            try {
                in.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        log.info("Storage s3 api, download file by block end");
        return buf;
    }

    @Override
    public String fileSecret(String filePath) {
        return null;
    }

    @Override
    public String fileDecrypt(String filePath) {
        return null;
    }

    @Override
    public String getDomain() {
        return null;
    }

    /**
     * Check whether the bucket has been created
     *
     * @param conn client connection
     */
    private void checkBucket(AmazonS3 conn) {
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        if (conn.doesBucketExist(bucketName)) {
            log.debug("Storage s3 api, bucketName is found: " + bucketName);
        } else {
            log.warn("Storage s3 api, bucket does not exist, creating it: " + bucketName);
            conn.createBucket(bucketName);
        }
    }

    /**
     * Convert an InputStream to byte[]
     *
     * @param inStream input stream
     * @param fileSize file size
     * @return the bytes read
     * @throws IOException on read failure
     */
    private static byte[] inputToByte(InputStream inStream, int fileSize) throws IOException {
        ByteArrayOutputStream swapStream = new ByteArrayOutputStream();
        byte[] buff = new byte[fileSize];
        int rc;
        while ((rc = inStream.read(buff, 0, fileSize)) > 0) {
            swapStream.write(buff, 0, rc);
        }
        return swapStream.toByteArray();
    }

    /**
     * Debug helper: dump the stream's contents to the log
     *
     * @param input input stream
     * @throws IOException on read failure
    private static void displayTextInputStream(InputStream input) throws IOException {
        // Read the text input stream one line at a time and display each line.
        BufferedReader reader = new BufferedReader(new InputStreamReader(input));
        String line;
        while ((line = reader.readLine()) != null) {
            log.info(line);
        }
    }
    */
}

The business-level interfaces to be implemented include chunked upload (with resume support), chunked download, and so on; the class above is the low-level layer and contains no business logic. A minimal usage sketch follows.
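For illustration, here is a minimal sketch of how a business layer might drive a resumable chunked upload through this class. The ChunkFileSaveParams setters (setFileAddress, setBytes, setChunk, setChunks) and its no-arg constructor are assumed from the getters used above and may differ in the real class:

    // Minimal sketch, assuming ChunkFileSaveParams exposes setters matching
    // the getters used in S3Manager (an assumption, not a confirmed API).
    public String uploadInChunks(S3Manager s3Manager, byte[][] chunks) {
        // The first chunk initiates the multipart upload and yields the object key;
        // the upload state (uploadId, part ETags) is cached in Redis under that key.
        String fileName = s3Manager.uploadAppenderFile(chunks[0], "bin");
        // Each later chunk resumes from the cached state, so the caller may
        // stop and restart between chunks (resumable upload).
        for (int i = 1; i < chunks.length; i++) {
            ChunkFileSaveParams params = new ChunkFileSaveParams();
            params.setFileAddress(fileName);      // key returned by the first call
            params.setBytes(chunks[i]);           // this chunk's payload
            params.setChunk(i);                   // zero-based chunk index
            params.setChunks(chunks.length);      // total chunk count
            s3Manager.uploadChunkFile(params);    // completes the upload on the last chunk
        }
        return fileName;
    }

Note that every part except the last must respect the 5 MB minimum part size mentioned in the notes below.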

Maven dependency:

        <!-- S3 API for Ceph storage -->
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk</artifactId>
            <version>1.11.</version>
        </dependency>

Development notes:

  1. The S3 API documentation (Java edition) on the Ceph site is sparse and outdated, and is basically no longer usable as a reference. Look up the API and code samples on the Amazon site instead (a Chinese translation is provided, which is appreciated).

  2. The S3 API itself provides no append operation, which makes chunked upload awkward to implement (unlike FastDFS and OSS, which make this easy).

  3. The default minimum part size for multipart upload is 5 MB; this can be changed in the server configuration (see the sketch after this list).

  4. When a domain name is used as the endpoint, the bucket name is by default treated as a subdomain of it (this requires DNS resolution, so it is not recommended). To address the bucket as a path instead, enable that in the client connection configuration, as shown below.
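To illustrate points 3 and 4: the 5 MB floor on part size is enforced by RGW on the server side; I believe the relevant knob is rgw_multipart_min_part_size in the RGW section of ceph.conf, but treat that option name as an assumption and verify it against your Ceph release. On the client side, path-style addressing is exactly what .withPathStyleAccessEnabled(true) in the class above turns on; the difference is only in how the request URL is formed (the endpoint below is a placeholder):

        // Virtual-hosted style (SDK default): http://<bucket>.<endpoint>/<key>
        //   - requires wildcard DNS for *.<endpoint>
        // Path style:                         http://<endpoint>/<bucket>/<key>
        //   - no extra DNS records needed, which suits a bare RGW endpoint
        AmazonS3 client = AmazonS3ClientBuilder.standard()
                .withEndpointConfiguration(
                        new AwsClientBuilder.EndpointConfiguration("http://rgw.example.com:7480", ""))
                .withPathStyleAccessEnabled(true)
                .build();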
