Our company recently decided to integrate Ceph storage. After some research, I chose to implement it through Amazon's S3 interface. The implementation class is shown below:

/**
 * Title: S3Manager
 * Description: S3-interface implementation for Ceph storage. Reference docs:
 * https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/dev/RetrievingObjectUsingJava.html
 * http://docs.ceph.org.cn/radosgw/s3/
 * author: xu jun
 * date: 2018/10/22
 */
@Slf4j
@Service
public class S3Manager extends StorageManagerBase implements StorageManager {
    private final UKID ukid;
    private final S3ClientConfig s3ClientConfig;
    private final RedisManage redisManage;
    private AmazonS3 amazonClient;

    @Autowired
    public S3Manager(UKID ukid, S3ClientConfig s3ClientConfig, RedisManage redisManage) {
        this.ukid = ukid;
        this.s3ClientConfig = s3ClientConfig;
        this.redisManage = redisManage;
    }

    private AmazonS3 getAmazonClient() {
        if (amazonClient == null) {
            String accessKey = s3ClientConfig.getAccessKey();
            String secretKey = s3ClientConfig.getSecretKey();
            String endpoint = s3ClientConfig.getEndPoint();

            AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
            ClientConfiguration clientConfig = new ClientConfiguration();
            clientConfig.setProtocol(Protocol.HTTP);

            AmazonS3 conn = AmazonS3ClientBuilder.standard()
                    .withClientConfiguration(clientConfig)
                    .withCredentials(new AWSStaticCredentialsProvider(credentials))
                    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, ""))
                    .withPathStyleAccessEnabled(true)
                    .build();
            // make sure the bucket exists before the client is handed out
            checkBucket(conn);
            amazonClient = conn;
        }
        return amazonClient;
    }

    @Override
    public String uploadFile(byte[] fileData, String extension) {
        log.info("Storage s3 api, upload file start");
        // generate a random id for the uploaded file
        long fileId = ukid.getGeneratorID();
        String fileName = Long.toString(fileId);
        // bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();

        // no ObjectMetadata is supplied, so the SDK buffers the stream to determine the content length
        PutObjectResult result = conn.putObject(bucketName, fileName, new ByteArrayInputStream(fileData), null);
        log.info("Storage s3 api, put object result :{}", result);
        log.info("Storage s3 api, upload file end, file name: {}", fileName);
        return fileName;
    }

    @Override
    public String uploadAppenderFile(byte[] fileData, String extension) {
        log.info("Storage s3 api, upload appender file start");
        // generate a random id for the uploaded file
        long ukId = ukid.getGeneratorID();
        String fileName = Long.toString(ukId);
        // bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        List<PartETag> partETags = new ArrayList<>();
        // initiate the multipart upload
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, fileName);
        InitiateMultipartUploadResult initResponse = conn.initiateMultipartUpload(initRequest);
        String uploadId = initResponse.getUploadId();

        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(fileData);
        Integer contentLength = fileData.length;
        // upload the first part (S3 part numbers start at 1)
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(fileName)
                .withUploadId(uploadId)
                .withPartNumber(1)
                .withPartSize(contentLength)
                .withInputStream(byteArrayInputStream);
        UploadPartResult uploadPartResult = conn.uploadPart(uploadPartRequest);
        try {
            byteArrayInputStream.close();
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        }
        partETags.add(uploadPartResult.getPartETag());
        Integer partNumber = uploadPartResult.getPartNumber();

        // cache the upload state (uploadId, part number, part ETags) in Redis so later chunks can resume
        S3CacheMode cacheMode = new S3CacheMode();
        cacheMode.setPartETags(partETags);
        cacheMode.setPartNumber(partNumber);
        cacheMode.setUploadId(uploadId);
        redisManage.set(fileName, cacheMode);

        log.info("Storage s3 api, upload appender file end, fileName: {}", fileName);
        return fileName;
    }

    @Override
    public void uploadChunkFile(ChunkFileSaveParams chunkFileSaveParams) {
        log.info("Storage s3 api, upload chunk file start");
        String fileName = chunkFileSaveParams.getFileAddress();
        // load the multipart-upload state cached in Redis
        Result result = redisManage.get(fileName);
        JSONObject jsonObject = (JSONObject) result.getData();
        if (jsonObject == null) {
            throw FileCenterExceptionConstants.CACHE_DATA_NOT_EXIST;
        }
        S3CacheMode cacheMode = jsonObject.toJavaObject(S3CacheMode.class);
        Integer partNumber = cacheMode.partNumber;
        String uploadId = cacheMode.getUploadId();
        List<PartETag> partETags = cacheMode.partETags;
        // bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();

        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(chunkFileSaveParams.getBytes());
        Integer contentLength = chunkFileSaveParams.getBytes().length;
        // upload this chunk as the next part
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(fileName)
                .withUploadId(uploadId)
                .withPartNumber(partNumber + 1)
                .withPartSize(contentLength)
                .withInputStream(byteArrayInputStream);
        UploadPartResult uploadPartResult = conn.uploadPart(uploadPartRequest);
        partETags.add(uploadPartResult.getPartETag());
        partNumber = uploadPartResult.getPartNumber();
        try {
            byteArrayInputStream.close();
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        }

        // write the updated upload state back to Redis
        S3CacheMode cacheModeUpdate = new S3CacheMode();
        cacheModeUpdate.setPartETags(partETags);
        cacheModeUpdate.setPartNumber(partNumber);
        cacheModeUpdate.setUploadId(uploadId);
        redisManage.set(fileName, cacheModeUpdate);

        // the last chunk completes the multipart upload and materializes the stored object
        if (chunkFileSaveParams.getChunk().equals(chunkFileSaveParams.getChunks() - 1)) {
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, fileName,
                    uploadId, partETags);
            conn.completeMultipartUpload(compRequest);
        }
        log.info("Storage s3 api, upload chunk file end");
    }

    @Override
    public byte[] downloadFile(String fileName) {
        log.info("Storage s3 api, download file start");
        // bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        S3Object object;
        if (conn.doesObjectExist(bucketName, fileName)) {
            object = conn.getObject(bucketName, fileName);
        } else {
            throw FileCenterExceptionConstants.OBJECT_NOT_EXIST;
        }
        log.debug("Storage s3 api, get object result :{}", object);

        byte[] fileByte;
        InputStream inputStream = object.getObjectContent();
        try {
            fileByte = IOUtils.toByteArray(inputStream);
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        } finally {
            // close the stream exactly once, in the finally block
            try {
                inputStream.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        log.info("Storage s3 api, download file end");
        return fileByte;
    }

    @Override
    public byte[] downloadFile(String fileName, long fileOffset, long fileSize) {
        log.info("Storage s3 api, download file by block start");
        // bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        S3Object object;
        if (conn.doesObjectExist(bucketName, fileName)) {
            // ranged download; withRange is inclusive at both ends, so the end byte is offset + size - 1
            GetObjectRequest getObjectRequest = new GetObjectRequest(bucketName, fileName)
                    .withRange(fileOffset, fileOffset + fileSize - 1);
            object = conn.getObject(getObjectRequest);
        } else {
            throw FileCenterExceptionConstants.OBJECT_NOT_EXIST;
        }
        log.info("Storage s3 api, get object result :{}", object);

        // read the data
        byte[] buf;
        InputStream in = object.getObjectContent();
        try {
            buf = inputToByte(in, (int) fileSize);
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        } finally {
            try {
                in.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        log.info("Storage s3 api, download file by block end");
        return buf;
    }

    @Override
    public String fileSecret(String filePath) {
        return null;
    }

    @Override
    public String fileDecrypt(String filePath) {
        return null;
    }

    @Override
    public String getDomain() {
        return null;
    }

    /**
     * Check whether the bucket exists, and create it if it does not
     *
     * @param conn client connection
     */
    private void checkBucket(AmazonS3 conn) {
        // bucket name
        String bucketName = s3ClientConfig.getBucketName();
        if (conn.doesBucketExist(bucketName)) {
            log.debug("Storage s3 api, bucket found: {}", bucketName);
        } else {
            log.warn("Storage s3 api, bucket does not exist, creating it: {}", bucketName);
            conn.createBucket(bucketName);
        }
    }

    /**
     * Convert an InputStream to a byte[]
     *
     * @param inStream input stream
     * @param fileSize file size
     * @return the bytes read
     * @throws IOException on read error
     */
    private static byte[] inputToByte(InputStream inStream, int fileSize) throws IOException {
        ByteArrayOutputStream swapStream = new ByteArrayOutputStream();
        byte[] buff = new byte[fileSize];
        int rc;
        while ((rc = inStream.read(buff, 0, fileSize)) > 0) {
            swapStream.write(buff, 0, rc);
        }
        return swapStream.toByteArray();
    }

    /**
     * Debug helper: prints the stream contents to the log (kept commented out)
     *
     * @param input input stream
     * @throws IOException on read error
    private static void displayTextInputStream(InputStream input) throws IOException {
        // Read the text input stream one line at a time and display each line.
        BufferedReader reader = new BufferedReader(new InputStreamReader(input));
        String line;
        while ((line = reader.readLine()) != null) {
            log.info(line);
        }
    }
    */
}

The business interfaces built on top of this need to support multipart upload (with resume support), chunked download, and so on; the class above is only the low-level storage layer and contains no business logic.
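To show how a business service might drive these primitives, here is a minimal, hypothetical sketch (not part of the original code, imports omitted as in the class above). The ChunkUploadService class and the ChunkFileSaveParams setters (setFileAddress, setBytes, setChunk, setChunks) are assumptions made for illustration; they mirror the getters used in S3Manager but may not match the real parameter class.

// Hypothetical business-layer caller; not part of the original implementation.
@Service
public class ChunkUploadService {

    private final S3Manager s3Manager;

    public ChunkUploadService(S3Manager s3Manager) {
        this.s3Manager = s3Manager;
    }

    /**
     * Resumable upload of a file that has already been split into chunks.
     * The first chunk initiates the multipart upload (and the Redis state),
     * every following chunk appends a part, and the last chunk (index chunks - 1)
     * makes uploadChunkFile call completeMultipartUpload.
     */
    public String upload(List<byte[]> chunks, String extension) {
        // chunk 0 goes through uploadAppenderFile and returns the object key
        String fileName = s3Manager.uploadAppenderFile(chunks.get(0), extension);

        for (int i = 1; i < chunks.size(); i++) {
            ChunkFileSaveParams params = new ChunkFileSaveParams();
            params.setFileAddress(fileName);  // key returned by uploadAppenderFile
            params.setBytes(chunks.get(i));   // payload of this chunk
            params.setChunk(i);               // zero-based chunk index
            params.setChunks(chunks.size());  // total number of chunks
            s3Manager.uploadChunkFile(params);
        }
        return fileName;
    }
}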

Maven dependency:

<!-- S3 interface for the Ceph storage -->
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk</artifactId>
    <version>1.11.</version>
</dependency>

Development notes:

  1. The S3 interface documentation (Java) on the Ceph site is sparse and outdated, and is basically no longer usable as a reference, so look up the API and code samples on the AWS site instead (a Chinese translation is provided, which is a nice touch).

  2. The S3 interface itself has no append operation, which makes chunked upload fiddly to implement (unlike FastDFS or OSS, where this is easy); multipart upload has to be used instead, as in the class above.

  3. By default the minimum part size for a multipart upload is 5 MB; if you need to change it, do so in the server-side configuration.

  4. If a domain name is used as the endpoint, the bucket name is accessed as a subdomain by default (which requires DNS resolution, so it is not recommended). To address the bucket as a path instead, path-style access has to be enabled in the client configuration; a short sketch follows below.
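To make point 4 concrete, here is a minimal sketch of the two addressing styles. The endpoint and credential values are placeholders rather than values from the original project, and imports are omitted as in the class above.

// Minimal sketch of path-style vs virtual-hosted-style access; values are placeholders.
AWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY");
String endpoint = "http://rgw.example.com:7480";

// Path-style: requests look like http://rgw.example.com:7480/<bucket>/<key>,
// so no per-bucket DNS record is needed.
AmazonS3 pathStyle = AmazonS3ClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials))
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, ""))
        .withPathStyleAccessEnabled(true)
        .build();

// Virtual-hosted style (the default when the flag is absent): requests look like
// http://<bucket>.rgw.example.com:7480/<key>, which needs a wildcard DNS record
// (*.rgw.example.com) resolving to the gateway.
AmazonS3 virtualHosted = AmazonS3ClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials))
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, ""))
        .build();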
