python version:    python2.7

需要安装的轮子:

boto
filechunkio command:
yum install python-pip && pip install boto filechunkio

另外还需要 ceph 集群的 user(ceph-s3) 以及该用户的 access_key、secret_key

代码:
#_*_coding:utf-8_*_
#yum install python-boto
import boto
import boto.s3.connection
#pip install filechunkio
from filechunkio import FileChunkIO
import math
import threading
import os
import Queue
import sys
class Chunk(object):
    """Descriptor for one slice of a file in a multipart S3 transfer.

    Attributes:
        num:    1-based part number (S3 multipart part numbers start at 1).
        offset: byte offset of the slice within the source file.
        length: slice length in bytes.
    """
    # NOTE: the original declared class-level defaults num/offset/len; `len`
    # shadowed the builtin and was never read (``__init__`` assigns
    # ``self.length``), so the dead class attributes are removed.
    def __init__(self, n, o, l):
        self.num = n
        self.offset = o
        self.length = l

#条件判断工具类
class switch(object):
    """Poor-man's switch/case helper (the classic ActiveState recipe).

    Usage::

        for case in switch(value):
            if case('a'):
                ...
                break
            if case():        # default
                ...
    """
    def __init__(self, value):
        self.value = value   # the value being dispatched on
        self.fall = False    # set once a case matches -> later cases fall through

    def __iter__(self):
        """Yield the match method exactly once, then stop."""
        yield self.match
        # Plain `return` ends the generator.  The original's
        # `raise StopIteration` is a PEP 479 error (RuntimeError on
        # Python 3.7+) and was never necessary.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            # empty args == default case; fall==True == fall-through
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
class CONNECTION(object):
    """Thin convenience wrapper around a boto 2.x S3 connection to a Ceph
    RADOS-gateway (RGW) endpoint.

    Provides bucket/key listing, small-file (<= 8 MB) upload/download,
    threaded multipart transfer of large files, deletion, and public
    download-URL generation.  All feedback is printed rather than raised.
    """

    def __init__(self, access_key, secret_key, ip, port, is_secure=False, chrunksize=8 << 20):
        # chrunksize: multipart part size.  Parts smaller than 8 MB make the
        # RGW multipart upload fail, hence the 8 << 20 (8 MiB) default.
        self.conn = boto.connect_s3(
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            host=ip, port=port,
            is_secure=is_secure,
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )
        self.chrunksize = chrunksize
        self.port = port  # kept so download URLs can be rewritten with the port

    def list_all(self):
        """Print every bucket and, per bucket, every key it contains."""
        all_buckets = self.conn.get_all_buckets()
        for bucket in all_buckets:
            print(u'PG容器名: %s' % (bucket.name))
            for key in bucket.list():
                # ' '*5 plus the py2 print-comma separator == 6 leading spaces
                print(' ' * 5 + ' ' + "%-20s%-20s%-20s%-40s%-20s" % (key.mode, key.owner.id, key.size, key.last_modified.split('.')[0], key.name))

    def get_show_buckets(self):
        """Print the name and creation time of every bucket."""
        for bucket in self.conn.get_all_buckets():
            print("Ceph-back-Name: {name}\tCreateTime: {created}".format(
                name=bucket.name,
                created=bucket.creation_date,
            ))

    def list_single(self, bucket_name):
        """Print every key in one bucket; complain and return if it is missing."""
        try:
            single_bucket = self.conn.get_bucket(bucket_name)
        except Exception:
            print('bucket %s is not exist' % bucket_name)
            return
        print(u'容器名: %s' % (single_bucket.name))
        for key in single_bucket.list():
            print(' ' * 5 + ' ' + "%-20s%-20s%-20s%-40s%-20s" % (key.mode, key.owner.id, key.size, key.last_modified.split('.')[0], key.name))

    def dowload_file(self, filepath, key_name, bucket_name):
        """Download a small (<= 8 MB) object into *filepath*.

        Prompts before overwriting an existing local file.  (Method name keeps
        the original's spelling for callers; see the alias below.)
        """
        all_bucket_name_list = [i.name for i in self.conn.get_all_buckets()]
        if bucket_name not in all_bucket_name_list:
            print('Bucket %s is not exist,please try again' % (bucket_name))
            return
        bucket = self.conn.get_bucket(bucket_name)
        all_key_name_list = [i.name for i in bucket.get_all_keys()]
        if key_name not in all_key_name_list:
            print('File %s is not exist,please try again' % (key_name))
            return
        key = bucket.get_key(key_name)
        if not os.path.exists(os.path.dirname(filepath)):
            print('Filepath %s is not exists, sure to create and try again' % (filepath))
            return
        if os.path.exists(filepath):
            while True:
                d_tag = raw_input('File %s already exists, sure you want to cover (Y/N)?' % (key_name)).strip()
                if d_tag not in ['Y', 'N'] or len(d_tag) == 0:
                    continue
                elif d_tag == 'Y':
                    os.remove(filepath)
                    break
                elif d_tag == 'N':
                    return
        os.mknod(filepath)  # create an empty file to receive the contents
        try:
            key.get_contents_to_filename(filepath)
        except Exception:
            pass  # NOTE: deliberately best-effort; failures are silent

    # Alias with the corrected spelling; the misspelled name stays for callers.
    download_file = dowload_file

    def upload_file(self, filepath, key_name, bucket_name):
        """Upload a small (<= 8 MB) local file as *key_name*.

        Offers to create a missing bucket and prompts before overwriting an
        existing key.
        """
        try:
            bucket = self.conn.get_bucket(bucket_name)
        except Exception:
            print('bucket %s is not exist' % bucket_name)
            tag = raw_input('Do you want to create the bucket %s: (Y/N)?' % bucket_name).strip()
            while tag not in ['Y', 'N']:
                tag = raw_input('Please input (Y/N)').strip()
            if tag == 'N':
                return
            elif tag == 'Y':
                self.conn.create_bucket(bucket_name)
                bucket = self.conn.get_bucket(bucket_name)
        all_key_name_list = [i.name for i in bucket.get_all_keys()]
        if key_name in all_key_name_list:
            while True:
                f_tag = raw_input(u'File already exists, sure you want to cover (Y/N)?: ').strip()
                if f_tag not in ['Y', 'N'] or len(f_tag) == 0:
                    continue
                elif f_tag == 'Y':
                    break
                elif f_tag == 'N':
                    return
        key = bucket.new_key(key_name)
        if not os.path.exists(filepath):
            print('File %s does not exist, please make sure you want to upload file path and try again' % (key_name))
            return
        try:
            # fix: the original used file() and never closed the handle
            with open(filepath, 'rb') as f:
                key.set_contents_from_string(f.read())
        except Exception:
            pass  # NOTE: deliberately best-effort; failures are silent

    def delete_file(self, key_name, bucket_name):
        """Delete one key from a bucket (no-op with a message when missing)."""
        all_bucket_name_list = [i.name for i in self.conn.get_all_buckets()]
        if bucket_name not in all_bucket_name_list:
            print('Bucket %s is not exist,please try again' % (bucket_name))
            return
        bucket = self.conn.get_bucket(bucket_name)
        all_key_name_list = [i.name for i in bucket.get_all_keys()]
        if key_name not in all_key_name_list:
            print('File %s is not exist,please try again' % (key_name))
            return
        key = bucket.get_key(key_name)
        try:
            bucket.delete_key(key.name)
        except Exception:
            pass  # NOTE: deliberately best-effort; failures are silent

    def delete_bucket(self, bucket_name):
        """Delete an (empty) bucket; prints a message when it does not exist."""
        all_bucket_name_list = [i.name for i in self.conn.get_all_buckets()]
        if bucket_name not in all_bucket_name_list:
            print('Bucket %s is not exist,please try again' % (bucket_name))
            return
        bucket = self.conn.get_bucket(bucket_name)
        try:
            self.conn.delete_bucket(bucket.name)
        except Exception:
            pass  # NOTE: deliberately best-effort; failures are silent

    def init_queue(self, filesize, chunksize):
        """Split *filesize* bytes into Chunk descriptors and return them in a
        Queue (one entry per multipart part, part numbers starting at 1)."""
        chunkcnt = int(math.ceil(filesize * 1.0 / chunksize))  # *1.0 forces py2 float division
        q = Queue.Queue(maxsize=chunkcnt)
        for i in range(0, chunkcnt):
            offset = chunksize * i
            length = min(chunksize, filesize - offset)  # last part may be short
            q.put(Chunk(i + 1, offset, length))
        return q

    def upload_trunk(self, filepath, mp, q, id):
        """Thread worker: drain *q*, uploading each Chunk as a multipart part.

        NOTE(review): q.empty()/q.get() is racy between workers; it appears
        benign here because q.join() in the caller waits on task_done() and
        the workers are daemon threads — confirm before reusing elsewhere.
        """
        while not q.empty():
            chunk = q.get()
            fp = FileChunkIO(filepath, 'r', offset=chunk.offset, bytes=chunk.length)
            mp.upload_part_from_file(fp, part_num=chunk.num)
            fp.close()
            q.task_done()

    def upload_file_multipart(self, filepath, key_name, bucket_name, threadcnt=8):
        """Upload a large file via S3 multipart upload using *threadcnt* threads.

        Flow: stat the file -> ensure the bucket (offer to create) -> confirm
        overwrite -> initiate multipart -> queue chunks -> threaded upload ->
        complete the multipart upload.
        """
        filesize = os.stat(filepath).st_size
        try:
            bucket = self.conn.get_bucket(bucket_name)
        except Exception:
            print('bucket %s is not exist' % bucket_name)
            tag = raw_input('Do you want to create the bucket %s: (Y/N)?' % bucket_name).strip()
            while tag not in ['Y', 'N']:
                tag = raw_input('Please input (Y/N)').strip()
            if tag == 'N':
                return
            elif tag == 'Y':
                self.conn.create_bucket(bucket_name)
                bucket = self.conn.get_bucket(bucket_name)
        all_key_name_list = [i.name for i in bucket.get_all_keys()]
        if key_name in all_key_name_list:
            while True:
                f_tag = raw_input(u'File already exists, sure you want to cover (Y/N)?: ').strip()
                if f_tag not in ['Y', 'N'] or len(f_tag) == 0:
                    continue
                elif f_tag == 'Y':
                    break
                elif f_tag == 'N':
                    return
        mp = bucket.initiate_multipart_upload(key_name)
        q = self.init_queue(filesize, self.chrunksize)
        for i in range(0, threadcnt):
            t = threading.Thread(target=self.upload_trunk, args=(filepath, mp, q, i))
            t.setDaemon(True)  # daemon: don't block interpreter exit
            t.start()
        q.join()  # wait until every chunk reported task_done()
        mp.complete_upload()

    def download_chrunk(self, filepath, key_name, bucket_name, q, id):
        """Thread worker: fetch each queued Chunk with an HTTP Range request
        and write it at its offset inside the pre-created local file."""
        while not q.empty():
            chrunk = q.get()
            offset = chrunk.offset
            length = chrunk.length
            bucket = self.conn.get_bucket(bucket_name)
            # fix: HTTP Range end is INCLUSIVE; the original's offset+length
            # requested one extra byte (masked by resp.read(length)).
            resp = bucket.connection.make_request('GET', bucket_name, key_name, headers={'Range': "bytes=%d-%d" % (offset, offset + length - 1)})
            data = resp.read(length)
            fp = FileChunkIO(filepath, 'r+', offset=chrunk.offset, bytes=chrunk.length)
            fp.write(data)
            fp.close()
            q.task_done()

    def download_file_multipart(self, filepath, key_name, bucket_name, threadcnt=8):
        """Download a large (> 8 MB) object with *threadcnt* ranged-GET threads."""
        all_bucket_name_list = [i.name for i in self.conn.get_all_buckets()]
        if bucket_name not in all_bucket_name_list:
            print('Bucket %s is not exist,please try again' % (bucket_name))
            return
        bucket = self.conn.get_bucket(bucket_name)
        all_key_name_list = [i.name for i in bucket.get_all_keys()]
        if key_name not in all_key_name_list:
            print('File %s is not exist,please try again' % (key_name))
            return
        key = bucket.get_key(key_name)
        if not os.path.exists(os.path.dirname(filepath)):
            print('Filepath %s is not exists, sure to create and try again' % (filepath))
            return
        if os.path.exists(filepath):
            while True:
                d_tag = raw_input('File %s already exists, sure you want to cover (Y/N)?' % (key_name)).strip()
                if d_tag not in ['Y', 'N'] or len(d_tag) == 0:
                    continue
                elif d_tag == 'Y':
                    os.remove(filepath)
                    break
                elif d_tag == 'N':
                    return
        os.mknod(filepath)  # pre-create the target so workers can seek into it
        filesize = key.size
        q = self.init_queue(filesize, self.chrunksize)
        for i in range(0, threadcnt):
            t = threading.Thread(target=self.download_chrunk, args=(filepath, key_name, bucket_name, q, i))
            t.setDaemon(True)
            t.start()
        q.join()

    def generate_object_download_urls(self, key_name, bucket_name, valid_time=0):
        """Make a key public-read and print its plain-HTTP download URL,
        splicing the non-standard port back in when it is not 80."""
        all_bucket_name_list = [i.name for i in self.conn.get_all_buckets()]
        if bucket_name not in all_bucket_name_list:
            print('Bucket %s is not exist,please try again' % (bucket_name))
            return
        bucket = self.conn.get_bucket(bucket_name)
        all_key_name_list = [i.name for i in bucket.get_all_keys()]
        if key_name not in all_key_name_list:
            print('File %s is not exist,please try again' % (key_name))
            return
        key = bucket.get_key(key_name)
        try:
            key.set_canned_acl('public-read')
            download_url = key.generate_url(valid_time, query_auth=False, force_http=True)
            if self.port != 80:
                # Rebuild scheme://host  +  :port/  +  path
                # NOTE(review): assumes generate_url omitted the port — confirm
                # against the boto version in use.
                x1 = download_url.split('/')[0:3]
                x2 = download_url.split('/')[3:]
                s1 = u'/'.join(x1)
                s2 = u'/'.join(x2)
                s3 = ':%s/' % (str(self.port))
                download_url = s1 + s3 + s2
            print(download_url)
        except Exception:
            pass  # NOTE: deliberately best-effort; failures are silent
class ceph_object(object):
    """Dispatches a CLI verb onto a CONNECTION wrapper.

    *filepath* is interpreted as ``/<bucket>/<key>``: the directory part
    (slashes stripped) is the bucket ("back"), the basename is the key.
    """
    def __init__(self, conn):
        self.conn = conn  # a CONNECTION instance

    def operation_cephCluster(self, filepath, command):
        """Run *command* against the bucket/key encoded in *filepath*."""
        back = os.path.dirname(filepath).strip('/')  # bucket name
        path = os.path.basename(filepath)            # key name
        for case in switch(command):
            if case('delfile'):
                print("正在删除:" + back + "__BACK文件夹中___FileName:" + path)
                self.conn.delete_file(path, back)
                self.conn.list_single(back)
                break
            if case('push'):  # large-file (>8 MB) detection not implemented yet
                print("上传到:" + back + "__BACK文件夹中___FileName:" + path)
                # fix: CONNECTION defines no create_bucket(); go through the
                # underlying boto connection instead.
                self.conn.conn.create_bucket(back)
                self.conn.upload_file_multipart(filepath, path, back)
                self.conn.list_single(back)
                break
            if case('pull'):  # large-file (>8 MB) detection not implemented yet
                # fix: original had "+ +path" (TypeError: unary + on str) and
                # called the module-global `conn` instead of self.conn.
                print("下载:" + back + "back文件中___FileName:" + path)
                self.conn.download_file_multipart(filepath, path, back)
                os.system('ls -al')  # show the downloaded file
                break
            if case('delback'):
                print("删除:" + back + "-back文件夹")
                self.conn.delete_bucket(back)
                self.conn.list_all()
                break
            if case('check'):
                print("ceph-cluster所有back:")
                self.conn.get_show_buckets()
                break
            if case('checkfile'):
                self.conn.list_all()
                break
            if case('createback', 'creaetback'):  # also accept the original typo
                self.conn.conn.create_bucket(back)  # fix: via the boto connection
                self.conn.get_show_buckets()
                break
            if case():  # default: unknown/empty command
                print("something else! input null")
if __name__ == '__main__':
    # WARNING(security): credentials are hard-coded (and now published with
    # this file) — rotate them and load from environment/config instead.
    access_key = 'ZZNYFWKUQQD832IMIGJ2'
    secret_key = '9eM9hIHt9q0XVNJ7WKhBPlC0hzUhOKhRhweHW8hO'
    # fix: guard against missing CLI arguments (original raised IndexError).
    if len(sys.argv) < 3:
        print('Usage: %s </bucket/key> <command>' % sys.argv[0])
        sys.exit(1)
    conn = CONNECTION(access_key, secret_key, '192.168.100.23', 7480)
    ceph_object(conn).operation_cephCluster(sys.argv[1], sys.argv[2])
    # e.g. ceph_object(conn).operation_cephCluster('/my-first-s31-bucket/Linux.pdf', 'check')


再来张图吧:
当然你还可以用docker , docker-compose 搭一个owncloud 实现对ceph-cluster WEB Ui界面管理
docker-compose 编排代码如下:
version: ''
services:
owncloud:
image: owncloud
restart: always
links:
- mysql:mysql
volumes:
- "./owncloud-data/owncloud:/var/www/html/data"
ports:
- 80:80
mysql:
image: migs/mysql-5.7
restart: always
volumes:
- "./mysql-data:/var/lib/mysql"
ports:
- 3306:3306
environment:
MYSQL_ROOT_PASSWORD: ""
MYSQL_DATABASE: ownCloud

配置如下:

 最终效果

 

 

python2.7 操作ceph-cluster S3对象接口 实现: 上传 下载 查询 删除 顺便使用Docker装个owncloud 实现UI管理的更多相关文章

  1. Java 客户端操作 FastDFS 实现文件上传下载替换删除

    FastDFS 的作者余庆先生已经为我们开发好了 Java 对应的 SDK.这里需要解释一下:作者余庆并没有及时更新最新的 Java SDK 至 Maven 中央仓库,目前中央仓库最新版仍旧是 1.2 ...

  2. Python 自动化paramiko操作linux使用shell命令,以及文件上传下载linux与windows之间的实现

    # coding=utf8 import paramiko """ /* python -m pip install paramiko python version 3. ...

  3. AWS S3 API实现文件上传下载

    http://blog.csdn.net/marvin198801/article/details/47662965

  4. 如何利用京东云的对象存储(OSS)上传下载文件

    作者:刘冀 在公有云厂商里都有对象存储,京东云也不例外,而且也兼容S3的标准因此可以利用相关的工具去上传下载文件,本文主要记录一下利用CloudBerry Explorer for Amazon S3 ...

  5. Django 08 Django模型基础3(关系表的数据操作、表关联对象的访问、多表查询、聚合、分组、F、Q查询)

    Django 08 Django模型基础3(关系表的数据操作.表关联对象的访问.多表查询.聚合.分组.F.Q查询) 一.关系表的数据操作 #为了能方便学习,我们进入项目的idle中去执行我们的操作,通 ...

  6. java微信接口之四—上传素材

    一.微信上传素材接口简介 1.请求:该请求是使用post提交地址为: https://api.weixin.qq.com/cgi-bin/media/uploadnews?access_token=A ...

  7. jm解决乱码问题-参数化-数据库操作-文件上传下载

    jm解决乱码问题-参数化-数据库操作-文件上传下载 如果JM出果运行结果是乱码(解决中文BODY乱码的问题) 找到JM的安装路径,例如:C:\apache-jmeter-3.1\bin 用UE打开jm ...

  8. 配置允许匿名用户登录访问vsftpd服务,进行文档的上传下载、文档的新建删除等操作

    centos7环境下 临时关闭防火墙 #systemctl stop firewalld 临时关闭selinux #setenforce 0 安装ftp服务 #yum install vsftpd - ...

  9. SFTP上传下载文件、文件夹常用操作

    SFTP上传下载文件.文件夹常用操作 1.查看上传下载目录lpwd 2.改变上传和下载的目录(例如D盘):lcd  d:/ 3.查看当前路径pwd 4.下载文件(例如我要将服务器上tomcat的日志文 ...

随机推荐

  1. subprocess.Popen()

    def run(): str_shell='ipconfig' sub=subprocess.Popen(args=str_shell,shell=True,stdin=subprocess.PIPE ...

  2. 「学习笔记」FFT 快速傅里叶变换

    目录 「学习笔记」FFT 快速傅里叶变换 啥是 FFT 呀?它可以干什么? 必备芝士 点值表示 复数 傅立叶正变换 傅里叶逆变换 FFT 的代码实现 还会有的 NTT 和三模数 NTT... 「学习笔 ...

  3. python中的 dir()内置函数的作用以及使用方法

    dir() 内置函数的作用 python 内置方法有很多,无论是初学者还是精通python 的程序员都不能全部即住所有的方法,这时候 dir() 方法就非常有用了,使用 dir()函数可以查看对象内的 ...

  4. [转]利用 Commons-Fileupload 实现文件上传

    转载 Java Web开发人员可以使用Apache文件上传组件来接收浏览器上传的文件,该组件由多个类共同组成,但是,对于使用该组件来编写文件上传功能的Java Web开发人员来说,只需要了解和使用其中 ...

  5. 12 JavaScript String对象 & Date对象

    <script> var a = "string"; var b = new String("string"); var c = new Strin ...

  6. Linux创建智能DNS

    根据客户端源IP地址的不同,DNS服务提供不同的解析地址 1.安装dns服务,修改全局配置文件/etc/named.conf # yum -y install bind # vim /etc/name ...

  7. npm和npx

    npx 指令会先在项目的node_modules里面找资源包 npm info 包名称  [查看资源包的信息]

  8. 「国家集训队」小Z的袜子

    「国家集训队」小Z的袜子 传送门 莫队板子题. 注意计算答案的时候,由于分子分母都要除以2,所以可以直接约掉,这样在开桶算的时候也方便一些. 参考代码: #include <algorithm& ...

  9. CSS - 去除图片img底侧空白缝隙

    1. 图片底部有空隙 <!DOCTYPE html> <html lang="en"> <head> <meta charset=&quo ...

  10. mysql将一个表拆分成多个表(一)(转载)

    转载 直接根据数据量进行拆分 有一个5000条数据的表,要把它变成没1000条数据一个表的5等份. 假设:表名:xuesi 主键:kidxuesi共有5000条数据,kid从1到5000自动增长题目: ...