Once a database design is finished, you usually need to keep a copy of the desc output of every table in a wiki or other documentation, above all the meaning and purpose of each field. Producing that by hand is clearly not an option, so I wrote a small Python program that automatically generates the desc description of every table in a database and writes it out in a readable format.
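For reference, the generated desc.txt stacks one block per table: the table name, a header row, and one formatted line per field, with the Extra column replaced by a Chinese description whenever one is known (see fieldDescMapping below). A hypothetical excerpt; the user table and its fields are invented purely for illustration:

    user
    Field            Type                     Null  Key      Default  Extra
    id               bigint(20)               NO    PRI      NULL     唯一标识
    gmt_create       datetime                 NO             NULL     创建时间
    gmt_modified     datetime                 NO             NULL     修改时间
    status           tinyint(4)               NO             0        实体状态

The full script follows.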

# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: db_tables_descs.py
# Purpose: generate the tables that describe the meanings of fields in db
#
# Author: qin.shuq
#
# Created: 2014/11/17
# Output: desc.txt
# recording the tables that describe the meanings of fields in db
#-------------------------------------------------------------------------------
#!/usr/bin/env python

import db

globalFieldDescs = ('Field', 'Type', 'Null', 'Key', 'Default', 'Extra')
globalDescFile = 'desc.txt'

conflictedWithMysqlKeywords = set(['group'])

fieldDescMapping = {
    'id': '唯一标识',
    'is_deleted': '是否逻辑删除',
    'status': '实体状态',
    'type': '实体类型',
    'priority': '优先级',
    'password': '密码',
    'ip': 'ip 地址',
    'mac': 'mac 地址',
    'protocol': '访问协议',
    'user_id': '用户唯一标识'
}

def formatCols(fieldDesc):
    return "%-16s %-24s %-5s %-8s %-8s %-30s" % fieldDesc

def withNewLine(astr):
    return astr + '\n'

def commonFieldsProcess(fieldDescList):
    fieldName = fieldDescList[0]
    fieldDesc = fieldDescMapping.get(fieldName)
    desclen = len(fieldDescList)
    if fieldDesc is None:
        if fieldName.startswith('gmt_c'):
            fieldDesc = '创建时间'
        elif fieldName.startswith('gmt_m'):
            fieldDesc = '修改时间'
        else:
            fieldDesc = fieldDescList[desclen-1]
    fieldDescList[desclen-1] = fieldDesc

def formatF(fieldDescTuple):
    fieldDescList = list(fieldDescTuple)
    fieldLen = len(fieldDescList)
    for i in range(fieldLen):
        if fieldDescList[i] is None:
            fieldDescList[i] = 'NULL'
        else:
            fieldDescList[i] = str(fieldDescList[i])
    commonFieldsProcess(fieldDescList)
    return formatCols(tuple(fieldDescList))

def format(tableDesc):
    desc = ''
    for fieldDescTuple in tableDesc:
        desc += withNewLine(formatF(fieldDescTuple))
    return desc

def descDb(givenDb):
    tablesRet = givenDb.query("show tables;")
    tableNames = [table[0] for table in tablesRet]
    desc = u''
    for tablename in tableNames:
        if tablename in conflictedWithMysqlKeywords:
            tablename = '`' + tablename + '`'
        descSql = "desc " + tablename
        tableDesc = givenDb.query(descSql)
        desc += withNewLine(tablename)
        desc += withNewLine(formatCols(globalFieldDescs)).decode('utf-8')
        desc += withNewLine(format(tableDesc)).decode('utf-8')
        desc += withNewLine('').decode('utf-8')
    return desc

def main():
    descFile = open(globalDescFile, 'w')
    desc = descDb(db.Mydb())
    descFile.write(desc.encode('utf-8'))
    descFile.close()

if __name__ == '__main__':
    main()
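Running python db_tables_descs.py from a directory that contains db.py (below) is enough to produce desc.txt. If you prefer to call the generator from your own script, a minimal reuse sketch could look like this (it assumes db.Mydb() can actually reach your database):

# -*- coding: utf-8 -*-
# Minimal reuse sketch: call descDb() directly instead of the script's main().
# Assumption: db.Mydb() (defined in db.py below) connects to the target database.
import db
import db_tables_descs

if __name__ == '__main__':
    text = db_tables_descs.descDb(db.Mydb())  # unicode text, one block per table
    out = open('desc.txt', 'w')
    out.write(text.encode('utf-8'))           # Python 2: encode before writing bytes
    out.close()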

db.py

#!/usr/ali/bin/python
# coding=utf-8
'''Implements a database api to your db.

Example 1: Query SQL

a. Use execute() method to execute query sql:

    db.execute('select * from ip')

    # Get only the first two rows
    db.get_rows(2)
    # result like [('10.10.0.1', 'my'), ..]

    # Get the next two rows, but each row record is a dict
    db.get_rows(2, is_dict = True)
    # result like [{'address':'10.10.0.1', 'name': 'my'}, ..]

b. Use query() method to execute query sql directly:

    # The query() method will get the result rows immediately
    db.query('select * from ip', size = 2, is_dict = True)

c. Use split_query() method to split long query into small ones:

    # Assume that the name_list's length is 10000
    # See the docstring of split_query() for more details
    db.split_query('select address from ip', 'name', name_list)

Example 2: Insert SQL

a. Insert a new record into ip table:

    db.execute("insert into ip('address','name') values('192.168.0.1','vm-xxx')")

    # If auto commit set to false, call commit() method manually
    db.commit()

b. Insert multi-records into ip table:

    db.executemany("insert into ip('address','name') values(%s,%s)", [
            ('192.168.0.1', 'vm-xxx'),
            ('192.168.0.2', 'vm-yyy'),
            ('192.168.0.3', 'vm-zzz')])
    db.commit()

Note: db.multi_insert is an alias for executemany method.

See test_main() method for more examples.
'''

from database import DB

class Mydb(DB):
    '''A simple query interface of a specific database.'''
    def __init__(self, read_only = True,
                 auto_commit = False, timeout = 5, auto_connect = False,
                 max_idle_time = 28800):
        '''Initialize the database access object.'''
        # Get the database parameters
        args = {'host':'127.0.0.1', 'user':'root', 'passwd':'', 'db':'mysql', 'port':3306, 'charset':'utf8'}

        # Set extra connection parameters
        args['connect_timeout'] = timeout
        args['auto_commit'] = auto_commit
        args['max_idle_time'] = max_idle_time
        args['auto_connect'] = auto_connect

        DB.__init__(self, **args)
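As a quick sanity check, Mydb can also be used on its own. A minimal sketch, assuming the hard-coded connection parameters above (127.0.0.1, root, database 'mysql') are reachable and that a user table exists in that database:

# -*- coding: utf-8 -*-
# Minimal sketch: list a few tables and describe one of them (Python 2 + MySQLdb assumed).
import db

mydb = db.Mydb()

tables = mydb.query("show tables;")             # e.g. [('user',), ('help_topic',), ...]
print tables[:3]

rows = mydb.query("desc user", is_dict = True)  # 'user' is an assumed table name
for row in rows:
    print row.Field, row.Type                   # dict rows are Storage objects (dot access)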

database.py

#!/usr/ali/bin/python
# coding=utf-8
'''Implements a simple database interface

Example 0: Create connection:

    # Set auto commit to false, default case
    db = DB(auto_commit = False, host = 'x', user = 'x', passwd = 'x', db = 'x')
    # Set auto commit to true
    db = DB(auto_commit = True, host = 'x', user = 'x', passwd = 'x', db = 'x')
    # Set auto connect to true, this will set auto commit to true too
    # This will enable auto connect when the connection is timeout
    db = DB(auto_connect = True, host = 'x', user = 'x', passwd = 'x', db = 'x')

Example 1: Query SQL

a. Use query() method to execute query sql directly:

    # The query() method will get the result rows immediately
    db.query('select * from ip', size = 2, is_dict = True)

b. Use split_query() method to split long query into small ones:

    # Assume that the name_list's length is 10000
    # See the docstring of split_query() for more details
    db.split_query('select address from ip', 'name', name_list)

Example 2: Insert SQL

a. Insert a new record into ip table:

    db.execute("insert into ip('address','name') values('192.168.0.1','vm-xxx')")

    # If auto commit set to false, call commit() method manually
    db.commit()

b. Insert multi-records into ip table:

    db.executemany("insert into ip('address','name') values(%s,%s)", [
            ('192.168.0.1', 'vm-xxx'),
            ('192.168.0.2', 'vm-yyy'),
            ('192.168.0.3', 'vm-zzz')])
    db.commit()

Note: db.multi_insert is an alias for executemany method.
'''

# Can be 'Prototype', 'Development', 'Product'
__status__ = 'Development'
__author__ = 'tuantuan.lv <tuantuan.lv@alibaba-inc.com>'

import re
import time
import MySQLdb

from storage import Storage

OperationalError = MySQLdb.OperationalError

def _format(sql):
    '''Format the sql.'''
    return ' '.join(sql.split())

class DB():
    '''A simple database query interface.'''
    def __init__(self, auto_commit = False, auto_connect = False,
                 max_idle_time = 28800, **kwargs):
        '''Initialize the DB object.'''
        #
        # Remember the max idle time (default: 28800)
        # You should set this value to mysql option 'wait_timeout'
        #
        # mysql> show variables like 'wait_timeout';
        # +---------------+-------+
        # | Variable_name | Value |
        # +---------------+-------+
        # | wait_timeout  | 28800 |
        # +---------------+-------+
        #
        self.max_idle_time = max_idle_time

        kwargs.setdefault('charset', 'utf8')            # set default charset to utf8
        kwargs['port'] = int(kwargs.get('port', 3306))  # set default port to 3306

        self._db = None                    # MySQLdb connection object
        self._db_cursor = None             # MySQLdb cursor object
        self.cursor = None                 # MySQLdb cursor object, deprecated
        self._db_args = kwargs             # MySQL db connection args
        self._last_use_time = time.time()  # Last active time

        self._auto_connect = auto_connect  # Auto connect when timeout
        self._auto_commit = auto_commit    # Auto commit

        # Open a new mysql connection
        self._reconnect()

    def __del__(self):
        self.close()

    def close(self):
        '''Close the database connection.'''
        if self._db is not None:
            self._db_cursor.close()
            self._db.close()
            self._db = None

    def _reconnect(self):
        '''Close existing connection and re-open a new one.'''
        self.close()
        self._db = MySQLdb.connect(**self._db_args)

        # Override auto commit setting if auto connect is true
        if self._auto_connect:
            self._db.autocommit(True)
        else:
            self._db.autocommit(self._auto_commit)

        self._db_cursor = self._db.cursor()
        self.cursor = self._db_cursor

    def _ensure_connected(self):
        '''Ensure we connect to mysql.'''
        # Mysql by default closes client connections that are idle for
        # 8 hours, but the client library does not report this fact until
        # you try to perform a query and it fails. Protect against this
        # case by preemptively closing and reopening the connection
        # if it has been idle for too long (8 hours by default).
        if (self._db is None or
            (time.time() - self._last_use_time > self.max_idle_time)):
            self._reconnect()

        self._last_use_time = time.time()

    def _cursor(self):
        '''Get the cursor.'''
        if self._auto_connect:
            self._ensure_connected()

        return self._db_cursor

    def execute(self, sql, args = None):
        '''Execute a sql and return the affected row number.

        You should call the get_rows method to fetch the rows manually.
        '''
        cursor = self._cursor()
        return cursor.execute(_format(sql), args)

    def execute_lastrowid(self, sql, args = None):
        '''Execute a sql and return the last row id.

        You should call the get_rows method to fetch the rows manually.
        '''
        cursor = self._cursor()
        cursor.execute(_format(sql), args)

        return cursor.lastrowid

    def executemany(self, sql, args):
        '''Execute a multi-row insert.

        You can use this method to do a multi-row insert:

            c.executemany(
                """INSERT INTO breakfast (name, spam, eggs, sausage, price)
                VALUES (%s, %s, %s, %s, %s)""",
                [
                    ("Spam and Sausage Lover's Plate", 5, 1, 8, 7.95 ),
                    ("Not So Much Spam Plate", 3, 2, 0, 3.95 ),
                    ("Don't Wany ANY SPAM! Plate", 0, 4, 3, 5.95 )
                ] )

        See http://mysql-python.sourceforge.net/MySQLdb.html for more help.
        '''
        cursor = self._cursor()
        return cursor.executemany(_format(sql), args)

    # Execute a multi-row insert, the same as executemany()
    multi_insert = executemany

    def get_rows(self, size = None, is_dict = False):
        '''Get the result rows after executing.'''
        cursor = self._cursor()
        description = cursor.description

        if size is None:
            rows = cursor.fetchall()
        else:
            rows = cursor.fetchmany(size)

        if rows is None:
            rows = []

        if is_dict:
            dict_rows = []
            dict_keys = [ r[0] for r in description ]

            for row in rows:
                dict_rows.append(Storage(zip(dict_keys, row)))

            rows = dict_rows

        return list(rows)

    def query(self, sql, args = None, size = None, is_dict = False):
        '''Execute a query sql and return the rows immediately.'''
        self.execute(sql, args)
        return self.get_rows(size, is_dict)

    # Alias of query() method
    select = query

    def split_query(self, sql, in_attr, in_list, max_cnt = 3000):
        '''Split one long query into many small ones.

        For example, if you want to select the records whose attrname is in
        one long list (larger than 8000) of possible values. If you decide to
        use 'attr in (...)' syntax, the length will exceed the maximum length
        of one sql allowed. In this case you must split the long query into many
        small ones.

        in_attr is the attribute name of in operator, and in_list is the possible
        value list. max_cnt is the maximum count of values in one small query.
        '''
        total = len(in_list)

        start = 0
        end = max_cnt

        result = []

        if re.search(r'\bwhere\b', sql.lower()):
            #if sql.lower().find('where ') != -1 or sql.lower().find('where\n') != -1:
            sql = '%s and %s in %%s' % (sql, in_attr)
        else:
            sql = '%s where %s in %%s' % (sql, in_attr)

        while start < total:
            if end < total:
                in_expr = "('%s')" % "','".join(in_list[start:end])
            else:
                in_expr = "('%s')" % "','".join(in_list[start:])

            result.extend(self.query(sql % in_expr))

            start = end
            end += max_cnt

        return result

    #def get_autoincrement_id(self, tbl):
    #    '''Get the next auto increment id of table.
    #
    #    Return None if the table doesn't have an auto-increment id.
    #    '''
    #    self.execute('SHOW TABLE STATUS LIKE %s', (tbl,))
    #    result = self.get_rows(is_dict = True)
    #
    #    if result[0]:
    #        return result[0]['Auto_increment']
    #    else:
    #        return None

    def commit(self):
        '''Commits the current transaction.'''
        if self._db is not None:
            self._db.commit()

    def rollback(self):
        '''Rollback the last transaction.'''
        if self._db is not None:
            self._db.rollback()

# vim: set expandtab smarttab shiftwidth=4 tabstop=4:

storage.py

#!/usr/ali/bin/python
# coding=utf-8
'''Wrap an existing dict, or create a new one, and access with dot notation

See test_main() for more examples.
'''

# Can be 'Prototype', 'Development', 'Product'
__status__ = 'Development'
__author__ = 'tuantuan.lv <tuantuan.lv@alibaba-inc.com>'

# Taken from http://stackoverflow.com/a/12187277
class Storage(object):
    '''Wrap an existing dict, or create a new one, and access with dot notation.

    The attribute _data is reserved and stores the underlying dictionary.

    args:
        d: Existing dict to wrap, an empty dict created by default.
        create: Create an empty, nested dict instead of raising a KeyError.
    '''
    def __init__(self, d = None, create = True):
        '''Initialize storage object.'''
        if d is None:  # Create empty storage object
            d = {}
        else:          # create as a dictionary
            d = dict(d)

        # Set storage attributes
        self.__dict__['__storage_data'] = d
        self.__dict__['__storage_create'] = create

    def __getattr__(self, name):
        '''Get the key value.'''
        try:
            value = self.__dict__['__storage_data'][name]
        except KeyError:
            # Create empty storage value if auto-create set to true
            if not self.__dict__['__storage_create']:
                raise
            value = {}
            self.__dict__['__storage_data'][name] = value

        # Create nested dict if the value has items attribute
        if isinstance(value, dict):
            value = Storage(value, self.__dict__['__storage_create'])
            self.__dict__['__storage_data'][name] = value

        return value

    def __setattr__(self, name, value):
        '''Set the storage key to value.'''
        self.__dict__['__storage_data'][name] = value

    def __delattr__(self, name):
        '''Delete the storage key.'''
        del self.__dict__['__storage_data'][name]

    def __contains__(self, name):
        '''Check whether the key exists.'''
        return name in self.__dict__['__storage_data']

    def __nonzero__(self):
        '''Check whether the storage is empty.'''
        return bool(self.__dict__['__storage_data'])

    # Defines common dict api
    __getitem__ = __getattr__
    __setitem__ = __setattr__
    __delitem__ = __delattr__

    def get(self, name, default = None):
        '''Defines a get method.'''
        return self.__dict__['__storage_data'].get(name, default)

    # Define dictionary like methods
    def keys(self):
        return self.__dict__['__storage_data'].keys()

    def items(self):
        return self.__dict__['__storage_data'].items()

    def values(self):
        return self.__dict__['__storage_data'].values()

    def setdefault(self, name, default = None):
        return self.__dict__['__storage_data'].setdefault(name, default)

    def pop(self, name, *args):
        return self.__dict__['__storage_data'].pop(name, *args)

    def update(self, d, **kwargs):
        return self.__dict__['__storage_data'].update(d, **kwargs)

    def clear(self):
        self.__dict__['__storage_data'].clear()

    def __len__(self):
        return len(self.__dict__['__storage_data'])

    def __iter__(self):
        return self.__dict__['__storage_data'].__iter__()

    def __unicode__(self):
        return u'<Storage %s>' % str(self.__dict__['__storage_data'])

    def __str__(self):
        return '<Storage %s>' % str(self.__dict__['__storage_data'])

    def __repr__(self):
        return '<Storage %s>' % repr(self.__dict__['__storage_data'])

def test_main():
    # Create an empty storage
    d1 = Storage()
    d1.a.b = 1
    d1.b.c = 2

    # Iterate the items in storage object
    for k, v in d1.items():
        print k, v

    # Create a storage from (key, value) tuples
    d3 = Storage(zip(['a','b','c'], [1,2,3]))
    print d3.a, d3.b, d3.c
    print d3

    # Create a storage from an existing dict
    d4 = Storage({'a':{'b':1}})
    print d4.a.b
    print d4

    # Check the attribute
    d5 = Storage()
    print 'a' in d5      # False
    print d5.a           # create attribute 'a'
    print 'a' in d5      # True
    print d5.get('c')
    print d5.get('d', 3)

    d5 = Storage(create = False)
    print 'a' in d5      # False
    print d5.get('a', 5)
    print d5.a           # raise KeyError
    print 'a' in d5      # False, also

if __name__ == '__main__':
    test_main()
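In this little toolkit, Storage mostly shows up as the row type returned by DB.get_rows(is_dict = True), which is what makes dot access to columns possible. A minimal sketch of that interplay (the ip table and its columns are assumed, and db.Mydb() must be configured as above):

# -*- coding: utf-8 -*-
# Minimal sketch: query rows as Storage objects and read columns two ways.
import db

mydb = db.Mydb()
for row in mydb.query('select address, name from ip', is_dict = True):
    print row.address, row.name             # dot notation, via Storage.__getattr__
    print row['address'], row.get('name')   # dict-style access still works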
