This commit is contained in:
15309518018 2024-03-21 17:12:54 +08:00
parent 17d692ac98
commit ce5f63dd7f
6 changed files with 402 additions and 1 deletions

View File

@ -0,0 +1,44 @@
# xls2ddl
A tool to create relational database schemas from xlsx files.
There is an xlsx template file called "model.xltx" in this repo.
## How to use it
1 double click "model.xltx"
2 in the "summary" datasheet, write the table name, table label and primary key
3 in the "fields" datasheet, create all the fields of the table
4 in the "validation" datasheet, identify the indexes
   first column is the index name
   second column: choose "idx"
   third column: use "index:f1,f2,..." for a duplicatable index or "unique:f1,f2,..." for a unique index
5 save it as an xlsx file named after the table
6 repeat steps 1 to 5 for every table
7 translate all the xlsx files to DDL SQL by running, in the folder holding all the xlsx files:
for mysql
```
python path/to/xls2ddl.py mysql .
```
for sqlite3
```
python path/to/xls2ddl.py sqlite3 .
```
for oracle
```
python path/to/xls2ddl.py oracle .
```
for postgresql
```
python path/to/xls2ddl.py postgresql .
```
for sqlserver
```
python path/to/xls2ddl.py sqlserver .
```

BIN
model.xltx Normal file

Binary file not shown.

4
requirements.txt Executable file
View File

@ -0,0 +1,4 @@
xlrd
openpyxl
git+https://github.com/yumoqing/appPublic
git+https://github.com/yumoqing/sqlor

@ -1 +0,0 @@
Subproject commit a8be8655e9bde86fedc8a99a51f4e8c6de4697c4

67
xls2ddl.py Executable file
View File

@ -0,0 +1,67 @@
# -*- coding:utf-8 -*-
import io
import sys
from traceback import print_exc
from xlsxData import CRUDData, xlsxFactory
import codecs
import json
from sqlor.ddl_template_sqlserver import sqlserver_ddl_tmpl
from sqlor.ddl_template_mysql import mysql_ddl_tmpl
from sqlor.ddl_template_oracle import oracle_ddl_tmpl
from sqlor.ddl_template_postgresql import postgresql_ddl_tmpl
from appPublic.myTE import MyTemplateEngine
from appPublic.folderUtils import listFile
# Supported database type -> DDL template text (provided by sqlor).
# Keys are matched case-insensitively: xls2ddl() looks up dbtype.lower().
tmpls = {
    "sqlserver":sqlserver_ddl_tmpl,
    "mysql":mysql_ddl_tmpl,
    "oracle":oracle_ddl_tmpl,
    "postgresql":postgresql_ddl_tmpl
}
def xls2ddl(xlsfile, dbtype):
    """Render a single model file (xlsx workbook or json dump) into DDL text.

    Returns the rendered SQL string, or None when the workbook is not a
    recognised model file. Raises Exception for an unsupported dbtype.
    """
    if xlsfile.endswith('json'):
        # Pre-extracted model: the json file already holds the template data.
        with codecs.open(xlsfile, 'r', 'utf-8') as fp:
            model = json.load(fp)
    else:
        workbook = xlsxFactory(xlsfile)
        if workbook is None:
            return
        model = workbook.read()
    tmpl = tmpls.get(dbtype.lower())
    if tmpl is None:
        raise Exception('%s database not implemented' % dbtype)
    engine = MyTemplateEngine([])
    return engine.renders(tmpl, model)
def model2ddl(folder, dbtype):
    """Render DDL for every xlsx/json model file found under folder.

    Each successfully converted file contributes a '-- <filename>' comment
    line followed by its DDL. Files that fail to convert are reported to
    stdout and skipped; the remaining DDL is still returned.
    """
    ddl_str = ''
    for f in listFile(folder, suffixs=['xlsx', 'json']):
        try:
            s = xls2ddl(f, dbtype)
            if s is None:
                # BUGFIX: xls2ddl returns None for unrecognised workbooks;
                # the original '%s%s' format appended the literal text
                # "None" to the output here.
                continue
            # Header is only emitted on success (the original printed it
            # even when conversion then raised).
            ddl_str += f'-- {f}\n{s}'
        except Exception as e:
            print('Exception:', e, 'f=', f)
            print_exc()
    return ddl_str
if __name__ == '__main__':
    import sys
    # Work around UnicodeEncodeError when printing non-ASCII text on a
    # Windows console using the gbk codec, e.g.:
    # UnicodeEncodeError: 'gbk' codec can't encode character '\xa0' in position 20249
    # by forcing utf-8 on stdout.
    # BEGIN
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
    #
    # END
    if len(sys.argv) < 3:
        print('Usage:%s dbtype folder' % sys.argv[0])
        sys.exit(1)
    s = model2ddl(sys.argv[2], sys.argv[1])
    print(s)

287
xlsxData.py Executable file
View File

@ -0,0 +1,287 @@
import os
import sys
from openpyxl import load_workbook
from appPublic.myjson import loadf,dumpf,dumps
from appPublic.dictObject import DictObject
class TypeConvert:
    """Converts a raw spreadsheet cell value to a declared type.

    The type name comes from a column header suffix ("field:int" etc.);
    conv() dispatches to the matching to_<type> method and returns the
    value unchanged when no converter exists.
    """
    def conv(self, typ, v):
        """Convert v to type typ; return v unchanged if typ is None or unknown."""
        if typ is None:
            return v
        f = getattr(self, 'to_' + typ, None)
        if f is None:
            return v
        return f(v)

    def to_int(self, v):
        # Best-effort: unconvertible input collapses to 0 rather than raising.
        try:
            return int(v)
        except (TypeError, ValueError):
            return 0

    def to_float(self, v):
        try:
            return float(v)
        except (TypeError, ValueError):
            return 0.0

    def to_str(self, v):
        try:
            return str(v)
        except Exception:
            return ''

    def to_json(self, v):
        """Parse v as JSON; on any parse failure return v unchanged."""
        # BUGFIX: the original called an undefined name `loads` and the
        # resulting NameError was silently swallowed, so the raw string was
        # always returned. Use the stdlib json parser instead.
        import json
        if v == '':
            return v
        try:
            return json.loads(v)
        except (TypeError, ValueError):
            return v

    # Date/time values pass through untouched (presumably already converted
    # by the workbook reader — TODO confirm).
    def to_date(self, v):
        return v

    def to_time(self, v):
        return v

    def to_timestamp(self, v):
        return v

    def to_cruddata(self, v):
        """Load a 'prefix"file.xlsx"[expr]' reference as CRUD model data.

        The optional trailing expr indexes into the loaded data; on any
        failure the raw string is returned unchanged.
        """
        vs = v.split('"', 3)
        # BUGFIX: the original compared the list itself (`vs < 3`), which is
        # a TypeError on Python 3; the length was intended.
        if len(vs) < 3:
            return v
        fn = vs[1]
        d = CRUDData(fn)
        try:
            data = d.read()
        except Exception:
            return v
        # Empty trailing part -> return the whole data set. (The original
        # tested `vs[2] is None`, which str.split can never produce; the
        # eval of "d" below yielded the same result, so behavior is kept.)
        if not vs[2]:
            return data
        # NOTE(review): eval of spreadsheet-supplied text — do not feed
        # untrusted workbooks through this path.
        cmd = "d%s" % vs[2]
        return eval(cmd, {'d': data})

    def to_xlsxdata(self, v):
        """Same as to_cruddata but loads a plain XLSXData workbook."""
        vs = v.split('"', 3)
        if len(vs) < 3:  # BUGFIX: was `vs < 3` (TypeError on Python 3)
            return v
        fn = vs[1]
        d = XLSXData(fn)
        try:
            data = d.read()
        except Exception:
            return v
        if not vs[2]:
            return data
        # NOTE(review): eval on workbook-supplied text — see to_cruddata.
        cmd = "d%s" % vs[2]
        return eval(cmd, {'d': data})
class CRUDException(Exception):
    """Raised when a model workbook is structurally invalid."""

    def __init__(self, xlsfile, errmsg, *args, **argv):
        super().__init__(*args, **argv)
        # Remember which workbook failed and why, for __str__ below.
        self.xlsfile = xlsfile
        self.errmsg = errmsg

    def __str__(self):
        return f'filename:{self.xlsfile} error:{self.errmsg}'
class XLSXData(object):
    """Reads an xlsx workbook into {sheet_name: [DictObject records]}.

    The first row of every sheet names the fields; a "name:type" header
    applies TypeConvert to that column's values.
    """
    def __init__(self, xlsxfile):
        """Accept either a filename or an already-loaded workbook object."""
        if isinstance(xlsxfile, str):
            self.xlsxfile = xlsxfile
            self.book = load_workbook(filename=xlsxfile)
        else:
            # Workbook handed over by xlsxFactory; self.xlsxfile is set
            # by the factory afterwards.
            self.book = xlsxfile

    def readRecords(self, name, sheet):
        """Return {name: records} for one worksheet.

        Cells that are None and rows that end up empty are skipped.
        """
        recs = []
        fields = []
        tc = TypeConvert()
        for i, row in enumerate(sheet.values):
            if i == 0:
                # Header row defines field names and optional types.
                fields = self.getFieldNames(row)
                continue
            rec = {}
            for j, a in enumerate(row):
                if a is None:
                    continue
                k = fields[j][0]
                v = tc.conv(fields[j][1], a)
                rec[k] = v
            if rec == {}:
                continue
            recs.append(DictObject(**rec))
        return {name: recs}

    def read(self):
        """Read every worksheet; return a DictObject keyed by sheet name."""
        ret = {}
        for i, s in enumerate(self.book.worksheets):
            ret.update(self.readRecords(self.book.sheetnames[i], s))
        return DictObject(**ret)

    def getFieldNames(self, row):
        """Parse a header row into [name, type] pairs (type is None if absent)."""
        fs = []
        for i, f in enumerate(row):
            if f is None:
                f = 'F_' + str(i)  # unnamed column gets a positional name
            elif not isinstance(f, str):
                f = 'F_' + str(f)  # non-string header (e.g. a number)
            b = f.split(':')
            if len(b) < 2:
                b.append(None)
            fs.append(b)
        return fs
class CRUDData(XLSXData):
    """XLSXData specialisation for table-model workbooks.

    A model workbook must contain 'summary', 'fields' and 'validation'
    sheets; read() post-processes them into DDL-template input.
    """
    @classmethod
    def isMe(cls, book):
        """Workbook sniffing hook used by xlsxFactory."""
        names = book.sheetnames
        if 'summary' not in names:
            return False
        if 'fields' not in names:
            return False
        if 'validation' not in names:
            return False
        return True

    def read(self):
        """Read the workbook and normalise primary key, fk and index rows.

        Raises CRUDException when a mandatory sheet is missing.
        """
        d = XLSXData.read(self)
        if 'summary' not in d.keys():
            raise CRUDException(self.xlsxfile, 'summary sheet missing')
        if 'fields' not in d.keys():
            raise CRUDException(self.xlsxfile, 'fields sheet missing')
        if 'validation' not in d.keys():
            raise CRUDException(self.xlsxfile, 'validation sheet missing')
        # The primary-key cell is a comma-separated field list.
        v = d['summary'][0]['primary']
        d['summary'][0]['primary'] = v.split(',')
        d = self.convForeignkey(d)
        d = self.convIndex(d)
        return d

    def convForeignkey(self, data):
        """Expand 'table:value:title' foreign-key cells into dicts in place."""
        nvs = []
        for v in data['validation']:
            if v['oper'] == 'fk':
                m = v['value']
                des = m.split(':')
                if len(des) != 3:
                    raise CRUDException(self.xlsxfile, 'fk value error:%s' % m)
                v['value'] = {'table': des[0], 'value': des[1], 'title': des[2]}
            nvs.append(v)
        data['validation'] = nvs
        return data

    # NOTE: the misspelled name is kept because external callers may rely
    # on it; getFieldByName below is the preferred spelling.
    def getFieldByNmae(self, fields, name):
        """Return the field dict whose 'name' matches, else None."""
        for f in fields:
            if f['name'] == name:
                return f

    def getFieldByName(self, fields, name):
        """Correctly-spelled alias of getFieldByNmae()."""
        return self.getFieldByNmae(fields, name)

    def getFKs(self, validation):
        """Return all foreign-key validation rows."""
        # BUGFIX: the original tested v['oepr'] (typo) and raised KeyError.
        return [v for v in validation if v['oper'] == 'fk']

    def getIDXs(self, validation):
        """Return all index validation rows."""
        return [v for v in validation if v['oper'] == 'idx']

    def isFieldExist(self, fields, name):
        """True if a field named `name` is present."""
        for f in fields:
            if f['name'] == name:
                return True
        return False

    def convIndex(self, data):
        """Collect 'idx' validation rows into data['indexes'].

        Each 'idxtype:keylist' cell becomes {'name', 'idxtype', 'idxfields'}.
        """
        nvs = []
        for v in data['validation']:
            if v['oper'] == 'idx':
                m = v['value']
                des = m.split(':')
                if len(des) != 2:
                    raise CRUDException(self.xlsxfile, 'idx value format:idx_type:keylist:%s' % m)
                nvs.append({
                    'name': v['name'],
                    'idxtype': des[0],
                    'idxfields': des[1].split(','),
                })
        data['indexes'] = nvs
        return data
def xlsxFactory(xlsxfilename):
    """Open xlsxfilename and wrap it in the most specific XLSXData subclass.

    Subclasses are probed depth-first via their isMe() classmethod; plain
    XLSXData is the fallback. On any load error the function deliberately
    returns None (best-effort: callers skip unreadable files).
    """
    def deepest_match(book, base):
        # Depth-first search of the subclass tree for a class claiming book.
        for sub in base.__subclasses__():
            if sub.isMe(book):
                return sub
            found = deepest_match(book, sub)
            if found is not None:
                return found
        return None

    try:
        book = load_workbook(filename=xlsxfilename)
        klass = deepest_match(book, XLSXData)
        if klass is not None:
            wrapped = klass(book)
            wrapped.xlsxfile = xlsxfilename
            return wrapped
        return XLSXData(book)
    except Exception:
        # print(xlsxfilename, 'load failed\n%s' % str(e))
        return None
def ValueConvert(s):
    """Expand 'xlsfile::<path>' / 'jsonfile::<path>' argument values.

    A string with either prefix is replaced by the loaded file's content;
    any other value passes through unchanged.
    """
    if s.startswith('xlsfile::'):
        return xlsxFactory(s[9:]).read()
    if s.startswith('jsonfile::'):
        return loadf(s[10:])
    return s
def paramentHandle(ns):
    """Run ValueConvert over every value of ns, mutating it in place."""
    for key in list(ns.keys()):
        ns[key] = ValueConvert(ns[key])
    return ns
if __name__ == '__main__':
    # CLI: arguments of the form k=v become named parameters (values may use
    # the xlsfile::/jsonfile:: prefixes, see ValueConvert); bare arguments
    # are treated as data files whose sheets are merged into the result.
    retData = {}
    ns = {}
    datafiles = []
    for a in sys.argv[1:]:
        m = a.split('=', 1)
        if len(m) > 1:
            ns[m[0]] = m[1]
        else:
            datafiles.append(a)
    ns = paramentHandle(ns)
    for f in datafiles:
        ext = os.path.splitext(f)[-1]
        # NOTE(review): '.xls' is accepted here but xlsxFactory uses
        # openpyxl's load_workbook, which handles xlsx-format files.
        if ext in ['.xlsx', '.xls']:
            d = xlsxFactory(f)
            data = d.read()
            retData.update(data)
    # Named parameters override sheet data of the same name.
    retData.update(ns)
    print(dumps(retData))