In [1]:
import json
from adabas.api import *
from adabas.datamap import *
In [ ]:
from datetime import date
# convert a stored day count (e.g. the AH birth-date field) to a formatted date string;
# the 364-day offset aligns the file's day numbering with Python's date ordinal
mask_date = lambda data, mask='%d-%m-%Y': date.fromordinal(int(data)-364).strftime(mask)
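In [ ]:
# Quick round-trip check of mask_date (illustrative only): build a stored day
# count from a known date using the same 364-day offset, then format it back.
# Expected results: '01-05-1990' and '1990/05/01'.
sample = date(1990, 5, 1).toordinal() + 364
mask_date(sample), mask_date(sample, '%Y/%m/%d')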
In [ ]:
# elementary field of a periodic group, or a multiple-value elementary field
def get_periodic(isn = 0  # ISN to be read
                ,gsn = '' # group (or multiple-value field) short name - FDT
                ,esn = '' # elementary field short name - FDT
                ,lgn = '' # elementary field long name - DDM
                ,lsn = 0  # elementary field length - DDM
                ,upk = '' # '' or True: unpack packed fields
                ,scl = '' # decimal scale or '' - DDM
                ,Int = '' # True for numeric fields
                ):
    ret = ''
    try:
        upk = ',U' if upk else ''
        Str = '"'
        a.cb.isn = isn
        if scl or Int:
            Int = 'int('
            Str = ''
            if scl:
                scl = ') * .{0:>0{1}}'.format('1', scl)
            else:
                scl = ')'
        a.fb.value = '{}C,002,B.'.format(gsn)      # read the occurrence-count field
        a.get(isn=a.cb.isn)
        if a.rb.value:
            occ = int(a.rb.value.encode('hex'))    # number of occurrences
            # build a Datamap with one String field per occurrence (lgn1..lgnN)
            exec ("row = Datamap('Rows', {})".format("String('{lg}{}', {l})," * occ).format(lg=lgn, *range(1,occ+1), l=lsn))
            row.buffer = a.rb
            a.fb.value = '{}1-{},{}{}.'.format(esn, occ, lsn, upk)
            a.get(isn=a.cb.isn)
            # assemble a JSON fragment: '"<long name>": [v1, v2, ...]'
            ret = eval("""'"{}": [{}]'.format({})""".format( lgn, (Str+'{}'+Str+', ') * occ, ('{i}row.{ln}{}{sc}, '* occ).format(i=Int,ln=lgn,sc=scl,*range(1,occ+1))))
            # ret = ret[:-3] + ']'
            ret = ret.replace(', ]', ']')
    except DatabaseError, (error,apa):
        return error.split(':')[1]
    return ret
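In [ ]:
# Illustrative only (no database call): get_periodic returns a JSON fragment,
# one key with a list holding the value of each occurrence; the field name and
# values below are made up. Wrapped in braces, the fragment parses as JSON.
sample_fragment = '"curr_code": ["USD", "EUR", "GBP"]'
json.loads('{' + sample_fragment + '}')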
In [ ]:
# multiple-value field inside a periodic group
def get_per_mult(isn = 0  # ISN to be read
                ,gsn = '' # group short name - FDT
                ,esn = '' # elementary field short name - FDT
                ,lgn = '' # elementary field long name - DDM
                ,lsn = 0  # elementary field length - DDM
                ,upk = '' # '' or True: unpack packed fields
                ,scl = '' # decimal scale or '' - DDM
                ,Int = '' # True for numeric fields
                ):
    ret = ''
    try:
        upk = ',U' if upk else ''
        Str = '"'
        a.cb.isn = isn
        if scl or Int:
            Int = 'int('
            Str = ''
            if scl:
                scl = ') * .{0:>0{1}}'.format('1', scl)
            else:
                scl = ')'
        lst = '['
        a.fb.value = '{}C,002,B.'.format(gsn)      # read the group occurrence count
        a.get(isn=a.cb.isn)
        if a.rb.value:
            gocc = int(a.rb.value.encode('hex'))   # number of group occurrences
            for o in range(1, gocc+1):
                a.fb.value = '{}{}C,002,B.'.format(esn, o)   # value count of the MU field in occurrence o
                a.get(isn=a.cb.isn)
                if a.rb.value:
                    occ = int(a.rb.value.encode('hex'))      # number of values in this occurrence
                    # build a Datamap with one String field per value (lgn1..lgnN)
                    exec ("row = Datamap('Rows', {})".format("String('{lg}{}', {l})," * occ).format(lg=lgn, *range(1,occ+1), l=lsn))
                    row.buffer = a.rb
                    a.fb.value = '{}{}(1-{}),{}{}.'.format(esn, o, occ, lsn, upk)
                    a.get(isn=a.cb.isn)
                    # one inner list per group occurrence
                    ret = eval("""'[{}]'.format({})""".format((Str+'{}'+Str+', ') * occ, ('{i}row.{ln}{}{sc}, '* occ).format(i=Int,ln=lgn,sc=scl,*range(1,occ+1))))
                    lst += '{}, '.format(ret)
                else:
                    break
    except DatabaseError, (error,apa):
        return error.split(':')[1]
    return '"{}": {}]'.format(lgn, lst).replace(', ]', ']')
In [ ]:
DBID=12;FNR=11
STARTISN=282
RCOUNT=5
row=Datamap('rows',
String('personnel_id', 8), # aa 1
String('first_name', 20), # ac 2
String('middle_name', 20), # ad 3
String('name', 20), # ae 4
String('birth', 6), # ah 5
String('country', 3), # al 6
String('area_code', 6), # an 7
String('phone', 15), # am 8
String('dept', 6), # ao 9
String('job_title', 25)) # ap 10
In [ ]:
lines=''
extraline=''
alter=False
c1=Adabas(rbl=256,fbl=64,sbl=32,vbl=128,ibl=0)
c1.cb.dbid=DBID
c1.cb.fnr=FNR
c1.cb.cid='1010'
c1.fb.value='AA,AC,AD,AE,AH,6,U,AL,AN,AM,AO,AP.'   # AH read as 6 bytes unpacked; field order matches the 'row' Datamap
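In [ ]:
# Optional sanity check: the format buffer must deliver the fields in the same
# order and widths declared in the 'row' Datamap, so the declared widths
# (totalling 129 bytes) fit comfortably in the 256-byte record buffer.
sum((8, 20, 20, 20, 6, 3, 6, 15, 6, 25))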
In [ ]:
a=Adabas(rbl=256,fbl=64,sbl=32,vbl=128,ibl=0)
a.cb.dbid=DBID
a.cb.fnr=FNR
In [ ]:
c1.cb.isn=STARTISN
# map the 'row' Datamap onto c1's record buffer
row.buffer=c1.rb
row.offset=0
count=0
try:
    for count in range(RCOUNT):
        c1.readByIsn(getnext=1)
        lines += """{}"{}": {}, "{}": "{}", "{}": "{}", "{}": "{}","""\
                 """"{}": "{}", "{}": "{}", "{}": "{}","""\
                 """"{}": "{}", "{}": "{}", "{}": "{}","""\
                 """"{}": "{}", {}, {}{}\n""".format("{"
                 ,'isn'         , c1.cb.isn
                 ,'personnel_id', row.personnel_id
                 ,'first_name'  , row.first_name
                 ,'middle_name' , row.middle_name
                 ,'name'        , row.name
                 ,'birth'       , mask_date(row.birth)
                 ,'country'     , row.country
                 ,'area_code'   , row.area_code
                 ,'phone'       , row.phone
                 ,'dept'        , row.dept
                 ,'job_title'   , row.job_title
                 ,get_periodic(isn=c1.cb.isn, gsn='AQ', esn='AR', lgn='curr_code', lsn=3)
                 ,get_per_mult(isn=c1.cb.isn, gsn='AQ', esn='AT', lgn='bonus', lsn=9, upk=True, Int=True)
                 ,"}")
    extraline += 'Sequential Read by ISN returned '+str(count+1)+' record(s).'
except DataEnd:
    extraline += 'Sequential Read by ISN returned '+str(count)+' record(s).'
except DatabaseError, (error, apa):
    extraline += 'Database Error:'+error
In [ ]:
line = lines.splitlines()
In [ ]:
line[:5]
In [ ]:
parsed = json.loads(line[1].decode('utf-8'))
In [ ]:
parsed
In [ ]:
parsed['bonus']
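In [ ]:
# A small follow-up sketch: parse every record line into a dict and keep the
# whole result set as a list, ready to be re-serialized with json.dumps.
records = [json.loads(l.decode('utf-8')) for l in line if l.startswith('{')]
len(records), records[0]['job_title']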