Skip to content

Commit

Permalink
Fixed for Python 3 support. Test problems and parsing seem to be working properly.
Browse files Browse the repository at this point in the history
  • Loading branch information
siboles committed Oct 27, 2017
1 parent 6b12a89 commit 0fcebe1
Show file tree
Hide file tree
Showing 17 changed files with 358 additions and 250 deletions.
29 changes: 16 additions & 13 deletions febio/Boundary.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@
@author: Scott Sibole
'''
from __future__ import print_function
from builtins import range
from builtins import object

class Boundary(object):
'''
Expand All @@ -23,16 +26,16 @@ def __init__(self,steps=1):
spring - list with each entry containing a 6 element list of: type, node 1, node 2, E, force load curve id, scale
'''
self.bcs = []
for _ in xrange(steps):
for _ in range(steps):
self.bcs.append({'fixed': [], 'prescribed': [], 'prescribed relative': [], 'contact': [],'spring': []})

def addFixed(self,nset=None,nodeid=None,dof=None):
if dof is None:
print 'WARNING: No degree of freedom was specified for this boundary condition. Skipping...'
print('WARNING: No degree of freedom was specified for this boundary condition. Skipping...')
pass

if nset is None and nodeid is None:
print 'WARNING: Must specify either a node set or a node id. Skipping...'
print('WARNING: Must specify either a node set or a node id. Skipping...')
pass

if nset is not None:
Expand All @@ -49,19 +52,19 @@ def addFixed(self,nset=None,nodeid=None,dof=None):

def addPrescribed(self,nset=None,step=0,nodeid=None,dof=None,lc=None,scale=None,ptype=None):
if dof is None:
print 'WARNING: No degree of freedom was specified for this boundary condition. Skipping BC assignment...'
print('WARNING: No degree of freedom was specified for this boundary condition. Skipping BC assignment...')
pass

if nset is None and nodeid is None:
print 'WARNING: Must specify either a node set or a node id. Skipping BC assignment...'
print('WARNING: Must specify either a node set or a node id. Skipping BC assignment...')
pass

if lc is None:
print 'WARNING: Must specify a load curve ID. Skipping BC assignment...'
print('WARNING: Must specify a load curve ID. Skipping BC assignment...')
pass

if scale is None:
print 'WARNING: No scale specified for this boundary condition. Using default of 1.0...'
print('WARNING: No scale specified for this boundary condition. Using default of 1.0...')
scale = 1.0
if ptype is not None:
keywd = 'prescribed relative'
Expand All @@ -81,11 +84,11 @@ def addPrescribed(self,nset=None,step=0,nodeid=None,dof=None,lc=None,scale=None,

def addContact(self,step=0,ctype=None,master=None,slave=None,attributes=None):
if ctype is None:
print 'WARNING: Did not specify a contact type. Skipping assignment...'
print('WARNING: Did not specify a contact type. Skipping assignment...')
pass

elif master is None or slave is None:
print 'WARNING: Did not specify an appropriate value for the master and/or slave. Skipping assignment...'
print('WARNING: Did not specify an appropriate value for the master and/or slave. Skipping assignment...')
pass
try:
if isinstance(master[0][0],list):
Expand All @@ -109,17 +112,17 @@ def addContact(self,step=0,ctype=None,master=None,slave=None,attributes=None):

def addSpring(self, step=0, stype='linear', nodes=(None, None), E=None, lc=None, scale=1.0):
    '''
    Add a spring definition to the boundary conditions of the given analysis step.

    Parameters
    ----------
    step : int
        Index into self.bcs of the analysis step the spring belongs to.
    stype : str
        Spring type: 'linear', 'tension-only nonlinear', or 'nonlinear'.
    nodes : sequence of 2 ints
        The two node ids connected by the spring.
    E : float
        Spring stiffness; required when stype is 'linear' or
        'tension-only nonlinear'.
    lc : int
        Force load curve id; required when stype is 'nonlinear'.
    scale : float
        Scale factor applied to the load curve (defaults to 1.0).

    Notes
    -----
    On invalid input a warning is printed and the definition is skipped
    (the original code printed the warning but fell through and appended
    the invalid spring anyway; the early returns below make the "Skipping"
    messages truthful).
    '''
    # The node pair must be exactly two integer ids.
    if len(nodes) != 2 or not isinstance(nodes[0], int) or not isinstance(nodes[1], int):
        print('WARNING: List of nodes must be 2 integer elements. Skipping spring definition...')
        return
    # Linear spring types require a stiffness.
    if stype == 'linear' or stype == 'tension-only nonlinear':
        if E is None:
            print('WARNING: Must specify a spring stiffness if type is linear or tension-only linear. Skipping spring definition...')
            return
    # Nonlinear springs require a force load curve.
    if stype == 'nonlinear' and lc is None:
        print('WARNING: Must specify a force load curve if type is nonlinear. Skipping spring definition...')
        return
    # Fall back to the documented default scale if the caller passed None.
    if stype == 'nonlinear' and scale is None:
        print('WARNING: No scale was specified. Using default value of 1.0...')
        scale = 1.0

    self.bcs[step]['spring'].append({'stype': stype, 'n1': nodes[0], 'n2': nodes[1], 'E': E, 'lc': lc, 'scale': scale})
Expand Down
Binary file modified febio/Boundary.pyc
Binary file not shown.
9 changes: 3 additions & 6 deletions febio/Control.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
@author: Scott Sibole
'''
import string
from builtins import object

class Control(object):
'''
Expand Down Expand Up @@ -36,8 +36,5 @@ def __init__(self):
'integration': None
}
def setAttributes(self, specified):
    '''
    Overwrite entries of self.attributes from *specified*.

    Parameters
    ----------
    specified : dict
        Maps attribute names to new values; keys are lower-cased before
        assignment, so callers may use any casing for the names.
    '''
    # str.lower() replaces the Python 2 string.lower(), which was removed
    # in Python 3; only the single Python 3 loop is kept (the diff residue
    # showed both the old and new loop bodies).
    for name, value in specified.items():
        self.attributes[name.lower()] = value
Binary file modified febio/Control.pyc
Binary file not shown.
79 changes: 47 additions & 32 deletions febio/FebPlt.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import numpy as np
import os, re
import os
from collections import OrderedDict

class FebPlt(object):
Expand Down Expand Up @@ -96,19 +102,22 @@ def __init__(self,filename):
self._parseModel()
self._cleanData()

def _asHexString(self, x):
return "{:08x}".format(x)

def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
if '%08x' % chunk != '00464542':
print 'File passed to FebPlt() is not an FEBio xplt. Exiting...'
if self._asHexString(chunk[0]) != '00464542':
print('File passed to FebPlt() is not an FEBio xplt. Exiting...')
raise SystemExit
self._read_size += 4
while self._read_size < self._filesize:
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'VERSION':
self.VERSION = '%08x' % np.fromfile(self._fid,dtype=np.uint32,count=2)[-1]
self.VERSION = self._asHexString(np.fromfile(self._fid,dtype=np.uint32,count=2)[-1])
self._read_size += 8
elif keyword == 'NODES':
self.NODES = np.fromfile(self._fid,dtype=np.uint32,count=2)[-1]
Expand All @@ -120,7 +129,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword in ['GLOBAL_DATA','MATERIAL_DATA','NODESET_DATA','DOMAIN_DATA','SURFACE_DATA']:
self._readDictSect(np.fromfile(self._fid,dtype=np.uint32,count=1)+self._read_size,keyword)
except:
Expand All @@ -133,7 +142,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'MAT_ID':
self.MATERIAL[-1]['MAT_ID'] = np.fromfile(self._fid,dtype=np.uint32,count=2)[-1]
self._read_size += 8
Expand All @@ -155,7 +164,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'NODE_COORDS':
np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
Expand All @@ -171,7 +180,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'DOMAIN':
self._readDomain(np.fromfile(self._fid,dtype=np.uint32,count=1)[0]+self._read_size)
except:
Expand All @@ -183,7 +192,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'SURFACE':
self._readSurface(np.fromfile(self._fid,dtype=np.uint32,count=1)[0]+self._read_size)
except:
Expand All @@ -197,7 +206,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'TIME':
self.STATE_SECTION[-1]['TIME'] = np.fromfile(self._fid,dtype=np.float32,count=2)[1]
self.TIME.append(self.STATE_SECTION[-1]['TIME'])
Expand All @@ -209,7 +218,7 @@ def _parseModel(self):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)[0]
self._read_size += 4
try:
subkeyword = self._lookup['%08x' % chunk]
subkeyword = self._lookup[self._asHexString(chunk)]
if subkeyword == 'VARIABLE_ID':
self._addStateData(keyword)
self.STATE_SECTION[-1][keyword][-1]['VARIABLE_ID'].append(np.fromfile(self._fid,dtype=np.uint32,count=2)[-1])
Expand All @@ -223,7 +232,7 @@ def _parseModel(self):
region_size = np.fromfile(self._fid,dtype=np.uint32,count=1)[0]
self._read_size += 8
self.STATE_SECTION[-1][keyword][-1]['DATA'].append({'REGION_ID': region_id,
'DATA': np.reshape(np.fromfile(self._fid,dtype=np.float32,count=region_size/4),(-1,vlengths[itype]))})
'DATA': np.reshape(np.fromfile(self._fid,dtype=np.float32,count=old_div(region_size,4)),(-1,vlengths[itype]))})
self._read_size += region_size
except:
continue
Expand All @@ -241,7 +250,7 @@ def _readDictSect(self,size,key):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'DICTIONARY_ITEM':
self._addDictionaryItem(key)
elif keyword == 'ITEM_TYPE':
Expand All @@ -253,12 +262,18 @@ def _readDictSect(self,size,key):
elif keyword == 'ITEM_NAME':
np.fromfile(self._fid,dtype=np.uint32,count=1) #read a 4 byte chunk to move to char64 array
self._read_size += 4
dmy = np.fromfile(self._fid,dtype=np.dtype((str,64)),count=1)
z = re.compile(r'(\x00)')
dmy = z.split(dmy[0])
word = ''
c = 0
while c<64:
dmy = np.fromfile(self._fid, dtype='|S1', count=1)[0]
c += 1
if dmy != b'':
word = ''.join([word, dmy.decode('ascii')])
else:
np.fromfile(self._fid, dtype='|S1', count=64-c)
break
self._read_size += 64
dmy = str(dmy[0])
self.DICTIONARY[key][-1]['ITEM_NAME'] = dmy
self.DICTIONARY[key][-1]['ITEM_NAME'] = word
except:
continue

Expand All @@ -271,7 +286,7 @@ def _readDomain(self,size):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'ELEM_TYPE':
etype = np.fromfile(self._fid,dtype=np.uint32,count=2)[-1]
self.DOMAIN_SECTION[-1]['DOMAIN_HEADER']['ELEM_TYPE'] = elmtypes[etype]
Expand All @@ -295,7 +310,7 @@ def _readSurface(self,size):
chunk = np.fromfile(self._fid,dtype=np.uint32,count=1)
self._read_size += 4
try:
keyword = self._lookup['%08x' % chunk]
keyword = self._lookup[self._asHexString(chunk[0])]
if keyword == 'SURFACE_ID':
self.SURFACE_SECTION[-1]['SURFACE_HEADER']['SURFACE_ID'] = np.fromfile(self._fid,dtype=np.uint32,count=2)[-1]
self._read_size += 8
Expand Down Expand Up @@ -345,24 +360,24 @@ def _cleanData(self):
used_node[n] = True
mapping[-1]['MULT'].append(n)
N = len(self.STATE_SECTION)
for i in xrange(N):
for j in xrange(len(self.STATE_SECTION[i]['NODESET_DATA'][0]['DATA'][0]['DATA'])):
for i in range(N):
for j in range(len(self.STATE_SECTION[i]['NODESET_DATA'][0]['DATA'][0]['DATA'])):
try:
self.NodeData[j+1]['displacement'][i,:] = self.STATE_SECTION[i]['NODESET_DATA'][0]['DATA'][0]['DATA'][j]
except:
self.NodeData[j+1] = {}
self.NodeData[j+1]['displacement'] = np.zeros((N,3),dtype=np.float32)
self.NodeData[j+1]['displacement'][i,:] = self.STATE_SECTION[i]['NODESET_DATA'][0]['DATA'][0]['DATA'][j]

for j in xrange(len(self.STATE_SECTION[i]['DOMAIN_DATA'])):
for j in range(len(self.STATE_SECTION[i]['DOMAIN_DATA'])):
vname = self.DICTIONARY['DOMAIN_DATA'][j]['ITEM_NAME']
vformat = self.DICTIONARY['DOMAIN_DATA'][j]['ITEM_FORMAT']
if vformat == 'ITEM':
for k in xrange(len(self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'])):
for k in range(len(self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'])):
dat = self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'][k]['DATA']
m = mapping[k][vformat]
M,L = dat.shape #rows in dat
for l in xrange(M):
for l in range(M):
try:
self.ElementData[m[l]][vname][i,:] = dat[l,:]
except:
Expand All @@ -373,11 +388,11 @@ def _cleanData(self):
self.ElementData[m[l]][vname] = np.zeros((N,L),dtype=np.float32)
self.ElementData[m[l]][vname][i,:] = dat[l,:]
elif vformat == 'NODE':
for k in xrange(len(self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'])):
for k in range(len(self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'])):
dat = self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'][k]['DATA']
m = mapping[k][vformat]
M,L = dat.shape #rows in dat
for l in xrange(M):
for l in range(M):
n = m[l]+1
try:
self.NodeData[n][vname][i,:] = dat[l,:]
Expand All @@ -390,11 +405,11 @@ def _cleanData(self):
self.NodeData[n][vname][i,:] = dat[l,:]
elif vformat == 'MULT':
accessed = {}
for k in xrange(len(self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'])):
for k in range(len(self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'])):
dat = self.STATE_SECTION[i]['DOMAIN_DATA'][j]['DATA'][k]['DATA']
m = mapping[k][vformat]
M,L = dat.shape #rows in dat
for l in xrange(M):
for l in range(M):
n = m[l]+1
try:
self.NodeData[n][vname][i,:] = self.NodeData[n][vname][i,:] + dat[l,:]
Expand All @@ -410,9 +425,9 @@ def _cleanData(self):
self.NodeData[n][vname] = np.zeros((N,L),dtype=np.float32)
self.NodeData[n][vname][i,:] = self.NodeData[n][vname][i,:] + dat[l,:]
accessed[n] = 1
for nid in self.NodeData.keys():
for nid in list(self.NodeData.keys()):
try:
self.NodeData[nid][vname][i,:] = self.NodeData[nid][vname][i,:]/accessed[nid]
self.NodeData[nid][vname][i,:] = old_div(self.NodeData[nid][vname][i,:],accessed[nid])
except:
continue

Expand Down
Loading

0 comments on commit 0fcebe1

Please sign in to comment.