Merge pull request #125 from ODM2/sfdataset
Sfdataset
sreeder authored Nov 17, 2017
2 parents 2d3e4ab + d84c252 commit dd1ec35
Showing 2 changed files with 86 additions and 33 deletions.
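The readService change below adds a SamplingFeatureDataSet wrapper whose assignDatasets method groups a sampling feature's DataSetsResults rows into a dictionary keyed by dataset. Conceptually this is the standard "bucket by key" pattern; a minimal standalone sketch with made-up row objects (not odm2api code, and using setdefault where the class uses an explicit if/else):

from collections import namedtuple

# Stand-ins for DataSetsResults rows: each row points at a dataset and a result.
Row = namedtuple('Row', ['DataSetObj', 'ResultObj'])
rows = [Row('dataset-A', 'result-1'), Row('dataset-A', 'result-2'), Row('dataset-B', 'result-3')]

datasets = {}
for dsr in rows:
    # create the bucket on first sight of a dataset, then append the result
    datasets.setdefault(dsr.DataSetObj, []).append(dsr.ResultObj)

print(datasets)  # {'dataset-A': ['result-1', 'result-2'], 'dataset-B': ['result-3']}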
64 changes: 54 additions & 10 deletions odm2api/ODM2/services/readService.py
@@ -72,6 +72,43 @@ def __init__(self, affiliation, person, org):
self.Organization = '(' + org.OrganizationCode + ') ' + org.OrganizationName


class SamplingFeatureDataSet():
    datasets = {}

    def __init__(self, samplingfeature, datasetresults):
        sf = samplingfeature

        self.SamplingFeatureID = sf.SamplingFeatureID
        self.SamplingFeatureUUID = sf.SamplingFeatureUUID
        self.SamplingFeatureTypeCV = sf.SamplingFeatureTypeCV
        self.SamplingFeatureCode = sf.SamplingFeatureCode
        self.SamplingFeatureName = sf.SamplingFeatureName
        self.SamplingFeatureDescription = sf.SamplingFeatureDescription
        self.SamplingFeatureGeotypeCV = sf.SamplingFeatureGeotypeCV
        self.Elevation_m = sf.Elevation_m
        self.ElevationDatumCV = sf.ElevationDatumCV
        self.FeatureGeometryWKT = sf.FeatureGeometryWKT
        self.assignDatasets(datasetresults)

        print(self.datasets)

    def assignDatasets(self, datasetresults):
        for dsr in datasetresults:
            if dsr.DataSetObj not in self.datasets:
                # if the dataset is not in the dictionary, add it and the first result
                self.datasets[dsr.DataSetObj] = []
                res = dsr.ResultObj
                # res.FeatureActionObj = None
                self.datasets[dsr.DataSetObj].append(res)
            else:
                # if the dataset is in the dictionary, append the result object to the list
                res = dsr.ResultObj
                # res.FeatureActionObj = None
                self.datasets[dsr.DataSetObj].append(res)


class ReadODM2(serviceBase):
    # Exists functions
    def resultExists(self, result):
@@ -871,7 +908,6 @@ def getSamplingFeatureDatasets(self, ids=None, codes=None, uuids=None, dstype=None):
            raise ValueError('Expected samplingFeatureID OR samplingFeatureUUID OR samplingFeatureCode argument')

        sf_query = self._session.query(SamplingFeatures)

        if ids:
            sf_query = sf_query.filter(SamplingFeatures.SamplingFeatureID.in_(ids))
        if codes:
@@ -880,21 +916,29 @@ def getSamplingFeatureDatasets(self, ids=None, codes=None, uuids=None, dstype=None):
            sf_query = sf_query.filter(SamplingFeatures.SamplingFeatureUUID.in_(uuids))
        sf_list = []
        for sf in sf_query.all():
            sf_list.append(sf.SamplingFeatureID)
            sf_list.append(sf)

        q = self._session.query(DataSetsResults)\
            .join(Results)\
            .join(FeatureActions)\
            .filter(FeatureActions.SamplingFeatureID.in_(sf_list))
        sfds = None
        try:
            sfds = []
            for sf in sf_list:

        if dstype:
            q = q.filter_by(DatasetTypeCV=dstype)
                q = self._session.query(DataSetsResults)\
                    .join(Results)\
                    .join(FeatureActions)\
                    .filter(FeatureActions.SamplingFeatureID == sf.SamplingFeatureID)

        try:
            return q.all()
                if dstype:
                    q = q.filter_by(DatasetTypeCV=dstype)


                vals = q.all()

                sfds.append(SamplingFeatureDataSet(sf, vals))
        except Exception as e:
            print('Error running Query: {}'.format(e))
            return None
        return sfds

    # Data Quality
    def getDataQuality(self):
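With this change, getSamplingFeatureDatasets returns a list of SamplingFeatureDataSet objects (one per matched sampling feature) instead of raw DataSetsResults rows, and each object exposes a datasets dictionary mapping a DataSets object to the list of its Results. A minimal usage sketch, assuming an ODM2 SQLite database opened with odm2api's dbconnection helper; the file path and sampling feature code below are hypothetical:

from odm2api.ODMconnection import dbconnection
from odm2api.ODM2.services.readService import ReadODM2

# Hypothetical connection details; any ODM2 database supported by odm2api would work.
session_factory = dbconnection.createConnection('sqlite', '/path/to/odm2.sqlite')
read = ReadODM2(session_factory)

# Ask for the datasets grouped per sampling feature (the code is made up).
sf_datasets = read.getSamplingFeatureDatasets(codes=['USU-LBR-Mendon'])
if sf_datasets:
    for sfds in sf_datasets:
        print(sfds.SamplingFeatureCode, sfds.SamplingFeatureName)
        # sfds.datasets maps each DataSets object to the list of Results it contains.
        for dataset, results in sfds.datasets.items():
            print('  dataset:', dataset.DataSetCode, 'results:', len(results))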
55 changes: 32 additions & 23 deletions tests/test_odm2/test_readservice.py
@@ -130,42 +130,51 @@ def test_getDataSetsValues(self):


    def test_getSamplingFeatureDataSets(self):
        try:
            # find a sampling feature that is associated with a dataset
            sf = self.engine.execute(
                'SELECT * from SamplingFeatures as sf '
                'inner join FeatureActions as fa on fa.SamplingFeatureID == sf.SamplingFeatureID '
                'inner join Results as r on fa.FeatureActionID == r.FeatureActionID '
                'inner join DataSetsResults as ds on r.ResultID == ds.ResultID '
            ).fetchone()
            assert len(sf) > 0

            # get the dataset associated with the sampling feature
            ds = self.engine.execute(
                'SELECT * from DataSetsResults as ds '
                'inner join Results as r on r.ResultID == ds.ResultID '
                'inner join FeatureActions as fa on fa.FeatureActionID == r.FeatureActionID '
                'where fa.SamplingFeatureID = ' + str(sf[0])
            ).fetchone()
            assert len(ds) > 0

        # find a sampling feature that is associated with a dataset
        sf = self.engine.execute(
            'SELECT * from SamplingFeatures as sf '
            'inner join FeatureActions as fa on fa.SamplingFeatureID == sf.SamplingFeatureID '
            'inner join Results as r on fa.FeatureActionID == r.FeatureActionID '
            'inner join DataSetsResults as ds on r.ResultID == ds.ResultID '
        ).fetchone()
        assert len(sf) > 0
        print(sf[0])
        # get the dataset associated with the sampling feature using the api
        dsapi = self.reader.getSamplingFeatureDatasets(ids=[sf[0]])

        # get the dataset associated with the sampling feature
        ds = self.engine.execute(
            'SELECT * from DataSetsResults as ds '
            'inner join Results as r on r.ResultID == ds.ResultID '
            'inner join FeatureActions as fa on fa.FeatureActionID == r.FeatureActionID '
            'where fa.SamplingFeatureID = ' + str(sf[0])
        ).fetchone()
        assert len(ds) > 0
            assert dsapi is not None
            assert len(dsapi) > 0
            assert dsapi[0].datasets is not None
            assert dsapi[0].SamplingFeatureID == sf[0]
            # assert ds[0] == dsapi[0]
        except Exception as ex:
            assert False
        finally:
            self.reader._session.rollback()

        print(sf[0])
        # get the dataset associated with the sampling feature using the api
        dsapi = self.reader.getSamplingFeatureDatasets(ids=[sf[0]])

        assert dsapi is not None
        assert len(dsapi) > 0
        assert ds[1] == dsapi[0].DataSetID

    # Results
    def test_getAllResults(self):

        # get all results from the database
        res = self.engine.execute('SELECT * FROM Results').fetchall()
        print(res)
        # get all results using the api
        resapi = self.reader.getResults()
        assert len(res) == len(resapi)


    def test_getResultsByID(self):
        # get a result from the database
        res = self.engine.execute('SELECT * FROM Results').fetchone()
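As a possible extension of the test above, a sketch of a check on the grouped results themselves, under the same fixture assumptions (self.reader is a ReadODM2 instance and self.engine is the raw engine used elsewhere in this file); the method name is hypothetical and not part of this commit:

    def test_getSamplingFeatureDatasetsGrouping(self):
        # reuse the same raw SQL the existing test uses to find a sampling
        # feature that has at least one dataset result
        sf = self.engine.execute(
            'SELECT * from SamplingFeatures as sf '
            'inner join FeatureActions as fa on fa.SamplingFeatureID == sf.SamplingFeatureID '
            'inner join Results as r on fa.FeatureActionID == r.FeatureActionID '
            'inner join DataSetsResults as ds on r.ResultID == ds.ResultID '
        ).fetchone()

        dsapi = self.reader.getSamplingFeatureDatasets(ids=[sf[0]])

        # every entry in the datasets dict should hold a non-empty list of
        # Results belonging to that dataset
        for dataset, results in dsapi[0].datasets.items():
            assert len(results) > 0
        self.reader._session.rollback()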
