-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtest_batch.py
256 lines (215 loc) · 10.1 KB
/
test_batch.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
import datetime
from os.path import dirname, join
import pytest
import pytz
from sqlalchemy import func
from snowex_db.batch import *
from snowexsql.data import ImageData, LayerData, SiteData
from .sql_test_base import TableTestBase, pytest_generate_tests
class TestUploadSiteDetailsBatch(TableTestBase):
    """
    Test that a batch of site detail files is uploaded to the sites table.
    """
    args = [['site_5S21.csv', 'site_details.csv']]
    kwargs = {'epsg': 26912, 'in_timezone': 'US/Mountain'}
    UploaderClass = UploadSiteDetailsBatch
    TableClass = SiteData
    count_attribute = 'site_id'

    # Maps each generated test name to the keyword arguments it runs with
    params = {
        # Each site should produce exactly one record
        'test_count': [
            {'data_name': '5S21', 'expected_count': 1},
            {'data_name': '1N20', 'expected_count': 1},
        ],
        # Spot-check attribute values parsed from the site detail files
        'test_value': [
            {'data_name': '1N20', 'attribute_to_check': 'slope_angle',
             'filter_attribute': 'date',
             'filter_value': datetime.date(2020, 2, 5),
             'expected': 5},
            {'data_name': '5S21', 'attribute_to_check': 'ground_roughness',
             'filter_attribute': 'date',
             'filter_value': datetime.date(2020, 2, 1),
             'expected': 'Smooth'},
        ],
        # Placeholder entry: a single site only has one of each attribute
        'test_unique_count': [
            {'data_name': '1N20', 'attribute_to_count': 'date',
             'expected_count': 1},
        ],
    }

    def test_extended_geom(self):
        """
        Confirm a stored geometry carries the projected SRID (EPSG:26912).
        """
        first_geom = self.session.query(SiteData.geom).limit(1).all()
        assert first_geom[0][0].srid == 26912
class TestUploadProfileBatch(TableTestBase):
    """
    Test uploading a batch of vertical profile files to the layers table.
    """
    args = [['stratigraphy.csv', 'temperature.csv']]
    kwargs = {'in_timezone': 'UTC'}
    UploaderClass = UploadProfileBatch
    TableClass = LayerData

    # Maps each generated test name to the keyword arguments it runs with
    params = {
        'test_count': [
            {'data_name': 'hand_hardness', 'expected_count': 5},
            {'data_name': 'temperature', 'expected_count': 5},
        ],
        'test_value': [
            {'data_name': 'hand_hardness', 'attribute_to_check': 'observers',
             'filter_attribute': 'depth', 'filter_value': 17,
             'expected': None},
            {'data_name': 'hand_hardness', 'attribute_to_check': 'comments',
             'filter_attribute': 'depth', 'filter_value': 17,
             'expected': 'Cups'},
        ],
        'test_unique_count': [
            {'data_name': 'manual_wetness', 'attribute_to_count': 'value',
             'expected_count': 1},
        ],
    }
class TestUploadProfileBatchErrors:
    """
    Exercise the error handling of batch profile uploads.
    """
    files = ['doesnt_exist.csv']

    def test_without_debug(self):
        """
        With debug off, a bad file is collected as an error instead of raised.
        """
        batch = UploadProfileBatch(
            self.files,
            credentials=join(dirname(__file__), 'credentials.json'),
            debug=False)
        batch.push()
        assert len(batch.errors) == 1

    def test_with_debug(self):
        """
        With debug on, pushing a bad file raises.
        """
        with pytest.raises(Exception):
            batch = UploadProfileBatch(self.files, debug=True)
            batch.push()

    def test_without_files(self):
        """
        An empty file list pushes cleanly and uploads nothing.
        """
        batch = UploadProfileBatch(
            [],
            credentials=join(dirname(__file__), 'credentials.json'),
            debug=True)
        batch.push()
        assert batch.uploaded == 0
class TestUploadLWCProfileBatch(TableTestBase):
    """
    Test uploading both formats of the LWC profile files.
    """
    args = [['LWC.csv', 'LWC2.csv']]
    kwargs = {'in_timezone': 'US/Mountain'}
    UploaderClass = UploadProfileBatch
    TableClass = LayerData

    # Maps each generated test name to the keyword arguments it runs with
    params = {
        # Permittivity count is the sum contributed by the two files
        'test_count': [
            {'data_name': 'permittivity', 'expected_count': 4 + 8},
            {'data_name': 'lwc_vol', 'expected_count': 8},
        ],
        'test_value': [
            {'data_name': 'density', 'attribute_to_check': 'value',
             'filter_attribute': 'depth', 'filter_value': 83,
             'expected': 164.5},
        ],
        # Two files at two sites -> two distinct site ids
        'test_unique_count': [
            {'data_name': 'permittivity', 'attribute_to_count': 'site_id',
             'expected_count': 2},
        ],
    }
class TestUploadSMPBatch(TableTestBase):
    """
    Test whether we can assign meta info from an smp log to 2 profiles.
    """
    args = [['S19M1013_5S21_20200201.CSV', 'S06M0874_2N12_20200131.CSV']]
    kwargs = {
        'in_timezone': 'UTC',
        'smp_log_f': 'smp_log.csv',
        'units': 'Newtons'}
    UploaderClass = UploadProfileBatch
    TableClass = LayerData
    attribute = 'depth'

    # Maps each generated test name to the keyword arguments it runs with
    params = {
        # Total entries should equal the combined data lines of both files
        'test_count': [dict(data_name='force', expected_count=(242 + 154))],
        # Verify the SMP-log metadata landed on the correct profile
        'test_value': [
            dict(data_name='force', attribute_to_check='site_id',
                 filter_attribute='depth', filter_value=-100,
                 expected='5S21'),
            dict(data_name='force', attribute_to_check='site_id',
                 filter_attribute='depth', filter_value=-0.4,
                 expected='2N12'),
            dict(data_name='force', attribute_to_check='comments',
                 filter_attribute='depth', filter_value=-0.4,
                 expected='started 1-2 cm below surface'),
            dict(data_name='force', attribute_to_check='time',
                 filter_attribute='id', filter_value=1,
                 expected=datetime.time(hour=16, minute=16, second=49,
                                        tzinfo=pytz.FixedOffset(-420))),
            dict(data_name='force', attribute_to_check='units',
                 filter_attribute='depth', filter_value=-0.4,
                 expected='Newtons'),
        ],
        # Two files -> two distinct dates and two distinct times
        'test_unique_count': [
            dict(data_name='force', attribute_to_count='date',
                 expected_count=2),
            dict(data_name='force', attribute_to_count='time',
                 expected_count=2)]
    }

    @pytest.mark.parametrize('site, count', [
        ('5S21', 242),
        ('2N12', 154)
    ])
    def test_single_profile_count(self, site, count):
        """
        Ensure filtering by site id returns exactly the records of that
        site's own profile (242 for 5S21, 154 for 2N12).
        """
        # NOTE: previously this also built unused depth/value lists and its
        # docstring claimed "10 points" — the counts are parametrized above.
        records = self.session.query(LayerData).filter(
            LayerData.site_id == site).all()
        assert len(records) == count
class TestUploadRasterBatch(TableTestBase):
    """
    Class testing the batch uploading of rasters.
    """
    args = [['be_gm1_0287/w001001x.adf', 'be_gm1_0328/w001001x.adf']]
    kwargs = {
        'type': 'dem',
        'observers': 'QSI',
        'units': 'meters',
        'epsg': 26912,
        'use_s3': False,
    }
    UploaderClass = UploadRasterBatch
    TableClass = ImageData

    # Maps each generated test name to the keyword arguments it runs with
    params = {
        'test_count': [{'data_name': 'dem', 'expected_count': 32}],
        'test_value': [
            {'data_name': 'dem', 'attribute_to_check': 'observers',
             'filter_attribute': 'id', 'filter_value': 1,
             'expected': 'QSI'},
            {'data_name': 'dem', 'attribute_to_check': 'units',
             'filter_attribute': 'id', 'filter_value': 1,
             'expected': 'meters'},
        ],
        # Dummy input so the generated unique-count test has something to run
        'test_unique_count': [
            {'data_name': 'dem', 'attribute_to_count': 'date',
             'expected_count': 1},
        ],
    }
class TestUploadUAVSARBatch(TableTestBase):
    """
    Test the UAVSAR uploader by providing one .ann (annotation) file, which
    should upload all of the associated UAVSAR images.
    """
    observers = 'UAVSAR team, JPL'
    # Directory containing the geotiffs referenced by the annotation file
    d = join(dirname(__file__), 'data', 'uavsar')
    args = [['uavsar.ann']]
    kwargs = {
        'observers': observers,
        'epsg': 26912,
        'geotiff_dir': d,
        'instrument': 'UAVSAR, L-band InSAR',
        'use_s3': False
    }
    UploaderClass = UploadUAVSARBatch
    TableClass = ImageData

    # Maps each generated test name to the keyword arguments it runs with
    params = {
        'test_count': [
            {'data_name': 'insar amplitude', 'expected_count': 18},
            {'data_name': 'insar correlation', 'expected_count': 9},
            {'data_name': 'insar interferogram real', 'expected_count': 9},
            {'data_name': 'insar interferogram imaginary',
             'expected_count': 9},
        ],
        'test_value': [
            {'data_name': 'insar interferogram imaginary',
             'attribute_to_check': 'observers',
             'filter_attribute': 'units',
             'filter_value': 'Linear Power and Phase in Radians',
             'expected': 'UAVSAR team, JPL'},
            {'data_name': 'insar interferogram real',
             'attribute_to_check': 'units',
             'filter_attribute': 'observers',
             'filter_value': observers,
             'expected': 'Linear Power and Phase in Radians'},
            {'data_name': 'insar correlation',
             'attribute_to_check': 'instrument',
             'filter_attribute': 'observers',
             'filter_value': observers,
             'expected': 'UAVSAR, L-band InSAR'},
        ],
        # The insar amplitude images should span two overpass dates
        'test_unique_count': [
            {'data_name': 'insar amplitude', 'attribute_to_count': 'date',
             'expected_count': 2},
        ],
    }

    def test_uavsar_date(self):
        """
        Github actions was failing on a test pulling 1 of 2 uavsar dates,
        likely because the dates are not in the order the tests expected.
        Querying the minimum date performs the same check while pinning
        down exactly which date is asserted.
        """
        earliest = self.session.query(func.min(ImageData.date)).filter(
            ImageData.type == 'insar amplitude').all()
        assert earliest[0][0] == datetime.date(2020, 2, 1)

    @pytest.mark.parametrize("data_name, kw", [
        # Single-pass products carry these key words in their description
        ('amplitude', ['duration', 'overpass', 'polarization', 'dem']),
        # Derived products additionally reference the 1st and 2nd overpasses
        ('correlation',
         ['duration', 'overpass', '1st', '2nd', 'polarization', 'dem']),
        ('interferogram real',
         ['duration', 'overpass', '1st', '2nd', 'polarization', 'dem']),
        ('interferogram imaginary',
         ['duration', 'overpass', '1st', '2nd', 'polarization', 'dem']),
    ])
    def test_description_generation(self, data_name, kw):
        """
        Asserts each keyword appears in the stored description of the data.
        """
        name = 'insar {}'.format(data_name)
        records = self.session.query(ImageData.description).filter(
            ImageData.type == name).all()
        for keyword in kw:
            assert keyword in records[0][0].lower()