Merge pull request #1311 from Unidata/fix_assert
assert is not a function
jswhit authored Feb 16, 2024
2 parents 98b127b + 39d14b5 commit c7c5f4c
Showing 24 changed files with 76 additions and 76 deletions.
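
The one-line commit message carries the whole rationale: assert is a Python statement, not a function, so the parentheses in assert(expr) are merely redundant grouping around the condition. They become a real hazard once a message is added, because assert(expr, msg) asserts a non-empty two-element tuple, which is always true. A minimal sketch of the pitfall (illustrative values, not code from this repository):

x, y = 1, 2

# The "call" form with a message actually builds the 2-tuple
# (x == y, "x should equal y"); a non-empty tuple is always truthy,
# so this assert can never fail. CPython flags it with
# "SyntaxWarning: assertion is always true, perhaps remove parentheses?"
assert (x == y, "x should equal y")  # passes silently despite x != y

# The statement form, as adopted throughout this PR, checks the
# condition and attaches the message to the AssertionError.
try:
    assert x == y, "x should equal y"
except AssertionError as err:
    print("caught:", err)  # caught: x should equal y
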
12 changes: 6 additions & 6 deletions test/test_Unsigned.py
@@ -34,20 +34,20 @@ def test_unsigned(self):
         # issue 671
         with netCDF4.Dataset(test_dir / "issue671.nc") as f:
             data1 = f['soil_moisture'][:]
-            assert(np.ma.isMA(data1))
+            assert np.ma.isMA(data1)
             f.set_auto_scale(False)
             data2 = f['soil_moisture'][:]
-            assert(data1.mask.sum() == data2.mask.sum())
+            assert data1.mask.sum() == data2.mask.sum()

         # issue 794
         # test that valid_min/valid_max/_FillValue are
         # treated as unsigned integers.
         with netCDF4.Dataset(test_dir / "20171025_2056.Cloud_Top_Height.nc") as f:
             data = f['HT'][:]
-            assert(data.mask.sum() == 57432)
-            assert(int(data.max()) == 15430)
-            assert(int(data.min()) == 0)
-            assert(data.dtype == np.float32)
+            assert data.mask.sum() == 57432
+            assert int(data.max()) == 15430
+            assert int(data.min()) == 0
+            assert data.dtype == np.float32


 if __name__ == '__main__':
6 changes: 3 additions & 3 deletions test/test_chunk_cache.py
@@ -17,7 +17,7 @@ def setUp(self):
         # this change lasts only as long as file is open.
         v = nc.createVariable('frank','f',('fred',),chunk_cache=15000)
         size, nelems, preempt = v.get_var_chunk_cache()
-        assert(size==15000)
+        assert size==15000
         self.file=file_name
         nc.close()

@@ -31,10 +31,10 @@ def runTest(self):
         netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt)
         nc = netCDF4.Dataset(self.file, mode='r')
         # check to see that chunk cache parameters were changed.
-        assert(netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt))
+        assert netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt)
         # change cache parameters for variable, check
         nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2)
-        assert(nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2))
+        assert nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2)
         nc.close()

 if __name__ == '__main__':
8 changes: 4 additions & 4 deletions test/test_compoundatt.py
@@ -63,10 +63,10 @@ def runTest(self):
         assert_array_equal(vv.units['speed'], windunits['speed'].squeeze())
         assert_array_equal(vv.units['direction'],\
                            windunits['direction'].squeeze())
-        assert(v.units['speed'] == b'm/s')
-        assert(v.units['direction'] == b'degrees')
-        assert(vv.units['speed'] == b'm/s')
-        assert(vv.units['direction'] == b'degrees')
+        assert v.units['speed'] == b'm/s'
+        assert v.units['direction'] == b'degrees'
+        assert vv.units['speed'] == b'm/s'
+        assert vv.units['direction'] == b'degrees'
         f.close()

 if __name__ == '__main__':
12 changes: 6 additions & 6 deletions test/test_compoundvar.py
@@ -71,8 +71,8 @@ def setUp(self):
         dataoutg = vv[:]
         assert (cmptype4 == dtype4a) # data type should be aligned
         assert (dataout.dtype == dtype4a) # data type should be aligned
-        assert(list(f.cmptypes.keys()) ==\
-               [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
+        assert list(f.cmptypes.keys()) ==\
+               [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5]
         assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
         assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
         assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
@@ -99,8 +99,8 @@ def runTest(self):
         dataoutg = vv[:]
         # make sure data type is aligned
         assert (f.cmptypes['cmp4'] == dtype4a)
-        assert(list(f.cmptypes.keys()) ==\
-               [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
+        assert list(f.cmptypes.keys()) ==\
+               [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5]
         assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
         assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
         assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
@@ -122,7 +122,7 @@ def runTest(self):
         station_data_t2 = f.createCompoundType(dtype_nest,'station_data')
         f.createDimension('station',None)
         statdat = f.createVariable('station_obs', station_data_t2, ('station',))
-        assert(statdat.dtype == station_data_t2.dtype)
+        assert statdat.dtype == station_data_t2.dtype
         datain = np.empty(2,station_data_t2.dtype_view)
         datain['observation'][:] = (123,314)
         datain['station_name'][:] = ('Boulder','New York')
@@ -132,7 +132,7 @@ def runTest(self):
         f.close()
         f = Dataset(self.file)
         dataout = f['station_obs'][:]
-        assert(dataout.dtype == station_data_t2.dtype_view)
+        assert dataout.dtype == station_data_t2.dtype_view
         assert_array_equal(datain, dataout)
         f.close()
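
A wrinkle visible in this file: in the old two-line asserts the wrapping parentheses doubled as the line continuation, so dropping them means the bare statement needs an explicit backslash, as the new lines show. A small sketch of the spellings (hypothetical names, not from the test):

expected = ['cmp1', 'cmp2', 'cmp3', 'cmp4', 'cmp5']  # hypothetical values
found = list(expected)

# Old form: the outer parentheses also continued the line.
assert(found ==
       expected)

# New form in this PR: a bare assert needs a backslash to continue.
assert found ==\
       expected

# Equivalent alternative: parentheses around the condition alone,
# which keeps the statement form without a backslash.
assert (found ==
        expected)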

12 changes: 6 additions & 6 deletions test/test_compression.py
@@ -103,7 +103,7 @@ def runTest(self):
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':6,'fletcher32':False}
         assert f.variables['data2'].filters() ==\
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':6,'fletcher32':False}
-        assert(size < 0.95*uncompressed_size)
+        assert size < 0.95*uncompressed_size
         f.close()
         # check compression with shuffle
         f = Dataset(self.files[2])
@@ -114,22 +114,22 @@ def runTest(self):
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':False}
         assert f.variables['data2'].filters() ==\
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':False}
-        assert(size < 0.85*uncompressed_size)
+        assert size < 0.85*uncompressed_size
         f.close()
         # check lossy compression without shuffle
         f = Dataset(self.files[3])
         size = os.stat(self.files[3]).st_size
         checkarray = _quantize(array,lsd)
         assert_almost_equal(checkarray,f.variables['data'][:])
         assert_almost_equal(checkarray,f.variables['data2'][:])
-        assert(size < 0.27*uncompressed_size)
+        assert size < 0.27*uncompressed_size
         f.close()
         # check lossy compression with shuffle
         f = Dataset(self.files[4])
         size = os.stat(self.files[4]).st_size
         assert_almost_equal(checkarray,f.variables['data'][:])
         assert_almost_equal(checkarray,f.variables['data2'][:])
-        assert(size < 0.20*uncompressed_size)
+        assert size < 0.20*uncompressed_size
         size_save = size
         f.close()
         # check lossy compression with shuffle and fletcher32 checksum.
@@ -141,9 +141,9 @@ def runTest(self):
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
         assert f.variables['data2'].filters() ==\
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
-        assert(size < 0.20*uncompressed_size)
+        assert size < 0.20*uncompressed_size
         # should be slightly larger than without fletcher32
-        assert(size > size_save)
+        assert size > size_save
         # check chunksizes
         f.close()
         f = Dataset(self.files[6])
2 changes: 1 addition & 1 deletion test/test_compression_bzip2.py
@@ -47,7 +47,7 @@ def runTest(self):
         assert_almost_equal(array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
             {'zlib':False,'szip':False,'zstd':False,'bzip2':True,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False}
-        assert(size < 0.96*uncompressed_size)
+        assert size < 0.96*uncompressed_size
         f.close()

28 changes: 14 additions & 14 deletions test/test_compression_quant.py
@@ -61,7 +61,7 @@ def runTest(self):
         assert_almost_equal(data_array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':complevel,'fletcher32':False}
-        assert(size < 0.95*uncompressed_size)
+        assert size < 0.95*uncompressed_size
         f.close()
         # check compression with shuffle
         f = Dataset(self.files[2])
@@ -70,43 +70,43 @@ def runTest(self):
         assert_almost_equal(data_array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
             {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':complevel,'fletcher32':False}
-        assert(size < 0.85*uncompressed_size)
+        assert size < 0.85*uncompressed_size
         f.close()
         # check lossy compression without shuffle
         f = Dataset(self.files[3])
         size = os.stat(self.files[3]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         #print('compressed lossy no shuffle = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsd,'BitGroom'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.35*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsd,'BitGroom')
+        assert errmax < 1.e-3
+        assert size < 0.35*uncompressed_size
         f.close()
         # check lossy compression with shuffle
         f = Dataset(self.files[4])
         size = os.stat(self.files[4]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         print('compressed lossy with shuffle and standard quantization = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsd,'BitGroom'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.24*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsd,'BitGroom')
+        assert errmax < 1.e-3
+        assert size < 0.24*uncompressed_size
         f.close()
         # check lossy compression with shuffle and alternate quantization
         f = Dataset(self.files[5])
         size = os.stat(self.files[5]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsd,'GranularBitRound'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.24*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsd,'GranularBitRound')
+        assert errmax < 1.e-3
+        assert size < 0.24*uncompressed_size
         f.close()
         # check lossy compression with shuffle and alternate quantization
         f = Dataset(self.files[6])
         size = os.stat(self.files[6]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsb,'BitRound'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.24*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsb,'BitRound')
+        assert errmax < 1.e-3
+        assert size < 0.24*uncompressed_size
         f.close()

 if __name__ == '__main__':
2 changes: 1 addition & 1 deletion test/test_compression_zstd.py
@@ -47,7 +47,7 @@ def runTest(self):
         assert_almost_equal(array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
             {'zlib':False,'szip':False,'zstd':True,'bzip2':False,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False}
-        assert(size < 0.96*uncompressed_size)
+        assert size < 0.96*uncompressed_size
         f.close()

 if __name__ == '__main__':
6 changes: 3 additions & 3 deletions test/test_dap.py
@@ -30,12 +30,12 @@ def runTest(self):
         var = ncfile.variables[varname]
         data = var[0,...]
         assert data.shape == varshape
-        assert(np.abs(data.min()-data_min) < 10)
-        assert(np.abs(data.max()-data_max) < 100)
+        assert np.abs(data.min()-data_min) < 10
+        assert np.abs(data.max()-data_max) < 100
         ncfile.close()
         # test https support (linked curl lib must built with openssl support)
         ncfile = netCDF4.Dataset(URL_https)
-        assert(ncfile['hs'].long_name=='Significant Wave Height')
+        assert ncfile['hs'].long_name=='Significant Wave Height'
         ncfile.close()

 if __name__ == '__main__':
2 changes: 1 addition & 1 deletion test/test_dims.py
@@ -131,7 +131,7 @@ def runTest(self):
         dim_tup1 = (f.dimensions['level'],g.dimensions['lat'],\
                     g.dimensions['lon'],f.dimensions['time'])
         dim_tup2 = vg.get_dims()
-        assert(dim_tup1 == dim_tup2)
+        assert dim_tup1 == dim_tup2
         # check that isunlimited() method works.
         for name,dim in g.dimensions.items():
             self.assertTrue(dim.isunlimited() == unlimdict[name])
4 changes: 2 additions & 2 deletions test/test_diskless.py
@@ -71,10 +71,10 @@ def runTest(self):
         assert_array_almost_equal(foo[:], ranarr)
         assert_array_almost_equal(bar[:], ranarr2)
         # file does not actually exist on disk
-        assert(os.path.isfile(self.file)==False)
+        assert os.path.isfile(self.file)==False
         # open persisted file.
         # first, check that file does actually exist on disk
-        assert(os.path.isfile(self.file2)==True)
+        assert os.path.isfile(self.file2)==True
         f = netCDF4.Dataset(self.file2)
         foo = f.variables['data1']
         # check shape.
4 changes: 2 additions & 2 deletions test/test_enum.py
@@ -82,8 +82,8 @@ def tearDown(self):
     def runTest(self):
         with netCDF4.Dataset(file, 'r') as nc:
             read_var = nc['evar']
-            assert(read_var[...] == self.STORED_VAL)
-            assert(read_et.enum_dict == self.VAL_MAP)
+            assert read_var[...] == self.STORED_VAL
+            assert read_et.enum_dict == self.VAL_MAP

 if __name__ == '__main__':
     unittest.main()
2 changes: 1 addition & 1 deletion test/test_fancyslicing.py
@@ -145,7 +145,7 @@ def test_get(self):
         # slicing with all False booleans (PR #1197)
         iby[:] = False
         data = v[ibx,iby,ibz]
-        assert(data.size == 0)
+        assert data.size == 0

         f.close()

6 changes: 3 additions & 3 deletions test/test_grps2.py
@@ -34,9 +34,9 @@ def runTest(self):
         v2 = ((f.groups['grouped']).groups['data']).variables['v']
         g = f['/grouped/data']
         v3 = g['data2/v2']
-        assert(v1 == v2)
-        assert(g == f.groups['grouped'].groups['data'])
-        assert(v3.name == 'v2')
+        assert v1 == v2
+        assert g == f.groups['grouped'].groups['data']
+        assert v3.name == 'v2'
         f.close()

 if __name__ == '__main__':
2 changes: 1 addition & 1 deletion test/test_issue908.py
@@ -13,7 +13,7 @@ def tearDown(self):

     def runTest(self):
         data = self.nc['rgrid'][:]
-        assert(data.all() is np.ma.masked)
+        assert data.all() is np.ma.masked

 if __name__ == '__main__':
     unittest.main()
4 changes: 2 additions & 2 deletions test/test_masked.py
@@ -142,11 +142,11 @@ def runTest(self):
         assert_array_almost_equal(datamasked[:].filled(),ranarr)
         assert_array_almost_equal(datamasked2[:].filled(),ranarr2)
         assert_array_almost_equal(datapacked[:],packeddata,decimal=4)
-        assert(datapacked3[:].dtype == np.float64)
+        assert datapacked3[:].dtype == np.float64
         # added to test fix to issue 46 (result before r865 was 10)
         assert_array_equal(datapacked2[0],11)
         # added test for issue 515
-        assert(file['v'][0] is np.ma.masked)
+        assert file['v'][0] is np.ma.masked
         file.close()
         # issue 766
         np.seterr(invalid='raise')
6 changes: 3 additions & 3 deletions test/test_masked4.py
@@ -115,9 +115,9 @@ def test_scaled(self):
         data2 = v[:]
         v.set_auto_maskandscale(False)
         data3 = v[:]
-        assert(data1[(data3 < v.valid_min)].mask.sum() == 12)
-        assert(data2[(data3 < v.valid_min)].mask.sum() ==
-               data1[(data3 < v.valid_min)].mask.sum())
+        assert data1[(data3 < v.valid_min)].mask.sum() == 12
+        assert data2[(data3 < v.valid_min)].mask.sum() ==\
+               data1[(data3 < v.valid_min)].mask.sum()


 if __name__ == '__main__':
2 changes: 1 addition & 1 deletion test/test_scaled.py
@@ -187,7 +187,7 @@ def packparams(dmax, dmin, dtyp):
         # error normalized by scale factor
         maxerrnorm = np.max(np.abs((vdata - data) / v.scale_factor))
         # 1e-5 accounts for floating point errors
-        assert(maxerrnorm < 0.5 + 1e-5)
+        assert maxerrnorm < 0.5 + 1e-5
         f.close()

2 changes: 1 addition & 1 deletion test/test_shape.py
@@ -31,7 +31,7 @@ def runTest(self):
         # make sure shape of data array
         # is not changed by assigning it
         # to a netcdf var with one more dimension (issue 90)
-        assert(data.shape == datashape)
+        assert data.shape == datashape
         f.close()

 if __name__ == '__main__':
6 changes: 3 additions & 3 deletions test/test_slicing.py
@@ -102,10 +102,10 @@ def test_0d(self):
         assert_array_equal(v[...], 10)
         assert_equal(v.shape, v[...].shape)
         # issue #785: always return masked array
-        #assert(type(v[...]) == np.ndarray)
-        assert(type(v[...]) == np.ma.core.MaskedArray)
+        #assert type(v[...]) == np.ndarray
+        assert type(v[...]) == np.ma.core.MaskedArray
         f.set_auto_mask(False)
-        assert(type(v[...]) == np.ndarray)
+        assert type(v[...]) == np.ndarray
         f.close()

     def test_issue259(self):
8 changes: 4 additions & 4 deletions test/test_stringarr.py
@@ -70,19 +70,19 @@ def runTest(self):
         assert_array_equal(data3,datau)
         # these slices should return a char array, not a string array
         data4 = v2[:,:,0]
-        assert(data4.dtype.itemsize == 1)
+        assert data4.dtype.itemsize == 1
         assert_array_equal(data4, datac[:,:,0])
         data5 = v2[0,0:nchar,0]
-        assert(data5.dtype.itemsize == 1)
+        assert data5.dtype.itemsize == 1
         assert_array_equal(data5, datac[0,0:nchar,0])
         # test turning auto-conversion off.
         v2.set_auto_chartostring(False)
         data6 = v2[:]
-        assert(data6.dtype.itemsize == 1)
+        assert data6.dtype.itemsize == 1
         assert_array_equal(data6, datac)
         nc.set_auto_chartostring(False)
         data7 = v3[:]
-        assert(data7.dtype.itemsize == 1)
+        assert data7.dtype.itemsize == 1
         assert_array_equal(data7, datac)
         nc.close()

