diff --git a/test/test_Unsigned.py b/test/test_Unsigned.py
index e2999fe59..f21cf173f 100644
--- a/test/test_Unsigned.py
+++ b/test/test_Unsigned.py
@@ -34,20 +34,20 @@ def test_unsigned(self):
         # issue 671
         with netCDF4.Dataset(test_dir / "issue671.nc") as f:
             data1 = f['soil_moisture'][:]
-            assert(np.ma.isMA(data1))
+            assert np.ma.isMA(data1)
             f.set_auto_scale(False)
             data2 = f['soil_moisture'][:]
-            assert(data1.mask.sum() == data2.mask.sum())
+            assert data1.mask.sum() == data2.mask.sum()
         # issue 794
         # test that valid_min/valid_max/_FillValue are
         # treated as unsigned integers.
         with netCDF4.Dataset(test_dir / "20171025_2056.Cloud_Top_Height.nc") as f:
             data = f['HT'][:]
-            assert(data.mask.sum() == 57432)
-            assert(int(data.max()) == 15430)
-            assert(int(data.min()) == 0)
-            assert(data.dtype == np.float32)
+            assert data.mask.sum() == 57432
+            assert int(data.max()) == 15430
+            assert int(data.min()) == 0
+            assert data.dtype == np.float32
 
 
 if __name__ == '__main__':
diff --git a/test/test_chunk_cache.py b/test/test_chunk_cache.py
index 129ce7128..ef2d56b6e 100644
--- a/test/test_chunk_cache.py
+++ b/test/test_chunk_cache.py
@@ -17,7 +17,7 @@ def setUp(self):
         # this change lasts only as long as file is open.
         v = nc.createVariable('frank','f',('fred',),chunk_cache=15000)
         size, nelems, preempt = v.get_var_chunk_cache()
-        assert(size==15000)
+        assert size==15000
         self.file=file_name
         nc.close()
 
@@ -31,10 +31,10 @@ def runTest(self):
         netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt)
         nc = netCDF4.Dataset(self.file, mode='r')
         # check to see that chunk cache parameters were changed.
-        assert(netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt))
+        assert netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt)
         # change cache parameters for variable, check
         nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2)
-        assert(nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2))
+        assert nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2)
         nc.close()
 
 if __name__ == '__main__':
diff --git a/test/test_compoundatt.py b/test/test_compoundatt.py
index fa90e7d0f..e27a05721 100644
--- a/test/test_compoundatt.py
+++ b/test/test_compoundatt.py
@@ -63,10 +63,10 @@ def runTest(self):
         assert_array_equal(vv.units['speed'], windunits['speed'].squeeze())
         assert_array_equal(vv.units['direction'],\
                            windunits['direction'].squeeze())
-        assert(v.units['speed'] == b'm/s')
-        assert(v.units['direction'] == b'degrees')
-        assert(vv.units['speed'] == b'm/s')
-        assert(vv.units['direction'] == b'degrees')
+        assert v.units['speed'] == b'm/s'
+        assert v.units['direction'] == b'degrees'
+        assert vv.units['speed'] == b'm/s'
+        assert vv.units['direction'] == b'degrees'
         f.close()
 
 if __name__ == '__main__':
diff --git a/test/test_compoundvar.py b/test/test_compoundvar.py
index 72b689206..5e1780f9e 100644
--- a/test/test_compoundvar.py
+++ b/test/test_compoundvar.py
@@ -71,8 +71,8 @@ def setUp(self):
         dataoutg = vv[:]
         assert (cmptype4 == dtype4a) # data type should be aligned
         assert (dataout.dtype == dtype4a) # data type should be aligned
-        assert(list(f.cmptypes.keys()) ==\
-        [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
+        assert list(f.cmptypes.keys()) ==\
+        [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5]
         assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
         assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
         assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
@@ -99,8 +99,8 @@ def runTest(self):
         dataoutg = vv[:]
         # make sure data type is aligned
         assert (f.cmptypes['cmp4'] == dtype4a)
-        assert(list(f.cmptypes.keys()) ==\
-        [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
+        assert list(f.cmptypes.keys()) ==\
+        [TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5]
         assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
         assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
         assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
@@ -122,7 +122,7 @@ def runTest(self):
         station_data_t2 = f.createCompoundType(dtype_nest,'station_data')
         f.createDimension('station',None)
         statdat = f.createVariable('station_obs', station_data_t2, ('station',))
-        assert(statdat.dtype == station_data_t2.dtype)
+        assert statdat.dtype == station_data_t2.dtype
         datain = np.empty(2,station_data_t2.dtype_view)
         datain['observation'][:] = (123,314)
         datain['station_name'][:] = ('Boulder','New York')
@@ -132,7 +132,7 @@ def runTest(self):
         f.close()
         f = Dataset(self.file)
         dataout = f['station_obs'][:]
-        assert(dataout.dtype == station_data_t2.dtype_view)
+        assert dataout.dtype == station_data_t2.dtype_view
         assert_array_equal(datain, dataout)
         f.close()
 
diff --git a/test/test_compression.py b/test/test_compression.py
index f28ed0b96..78827ddff 100644
--- a/test/test_compression.py
+++ b/test/test_compression.py
@@ -103,7 +103,7 @@ def runTest(self):
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':6,'fletcher32':False}
         assert f.variables['data2'].filters() ==\
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':6,'fletcher32':False}
-        assert(size < 0.95*uncompressed_size)
+        assert size < 0.95*uncompressed_size
         f.close()
         # check compression with shuffle
         f = Dataset(self.files[2])
@@ -114,7 +114,7 @@ def runTest(self):
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':False}
         assert f.variables['data2'].filters() ==\
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':False}
-        assert(size < 0.85*uncompressed_size)
+        assert size < 0.85*uncompressed_size
         f.close()
         # check lossy compression without shuffle
         f = Dataset(self.files[3])
@@ -122,14 +122,14 @@ def runTest(self):
         checkarray = _quantize(array,lsd)
         assert_almost_equal(checkarray,f.variables['data'][:])
         assert_almost_equal(checkarray,f.variables['data2'][:])
-        assert(size < 0.27*uncompressed_size)
+        assert size < 0.27*uncompressed_size
         f.close()
         # check lossy compression with shuffle
         f = Dataset(self.files[4])
         size = os.stat(self.files[4]).st_size
         assert_almost_equal(checkarray,f.variables['data'][:])
         assert_almost_equal(checkarray,f.variables['data2'][:])
-        assert(size < 0.20*uncompressed_size)
+        assert size < 0.20*uncompressed_size
         size_save = size
         f.close()
         # check lossy compression with shuffle and fletcher32 checksum.
@@ -141,9 +141,9 @@ def runTest(self):
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
         assert f.variables['data2'].filters() ==\
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
-        assert(size < 0.20*uncompressed_size)
+        assert size < 0.20*uncompressed_size
         # should be slightly larger than without fletcher32
-        assert(size > size_save)
+        assert size > size_save
         # check chunksizes
         f.close()
         f = Dataset(self.files[6])
diff --git a/test/test_compression_bzip2.py b/test/test_compression_bzip2.py
index fb0bd3162..75c0fced1 100644
--- a/test/test_compression_bzip2.py
+++ b/test/test_compression_bzip2.py
@@ -47,7 +47,7 @@ def runTest(self):
         assert_almost_equal(array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
         {'zlib':False,'szip':False,'zstd':False,'bzip2':True,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False}
-        assert(size < 0.96*uncompressed_size)
+        assert size < 0.96*uncompressed_size
         f.close()
 
diff --git a/test/test_compression_quant.py b/test/test_compression_quant.py
index 10c801592..3654bf9d5 100644
--- a/test/test_compression_quant.py
+++ b/test/test_compression_quant.py
@@ -61,7 +61,7 @@ def runTest(self):
         assert_almost_equal(data_array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':complevel,'fletcher32':False}
-        assert(size < 0.95*uncompressed_size)
+        assert size < 0.95*uncompressed_size
         f.close()
         # check compression with shuffle
         f = Dataset(self.files[2])
@@ -70,43 +70,43 @@ def runTest(self):
         assert_almost_equal(data_array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
         {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':complevel,'fletcher32':False}
-        assert(size < 0.85*uncompressed_size)
+        assert size < 0.85*uncompressed_size
         f.close()
         # check lossy compression without shuffle
         f = Dataset(self.files[3])
         size = os.stat(self.files[3]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         #print('compressed lossy no shuffle = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsd,'BitGroom'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.35*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsd,'BitGroom')
+        assert errmax < 1.e-3
+        assert size < 0.35*uncompressed_size
         f.close()
         # check lossy compression with shuffle
         f = Dataset(self.files[4])
         size = os.stat(self.files[4]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         print('compressed lossy with shuffle and standard quantization = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsd,'BitGroom'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.24*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsd,'BitGroom')
+        assert errmax < 1.e-3
+        assert size < 0.24*uncompressed_size
         f.close()
         # check lossy compression with shuffle and alternate quantization
         f = Dataset(self.files[5])
         size = os.stat(self.files[5]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsd,'GranularBitRound'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.24*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsd,'GranularBitRound')
+        assert errmax < 1.e-3
+        assert size < 0.24*uncompressed_size
         f.close()
         # check lossy compression with shuffle and alternate quantization
         f = Dataset(self.files[6])
         size = os.stat(self.files[6]).st_size
         errmax = (np.abs(data_array-f.variables['data'][:])).max()
         print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax)
-        assert(f.variables['data'].quantization() == (nsb,'BitRound'))
-        assert(errmax < 1.e-3)
-        assert(size < 0.24*uncompressed_size)
+        assert f.variables['data'].quantization() == (nsb,'BitRound')
+        assert errmax < 1.e-3
+        assert size < 0.24*uncompressed_size
         f.close()
 
 if __name__ == '__main__':
diff --git a/test/test_compression_zstd.py b/test/test_compression_zstd.py
index dfbccfc4c..9f4259fd0 100644
--- a/test/test_compression_zstd.py
+++ b/test/test_compression_zstd.py
@@ -47,7 +47,7 @@ def runTest(self):
         assert_almost_equal(array,f.variables['data'][:])
         assert f.variables['data'].filters() ==\
         {'zlib':False,'szip':False,'zstd':True,'bzip2':False,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False}
-        assert(size < 0.96*uncompressed_size)
+        assert size < 0.96*uncompressed_size
         f.close()
 
 if __name__ == '__main__':
diff --git a/test/test_dap.py b/test/test_dap.py
index 9e337b9c5..8d2ea664e 100644
--- a/test/test_dap.py
+++ b/test/test_dap.py
@@ -30,12 +30,12 @@ def runTest(self):
         var = ncfile.variables[varname]
         data = var[0,...]
         assert data.shape == varshape
-        assert(np.abs(data.min()-data_min) < 10)
-        assert(np.abs(data.max()-data_max) < 100)
+        assert np.abs(data.min()-data_min) < 10
+        assert np.abs(data.max()-data_max) < 100
         ncfile.close()
         # test https support (linked curl lib must built with openssl support)
         ncfile = netCDF4.Dataset(URL_https)
-        assert(ncfile['hs'].long_name=='Significant Wave Height')
+        assert ncfile['hs'].long_name=='Significant Wave Height'
         ncfile.close()
 
 if __name__ == '__main__':
diff --git a/test/test_dims.py b/test/test_dims.py
index 7af5d2ad7..2e1f95ebd 100644
--- a/test/test_dims.py
+++ b/test/test_dims.py
@@ -131,7 +131,7 @@ def runTest(self):
         dim_tup1 = (f.dimensions['level'],g.dimensions['lat'],\
                     g.dimensions['lon'],f.dimensions['time'])
         dim_tup2 = vg.get_dims()
-        assert(dim_tup1 == dim_tup2)
+        assert dim_tup1 == dim_tup2
         # check that isunlimited() method works.
         for name,dim in g.dimensions.items():
             self.assertTrue(dim.isunlimited() == unlimdict[name])
diff --git a/test/test_diskless.py b/test/test_diskless.py
index 898d345fb..faeba36af 100644
--- a/test/test_diskless.py
+++ b/test/test_diskless.py
@@ -71,10 +71,10 @@ def runTest(self):
         assert_array_almost_equal(foo[:], ranarr)
         assert_array_almost_equal(bar[:], ranarr2)
         # file does not actually exist on disk
-        assert(os.path.isfile(self.file)==False)
+        assert os.path.isfile(self.file)==False
         # open persisted file.
         # first, check that file does actually exist on disk
-        assert(os.path.isfile(self.file2)==True)
+        assert os.path.isfile(self.file2)==True
         f = netCDF4.Dataset(self.file2)
         foo = f.variables['data1']
         # check shape.
diff --git a/test/test_enum.py b/test/test_enum.py
index 6157dde24..0ab42ec6e 100644
--- a/test/test_enum.py
+++ b/test/test_enum.py
@@ -82,8 +82,8 @@ def tearDown(self):
     def runTest(self):
         with netCDF4.Dataset(file, 'r') as nc:
             read_var = nc['evar']
-            assert(read_var[...] == self.STORED_VAL)
-            assert(read_et.enum_dict == self.VAL_MAP)
+            assert read_var[...] == self.STORED_VAL
+            assert read_et.enum_dict == self.VAL_MAP
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/test_fancyslicing.py b/test/test_fancyslicing.py
index 38611b3cb..c3e89fd1b 100644
--- a/test/test_fancyslicing.py
+++ b/test/test_fancyslicing.py
@@ -145,7 +145,7 @@ def test_get(self):
         # slicing with all False booleans (PR #1197)
         iby[:] = False
         data = v[ibx,iby,ibz]
-        assert(data.size == 0)
+        assert data.size == 0
         f.close()
 
diff --git a/test/test_grps2.py b/test/test_grps2.py
index f157f601f..e76865dfe 100644
--- a/test/test_grps2.py
+++ b/test/test_grps2.py
@@ -34,9 +34,9 @@ def runTest(self):
         v2 = ((f.groups['grouped']).groups['data']).variables['v']
         g = f['/grouped/data']
         v3 = g['data2/v2']
-        assert(v1 == v2)
-        assert(g == f.groups['grouped'].groups['data'])
-        assert(v3.name == 'v2')
+        assert v1 == v2
+        assert g == f.groups['grouped'].groups['data']
+        assert v3.name == 'v2'
         f.close()
 
 if __name__ == '__main__':
diff --git a/test/test_issue908.py b/test/test_issue908.py
index d07746dff..b86be5a6a 100644
--- a/test/test_issue908.py
+++ b/test/test_issue908.py
@@ -13,7 +13,7 @@ def tearDown(self):
 
     def runTest(self):
         data = self.nc['rgrid'][:]
-        assert(data.all() is np.ma.masked)
+        assert data.all() is np.ma.masked
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/test_masked.py b/test/test_masked.py
index 0794ecded..0775f29ad 100644
--- a/test/test_masked.py
+++ b/test/test_masked.py
@@ -142,11 +142,11 @@ def runTest(self):
         assert_array_almost_equal(datamasked[:].filled(),ranarr)
         assert_array_almost_equal(datamasked2[:].filled(),ranarr2)
         assert_array_almost_equal(datapacked[:],packeddata,decimal=4)
-        assert(datapacked3[:].dtype == np.float64)
+        assert datapacked3[:].dtype == np.float64
         # added to test fix to issue 46 (result before r865 was 10)
         assert_array_equal(datapacked2[0],11)
         # added test for issue 515
-        assert(file['v'][0] is np.ma.masked)
+        assert file['v'][0] is np.ma.masked
         file.close()
         # issue 766
         np.seterr(invalid='raise')
diff --git a/test/test_masked4.py b/test/test_masked4.py
index d2649958d..14dd14fc4 100755
--- a/test/test_masked4.py
+++ b/test/test_masked4.py
@@ -115,9 +115,9 @@ def test_scaled(self):
         data2 = v[:]
         v.set_auto_maskandscale(False)
         data3 = v[:]
-        assert(data1[(data3 < v.valid_min)].mask.sum() == 12)
-        assert(data2[(data3 < v.valid_min)].mask.sum() ==
-               data1[(data3 < v.valid_min)].mask.sum())
+        assert data1[(data3 < v.valid_min)].mask.sum() == 12
+        assert data2[(data3 < v.valid_min)].mask.sum() ==\
+               data1[(data3 < v.valid_min)].mask.sum()
 
 
 if __name__ == '__main__':
diff --git a/test/test_scaled.py b/test/test_scaled.py
index 5020beb93..4a73ba3f7 100755
--- a/test/test_scaled.py
+++ b/test/test_scaled.py
@@ -187,7 +187,7 @@ def packparams(dmax, dmin, dtyp):
         # error normalized by scale factor
         maxerrnorm = np.max(np.abs((vdata - data) / v.scale_factor))
         # 1e-5 accounts for floating point errors
-        assert(maxerrnorm < 0.5 + 1e-5)
+        assert maxerrnorm < 0.5 + 1e-5
         f.close()
 
diff --git a/test/test_shape.py b/test/test_shape.py
index cba8ca5f9..5a924491b 100644
--- a/test/test_shape.py
+++ b/test/test_shape.py
@@ -31,7 +31,7 @@ def runTest(self):
         # make sure shape of data array
         # is not changed by assigning it
         # to a netcdf var with one more dimension (issue 90)
-        assert(data.shape == datashape)
+        assert data.shape == datashape
         f.close()
 
 if __name__ == '__main__':
diff --git a/test/test_slicing.py b/test/test_slicing.py
index 1b5c0bde2..8d3f88b7d 100644
--- a/test/test_slicing.py
+++ b/test/test_slicing.py
@@ -102,10 +102,10 @@ def test_0d(self):
         assert_array_equal(v[...], 10)
         assert_equal(v.shape, v[...].shape)
         # issue #785: always return masked array
-        #assert(type(v[...]) == np.ndarray)
-        assert(type(v[...]) == np.ma.core.MaskedArray)
+        #assert type(v[...]) == np.ndarray
+        assert type(v[...]) == np.ma.core.MaskedArray
         f.set_auto_mask(False)
-        assert(type(v[...]) == np.ndarray)
+        assert type(v[...]) == np.ndarray
         f.close()
 
     def test_issue259(self):
diff --git a/test/test_stringarr.py b/test/test_stringarr.py
index 6cbe0a684..8b57eceaa 100644
--- a/test/test_stringarr.py
+++ b/test/test_stringarr.py
@@ -70,19 +70,19 @@ def runTest(self):
         assert_array_equal(data3,datau)
         # these slices should return a char array, not a string array
         data4 = v2[:,:,0]
-        assert(data4.dtype.itemsize == 1)
+        assert data4.dtype.itemsize == 1
         assert_array_equal(data4, datac[:,:,0])
         data5 = v2[0,0:nchar,0]
-        assert(data5.dtype.itemsize == 1)
+        assert data5.dtype.itemsize == 1
         assert_array_equal(data5, datac[0,0:nchar,0])
         # test turning auto-conversion off.
         v2.set_auto_chartostring(False)
         data6 = v2[:]
-        assert(data6.dtype.itemsize == 1)
+        assert data6.dtype.itemsize == 1
         assert_array_equal(data6, datac)
         nc.set_auto_chartostring(False)
         data7 = v3[:]
-        assert(data7.dtype.itemsize == 1)
+        assert data7.dtype.itemsize == 1
         assert_array_equal(data7, datac)
         nc.close()
diff --git a/test/test_types.py b/test/test_types.py
index 1fc290957..0bd910a3f 100644
--- a/test/test_types.py
+++ b/test/test_types.py
@@ -81,7 +81,7 @@ def runTest(self):
         v2 = f.variables['issue273']
         assert type(v2._FillValue) == bytes
         assert v2._FillValue == b'\x00'
-        assert(str(issue273_data) == str(v2[:]))
+        assert str(issue273_data) == str(v2[:])
         # issue 707 (don't apply missing_value if cast to variable type is
         # unsafe)
         v3 = f.variables['issue707']
diff --git a/test/test_unicodeatt.py b/test/test_unicodeatt.py
index 61345950d..50927f95a 100644
--- a/test/test_unicodeatt.py
+++ b/test/test_unicodeatt.py
@@ -23,12 +23,12 @@ def tearDown(self):
     def runTest(self):
         """testing unicode attributes"""
         nc = Dataset(self.file, 'r')
-        assert(nc.stratt.encode('utf-8') == b'\xe6\xb7\xb1\xe5\x85\xa5 Python')
+        assert nc.stratt.encode('utf-8') == b'\xe6\xb7\xb1\xe5\x85\xa5 Python'
         stratt2 = nc.getncattr('stratt2',encoding='big5') # decodes using big5
         stratt3 = nc.getncattr('stratt3',encoding='big5') # same as above
-        assert(stratt2.encode('big5') == b'\xb2`\xa4J Python')
-        assert(nc.stratt == stratt2) # decoded strings are the same
-        assert(nc.stratt == stratt3) # decoded strings are the same
+        assert stratt2.encode('big5') == b'\xb2`\xa4J Python'
+        assert nc.stratt == stratt2 # decoded strings are the same
+        assert nc.stratt == stratt3 # decoded strings are the same
         nc.close()
 
 if __name__ == '__main__':
diff --git a/test/test_vlen.py b/test/test_vlen.py
index 9e51bfb2d..1e1d89f72 100644
--- a/test/test_vlen.py
+++ b/test/test_vlen.py
@@ -70,7 +70,7 @@ def runTest(self):
         data2 = v[:]
         data2s = vs[:]
         # issue #1306
-        assert repr(vs[[0,2,3],0]) == "array(['ab', 'abcdefghijkl', 'abcdefghijklmnopq'], dtype=object)"
+        assert repr(vs[[0,2,3],0]) == "array(['ab', 'abcdefghijkl', 'abcdefghijklmnopq'], dtype=object)"
         for i in range(nlons):
             for j in range(nlats):
                 assert_array_equal(data2[j,i], data[j,i])
@@ -219,11 +219,11 @@ def runTest(self):
         data = nc['vl'][-1]
         # check max error of compression
         err = np.abs(data - self.data)
-        assert(err.max() < nc['vl'].scale_factor)
+        assert err.max() < nc['vl'].scale_factor
         # turn off auto-scaling
         nc.set_auto_maskandscale(False)
         data = nc['vl'][-1]
-        assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
+        assert data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)
         nc.close()
 
 if __name__ == '__main__':