diff --git a/build/lib/CMS/core/cmslib.py b/build/lib/CMS/core/cmslib.py
index 6d107e5..203d202 100644
--- a/build/lib/CMS/core/cmslib.py
+++ b/build/lib/CMS/core/cmslib.py
@@ -91,12 +91,12 @@ def nlevinson(acc):
-_cmslib.scan4d.argtypes = [c_dPt, c_i32Pt, c_dPt,c_int32, c_int32, c_int32, c_int32, c_int64, c_int64]
-_cmslib.detect4d.argtypes = [c_dPt, c_dPt,c_i64Pt,c_int32, c_int32, c_int32, c_int64, c_int64]
-_cmslib.detect4d_t.argtypes = [c_dPt, c_dPt, c_i64Pt,c_int32, c_int32, c_int32, c_int64, c_int64]
+_cmslib.scan4d.argtypes = [c_dPt, c_i32Pt, c_dPt, c_int32, c_int32, c_int32, c_int32, c_int64, c_int64]
+_cmslib.detect4d.argtypes = [c_dPt, c_dPt, c_i64Pt, c_int32, c_int32, c_int32, c_int64, c_int64]
+# _cmslib.detect4d_t.argtypes = [c_dPt, c_dPt, c_i64Pt, c_int32, c_int32, c_int32, c_int64, c_int64]

-def scan(sig, tt, fsmp, lsmp, map4d,threads):
-    nstn, nsamp = sig.shape
+def scan(sig, tt, fsmp, lsmp, nsamp, map4d, threads):
+    nstn, ssmp = sig.shape
     if not tt.shape[-1] == nstn:
         raise ValueError('Mismatch between number of stations for data and LUT, {} - {}.'.format(
             nstn, tt.shape[-1]))
@@ -104,24 +104,27 @@ def scan(sig, tt, fsmp, lsmp, map4d,threads):
     tcell = np.prod(ncell)
     if map4d.size < nsamp*tcell:
         raise ValueError('4D-Array is too small.')
-    _cmslib.scan4d(sig, tt, map4d, c_int32(fsmp), c_int32(lsmp),
-                   c_int32(nsamp), c_int32(nstn), c_int64(tcell), c_int64(threads))
+    if sig.size < nsamp + fsmp:
+        raise ValueError('Data array smaller than Coalescence array')

-def detect(mmap, dsnr, dind, fsmp, lsmp, threads):
+    _cmslib.scan4d(sig, tt, map4d, c_int32(fsmp), c_int32(lsmp), c_int32(nsamp), c_int32(nstn), c_int64(tcell), c_int64(threads))
+
+
+def detect(mmap, dsnr, dind, fsmp, lsmp, threads):
     nsamp = mmap.shape[-1]
     ncell = np.prod(mmap.shape[:-1])
     if dsnr.size < nsamp or dind.size < nsamp:
         raise ValueError('Output array size too small, sample count = {}.'.format(nsamp))
-    _cmslib.detect4d(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
-                     c_int32(nsamp), c_int64(ncell), c_int64(threads))
+    _cmslib.detect4d(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp), c_int32(nsamp), c_int64(ncell), c_int64(threads))

-def detect_t(mmap, dsnr, dind, fsmp, lsmp, threads):
-    nsamp = mmap.shape[0]
-    ncell = np.prod(mmap.shape[1:])
-    if dsnr.size < nsamp or dind.size < nsamp:
-        raise ValueError('Ouput array size too small, sample count = {}.'.format(nsamp))
-    _cmslib.detect4d_t(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
-                       c_int32(nsamp), c_int64(ncell), c_int64(threads))
+# def detect_t(mmap, dsnr, dind, fsmp, lsmp, threads):
+#     nsamp = mmap.shape[0]
+#     ncell = np.prod(mmap.shape[1:])
+#     if dsnr.size < nsamp or dind.size < nsamp:
+#         raise ValueError('Output array size too small, sample count = {}.'.format(nsamp))
+#     _cmslib.detect4d_t(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
+#                        c_int32(nsamp), c_int64(ncell), c_int64(threads))
diff --git a/build/lib/CMS/core/model.py b/build/lib/CMS/core/model.py
index 19204e4..8ba5bc3 100644
--- a/build/lib/CMS/core/model.py
+++ b/build/lib/CMS/core/model.py
@@ -12,7 +12,7 @@
 import numpy as np
 import pyproj
-from scipy.interpolate import RectBivariateSpline, RegularGridInterpolator, griddata
+from scipy.interpolate import RectBivariateSpline, RegularGridInterpolator, griddata, interp1d
 import matplotlib
 matplotlib.use('Agg')
 import matplotlib.pylab as plt
@@ -83,14 +83,30 @@ def _proj_wgs84():
 def _proj_nad27():
     return pyproj.Proj("+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs")  # "+init=EPSG:4267"

+def _utm_zone(longitude):
+    return (int(1 + math.fmod((longitude + 180.0) / 6.0, 60)))
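+# Quick sanity check of the zone formula (illustrative comment only, not part
+# of the original module): UTM zones are 6 degrees wide, numbered eastward
+# from 180W, so e.g. _utm_zone(-3.0) == 30 and _utm_zone(3.0) == 31.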

 def _proj_wgs84_utm(longitude):
-    zone = (int(1 + math.fmod((longitude + 180.0) / 6.0, 60)))
+    zone = _utm_zone(longitude)
     return pyproj.Proj("+proj=utm +zone={0:d} +datum=WGS84 +units=m +no_defs".format(zone))

+def _proj_wgs84_lambertcc(lon_Org,lat_Org,lat_1pl,lat_2pl):
+    return pyproj.Proj("+proj=lcc +lon_0={} +lat_0={} +lat_1={} +lat_2={} +datum=WGS84 +units=m +no_defs".format(float(lon_Org),float(lat_Org),float(lat_1pl),float(lat_2pl)))

-def eikonal(x,y,z,V,S):
-    '''
+def _proj_wgs84_tm(lon_Org,lat_Org):
+    return pyproj.Proj("+proj=tmerc +lon_0={} +lat_0={} +datum=WGS84 +units=m +no_defs".format(float(lon_Org),float(lat_Org)))
+
+
+# def _proj_nlloc_simple(latOrg,lonOrg,rotAngle):
+#     x = (long - longOrig) * 111.111 * cos(lat_radians)
+#     y = (lat - latOrig) * 111.111
+#     lat = latOrig + y / 111.111
+#     long = longOrig + x / (111.111 * cos(lat_radians))
+
+
+def eikonal(ix,iy,iz,dxi,dyi,dzi,V,S):
+    '''
     Travel-Time formulation using a simple eikonal method. Requires the
     scikit-fmm python package (imported as skfmm).
@@ -102,41 +118,13 @@ def eikonal(x,y,z,V,S):
        S - Definition of the station location in grid

     Outputs:
-       t - Travel-time numpy array
+       t - Travel-time numpy array
     '''
-    t=[]
-
-    dx = float(x[1]-x[0])
-    phi = -1*np.ones_like(V)
-
-    if S.ndim==1:
-        S=np.array([S]);
-        ns=1
-        ns, ndim = S.shape
-    else:
-        ns, ndim = S.shape
-
-    for i in range(ns):
-        # get location of source
-        #print(i)
-        ix = np.abs(x-S[i,0]).argmin();
-        if ndim>1:
-            iy = np.abs(y-S[i,1]).argmin();
-        if ndim>2:
-            iz = np.abs(z-S[i,2]).argmin();
-
-        if ndim>2:
-            phi[iy,ix,iz]=1;
-        elif ndim>1:
-            phi[iy,ix]=1;
-        else:
-            phi[ix]=1;
-
-        t_comp = skfmm.travel_time(phi, V, dx)
-
-        t.append(t_comp)
-
+    phi = -np.ones(ix.shape)
+    indx = np.argmin(abs((ix - S[:,0])) + abs((iy - S[:,1])) + abs((iz - S[:,2])))
+    phi[np.unravel_index(indx,ix.shape)] = 1.0
+    t = skfmm.travel_time(phi,V,dx=[dxi,dyi,dzi])
     return t
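+# Minimal usage sketch (illustrative only; every name below is an assumption,
+# not part of the module API):
+#   ix, iy, iz = np.meshgrid(np.arange(nx)*dx, np.arange(ny)*dy,
+#                            np.arange(nz)*dz, indexing='ij')
+#   V = np.full(ix.shape, 5000.0)      # homogeneous 5 km/s velocity grid
+#   S = np.array([[sx, sy, sz]])       # a single station, grid coordinates
+#   tt = eikonal(ix, iy, iz, dx, dy, dz, V, S)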
@@ -160,6 +148,8 @@ def __init__(self, center=np.array([10000.0, 10000.0, -5000.0]), cell_count=np.a
         self.grid_azimuth = azimuth
         self.grid_dip = dip
         self.sort_order = sort_order
+        self.UTM_zones_different = False
+        self.lcc_standard_parallels = (0.0,0.0)

     @property
     def grid_center(self):
@@ -241,6 +231,15 @@ def set_proj(self, coord_proj=None, grid_proj=None):
             self._grid_proj = grid_proj
             self._update_coord()

+    def _nlloc_grid_proj(self):
+        if self.NLLoc_proj:
+            if self.NLLoc_proj == 'SIMPLE':
+                raise NotImplementedError('NLLoc SIMPLE projection is not yet supported')
+            elif self.NLLoc_proj == 'LAMBERT':
+                return _proj_wgs84_lambertcc(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1],self.NLLoc_MapOrg[4],self.NLLoc_MapOrg[5])
+            elif self.NLLoc_proj == 'TRANS_MERC':
+                return _proj_wgs84_tm(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1])
+
     def get_grid_proj(self):
         if self._grid_proj is None:
             warnings.warn("Grid Projection has not been set: Assuming WGS84")
@@ -274,6 +273,23 @@ def _update_coord(self):
         else:
             return False

+    def get_NLLOC_gridcenter(self,NLLOCorg_lon,NLLOCorg_lat):
+        self._longitude = NLLOCorg_lon
+        self._coord_proj = _proj_wgs84()
+        if self.NLLoc_proj != 'NONE':
+            self._grid_proj = self._nlloc_grid_proj()
+        self.grid_origin_xy = self.lonlat2xy(NLLOCorg_lon,NLLOCorg_lat)
+        self._grid_center[0],self._grid_center[1] = (self.grid_origin_xy[0]+self.center[0],self.grid_origin_xy[1]+self.center[1])
+        self._longitude,self._latitude = self.xy2lonlat(self._grid_center[0],self._grid_center[1])
+        # if _utm_zone(self.longitude) != _utm_zone(NLLOCorg_lon):
+        #     self.UTM_zones_different = True
+        #     self._coord_proj = _proj_wgs84()
+        #     self._grid_proj = _proj_wgs84_utm(self.longitude)
+        #     self.grid_origin_xy = self.lonlat2xy(NLLOCorg_lon,NLLOCorg_lat)
+        #     self._grid_center[0],self._grid_center[1] = (self.grid_origin_xy[0]+self.center[0],self.grid_origin_xy[1]+self.center[1])
+        #     self._longitude,self._latitude = self.xy2lonlat(self._grid_center[0],self._grid_center[1])
+        self._update_grid_center()
+
     def set_lonlat(self, longitude=None, latitude=None, coord_proj=None, grid_proj=None):
         if coord_proj:
             self._coord_proj = coord_proj
@@ -285,9 +301,16 @@ def set_lonlat(self, longitude=None, latitude=None, coord_proj=None, grid_proj=N
         self._longitude = longitude
         self._update_grid_center()

-    def setproj_wgs84(self):
+    def setproj_wgs84(self,proj):
         self._coord_proj = _proj_wgs84()
-        self._grid_proj = _proj_wgs84_utm(self.longitude)
+        if proj == 'UTM':
+            self._grid_proj = _proj_wgs84_utm(self.longitude)
+        elif proj == 'LCC':
+            self._grid_proj = _proj_wgs84_lambertcc(self.longitude,self.latitude,self.lcc_standard_parallels[0],self.lcc_standard_parallels[1])
+        elif proj == 'TM':
+            self._grid_proj = _proj_wgs84_tm(self.longitude,self.latitude)
+        else:
+            raise Exception('Projection type must be specified! CMS currently supports UTM, LCC (Lambert Conic Conformal) or TM (Transverse Mercator)')
         if not self._update_grid_center():
             self._update_coord()

@@ -333,10 +356,32 @@ def xyz2coord(self, loc):
     def loc2coord(self,loc):
         return self.xyz2coord(self.loc2xyz(loc))

+    def coord2loc(self,loc):
+        return self.xyz2loc(self.coord2xyz(loc))
+
     def coord2xyz(self, loc):
         X, Y = self.lonlat2xy(loc[:,0], loc[:,1])
-        return np.array([X, Y, loc[:,2]]).transpose()
+        Z = loc[:,2]
+
+        Bounds = self.get_grid_xyz()
+        Xmin,Ymin,Zmin = np.min(Bounds,axis=0)
+        Xmax,Ymax,Zmax = np.max(Bounds,axis=0)
+
+        if X < Xmin:
+            X = np.array([Xmin + self._cell_size[0]/2])
+        if X > Xmax:
+            X = np.array([Xmax - self._cell_size[0]/2])
+        if Y < Ymin:
+            Y = np.array([Ymin + self._cell_size[1]/2])
+        if Y > Ymax:
+            Y = np.array([Ymax - self._cell_size[1]/2])
+        if Z < Zmin:
+            Z = np.array([Zmin + self._cell_size[2]/2])
+        if Z > Zmax:
+            Z = np.array([Zmax - self._cell_size[2]/2])
+
+        return np.array([X,Y,Z]).transpose()

     def coord2index(self,coord):
         return self.loc2index(self.coord2loc(coord))
@@ -414,11 +459,14 @@ def NLLOC_LoadFile(self,FileName):
             self.NLLoc_MapOrg = [trans[5],trans[3],trans[7],'Simple','0.0','0.0']
         if trans[1] == 'LAMBERT':
             self.NLLoc_proj = 'LAMBERT'
-            print([trans[7],trans[5],trans[13],trans[3],trans[9],trans[11]])
             self.NLLoc_MapOrg = [trans[7],trans[5],trans[13],trans[3],trans[9],trans[11]]
+        if trans[1] == 'TRANS_MERC':
+            self.NLLoc_proj = 'TRANS_MERC'
+            self.NLLoc_MapOrg = [trans[7],trans[5],trans[9],trans[3],'0.0','0.0']

-        # Reading the buf file
+
+        # Reading the buf file
         fid = open('{}.buf'.format(FileName),'rb')
         data = struct.unpack('{}f'.format(self.NLLoc_n[0]*self.NLLoc_n[1]*self.NLLoc_n[2]),fid.read(self.NLLoc_n[0]*self.NLLoc_n[1]*self.NLLoc_n[2]*4))
         self.NLLoc_data = np.array(data).reshape(self.NLLoc_n[0],self.NLLoc_n[1],self.NLLoc_n[2])
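+# Loading sketch (illustrative; the path stem is hypothetical). The method
+# expects the NonLinLoc grid name without the .hdr/.buf extension:
+#   lut.NLLOC_LoadFile('./time/layer.P.STA1.time')
+#   lut.NLLoc_data.shape == tuple(lut.NLLoc_n)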
@@ -426,11 +474,11 @@ def NLLOC_ProjectGrid(self):
         '''
-           Projecting the grid to the new coordinate system. This function also determines the 3D grid from the 2D
-           grids from NonLinLoc
+           Projecting the grid to the new coordinate system. This function also determines the 3D grid from the
+           2D grids from NonLinLoc.
         '''

-        # Generating the correct NonLinLoc Formated Grid
+        # Generating the correct NonLinLoc Formatted Grid
         if (self.NLLoc_proj == 'NONE'):
             GRID_NLLOC = Grid3D(center=(self.NLLoc_org + self.NLLoc_siz*self.NLLoc_n), cell_count=self.NLLoc_n,cell_size=self.NLLoc_siz,azimuth=0.0, dip=0.0, sort_order='C')
@@ -438,12 +486,16 @@
             GRID_NLLOC = Grid3D(center=(self.NLLoc_org + self.NLLoc_siz*self.NLLoc_n), cell_count=self.NLLoc_n,cell_size=self.NLLoc_siz,azimuth=self.NLLoc_MapOrg[2], dip=0.0, sort_order='C')
             GRID_NLLOC.set_lonlat(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1])
-
         if (self.NLLoc_proj == 'LAMBERT'):
             GRID_NLLOC = Grid3D(center=(self.NLLoc_org + self.NLLoc_siz*self.NLLoc_n), cell_count=self.NLLoc_n,cell_size=self.NLLoc_siz,azimuth=self.NLLoc_MapOrg[2], dip=0.0, sort_order='C')
             GRID_NLLOC.set_lonlat(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1])
             GRID_NLLOC.set_proj(self.NLLoc_MapOrg[3])

+        if (self.NLLoc_proj == 'TRANS_MERC'):
+            GRID_NLLOC = Grid3D(center=(self.NLLoc_org + self.NLLoc_siz*self.NLLoc_n), cell_count=self.NLLoc_n,cell_size=self.NLLoc_siz,azimuth=self.NLLoc_MapOrg[2], dip=0.0, sort_order='C')
+            GRID_NLLOC.set_lonlat(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1])
+            GRID_NLLOC.set_proj(self.NLLoc_MapOrg[3])
+
         OrgX,OrgY,OrgZ = GRID_NLLOC.get_grid_xyz(cells='full')
         NewX,NewY,NewZ = self.get_grid_xyz(cells='full')
@@ -454,7 +506,7 @@
     def NLLOC_RedefineGrid(self,Decimate):
         '''
-            Redefining coordinate system to the file loaded
+            Redefining the coordinate system to match the loaded file
         '''

         # Decimating the grid by the factor defined
@@ -468,18 +520,21 @@
         if (self.NLLoc_proj == 'NONE'):
             self.azimuth = 0.0
             self.grid_center = self.center
-
+
         if (self.NLLoc_proj == 'SIMPLE'):
             self.azimuth = self.NLLoc_MapOrg[2]
-            self.set_lonlat(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1])
-            self.grid_center = self.center
-            self.setproj_wgs84()
+            self.get_NLLOC_gridcenter(float(self.NLLoc_MapOrg[0]),float(self.NLLoc_MapOrg[1]))
+            self.grid_center[2] = self.center[2]

         if (self.NLLoc_proj == 'LAMBERT'):
-            self.azimuth = self.NLLoc_MapOrg[2]
-            self.set_lonlat(self.NLLoc_MapOrg[0],self.NLLoc_MapOrg[1])
-            self.grid_center = self.center
-            self.setproj_wgs84()
+            self.azimuth = float(self.NLLoc_MapOrg[2])
+            self.get_NLLOC_gridcenter(float(self.NLLoc_MapOrg[0]),float(self.NLLoc_MapOrg[1]))
+            self.grid_center[2] = self.center[2]
+
+        if (self.NLLoc_proj == 'TRANS_MERC'):
+            self.azimuth = float(self.NLLoc_MapOrg[2])
+            self.get_NLLOC_gridcenter(float(self.NLLoc_MapOrg[0]),float(self.NLLoc_MapOrg[1]))
+            self.grid_center[2] = self.center[2]

         self.NLLoc_data = self.decimate_array(self.NLLoc_data,np.array(Decimate))[:,:,::-1]
@@ -501,10 +556,10 @@ class LUT(Grid3D,NonLinLoc):
     '''

     # Additions to be made to the program:
-    #   - Weighting of the stations with distance, allow the user to define their own tables
-    #     or define a fixed weighting for the problem.
+    #   - Weighting of the stations with distance, allow the user to define their own tables
+    #     or define a fixed weighting for the problem.
     #   -
     #
     #
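+# Construction sketch (values are illustrative placeholders, not defaults to
+# copy):
+#   lut = LUT(center=np.array([10000.0, 10000.0, -5000.0]),
+#             cell_count=np.array([51, 51, 31]),
+#             cell_size=np.array([500.0, 500.0, 500.0]))
+#   lut.set_lonlat(lon, lat)
+#   lut.setproj_wgs84('UTM')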
@@ -518,7 +573,7 @@ def __init__(self, center=np.array([10000.0, 10000.0, -5000.0]), cell_count=np.a
         self.velocity_model = None
         self.station_data = None
         self._maps = dict()
-        self.data = None
+        self.data = None

     @property
     def maps(self):
@@ -539,8 +594,8 @@ def _select_station(self, station_data):
                 flag[i] = True

     def decimate(self, ds, inplace=False):
-        '''
-        Function used to decimate the travel-time tables either supplied by NonLinLoc or through
+        '''
+        Function used to decimate the travel-time tables either supplied by NonLinLoc or through
         the inbuilt functions:

@@ -582,7 +637,7 @@ def decimate_array(self,DATA,ds):
         self.center = center
         ARRAY = np.ascontiguousarray(DATA[c1[0]::ds[0], c1[1]::ds[1], c1[2]::ds[2]])
-        return ARRAY
+        return ARRAY

     def get_station_xyz(self, station=None):
@@ -668,10 +723,10 @@ def set_station(self,loc,units):

     def compute_Homogeous(self,VP,VS):
-        '''
-        Function used to compute Travel-time tables in a homogeous
+        '''
+        Function used to compute Travel-time tables in a homogeneous
         velocity model
-
+
         Input:
             VP - P-wave velocity (km/s, float)
             VS - S-wave velocity (km/s, float)
@@ -692,9 +747,6 @@
         self.maps = {'TIME_P': map_p1, 'TIME_S': map_s1}

-
-
-
     def compute_1DVelocity(self,Z,VP,VS):
         '''
         Function used to compute Travel-time tables in a 1D Velocity model
@@ -710,65 +762,75 @@
         # Interpolating the velocity model to each point in the 3D grid.

+
         stn = self.get_station_xyz()
         coord = self.get_grid_xyz()
         ix, iy, iz = self.get_grid_xyz(cells='all')
-        ttp = np.zeros(ix.shape + (nstn,))
-        tts = np.zeros(ix.shape + (nstn,))
+        ttp = np.zeros(ix.shape + (stn.shape[0],))
+        tts = np.zeros(ix.shape + (stn.shape[0],))
+
+        Z = np.insert(np.append(Z,-np.inf),0,np.inf)
+        VP = np.insert(np.append(VP,VP[-1]),0,VP[0])
+        VS = np.insert(np.append(VS,VS[-1]),0,VS[0])

-        gvp = np.interp(iz, -Z, VP)
-        gvs = np.interp(iz, -Z, VS)
+        f = interp1d(Z,VP)
+        gvp = f(iz)
+        f = interp1d(Z,VS)
+        gvs = f(iz)

         for s in range(stn.shape[0]):
-            print("Generating 1D Travel-Time Table - {}".format(i))
+            print("Generating 1D Travel-Time Table - {} of {}".format(s+1,stn.shape[0]))

             x = np.arange(min(coord[:,0]),max(coord[:,0]),self.cell_size[0])
-            y = np.arange(min(coord[:,1]),max(coord[:,1]),self.cell_size[1])
-            Z = np.arange(min(coord[:,2]),max(coord[:,2]),self.cell_size[2])
+            y = -np.arange(min(coord[:,1]),max(coord[:,1]),self.cell_size[1])
+            z = np.arange(min(coord[:,2]),max(coord[:,2]),self.cell_size[2])

-            ttp[..., p] = eikonal(x,y,z,gvp,np.array([s]))[0]
-            tts[..., s] = eikonal(x,y,z,gvs,np.array([s]))[0]
+            ttp[..., s] = eikonal(ix,iy,iz,self.cell_size[0],self.cell_size[1],self.cell_size[2],gvp,stn[s][np.newaxis,:])
+            tts[..., s] = eikonal(ix,iy,iz,self.cell_size[0],self.cell_size[1],self.cell_size[2],gvs,stn[s][np.newaxis,:])

         self.maps = {'TIME_P': ttp, 'TIME_S': tts}

-    def compute_3DVelocity(self,INPUT_FILE):
-        '''
-        Function used to compute Travel-time tables in a 1D Velocity model
-        defined using the input VP and VS arrays
+#    def compute_3DVelocity(self,INPUT_FILE):
+#        '''
+#        Function used to compute Travel-time tables in a 3D Velocity model
+#        defined using the input VP and VS arrays

-        INPUTS:
-            INPUT_FILE - File containg comma seperated X,Y,Z,VP,VS
+#        INPUTS:
+#            INPUT_FILE - File containing comma-separated X,Y,Z,VP,VS

-        '''
-        # Constructing the velocity model
-        # Interpolating 
the velocity model to each point in the 3D grid. Defined Smoothing parameter based by +# ''' +# # Constructing the velocity model +# # Interpolating the velocity model to each point in the 3D grid. Defined Smoothing parameter based by - VEL = pd.read_csv(INPUT_FILE,names=['X','Y','Z','VP','VS']) +# VEL = pd.read_csv(INPUT_FILE,names=['X','Y','Z','VP','VS']) - stn = self.get_station_xyz() - coord = self.get_grid_xyz() - ix, iy, iz = self.get_grid_xyz(cells='all') - ttp = np.zeros(ix.shape + (nstn,)) - tts = np.zeros(ix.shape + (nstn,)) +# stn = self.get_station_xyz() +# coord = self.get_grid_xyz() +# ix, iy, iz = self.get_grid_xyz(cells='all') +# ttp = np.zeros(ix.shape + (nstn,)) +# tts = np.zeros(ix.shape + (nstn,)) - gvp = scipy.interpolate.griddata(VEL[['X','Y','Z']], VEL['VP'], (ix,iy,iz), 'linear') - gvs = scipy.interpolate.griddata(VEL[['X','Y','Z']], VEL['VP'], (ix,iy,iz), 'linear') +# gvp = scipy.interpolate.griddata(VEL[['X','Y','Z']], VEL['VP'], (ix,iy,iz), 'linear') +# gvs = scipy.interpolate.griddata(VEL[['X','Y','Z']], VEL['VP'], (ix,iy,iz), 'linear') - for s in range(stn.shape[0]): - print("Generating 1D Travel-Time Table - {}".format(i)) +# for s in range(stn.shape[0]): +# print("Generating 1D Travel-Time Table - {}".format(i)) - x = np.arange(min(coord[:,0]),max(coord[:,0]),self.cell_size[0]) - y = np.arange(min(coord[:,1]),max(coord[:,1]),self.cell_size[1]) - Z = np.arange(min(coord[:,2]),max(coord[:,2]),self.cell_size[2]) +# x = np.arange(min(coord[:,0]),max(coord[:,0]),self.cell_size[0]) +# y = np.arange(min(coord[:,1]),max(coord[:,1]),self.cell_size[1]) +# Z = np.arange(min(coord[:,2]),max(coord[:,2]),self.cell_size[2]) - ttp[..., p] = eikonal(x,y,z,gvp,np.array([s]))[0] - tts[..., s] = eikonal(x,y,z,gvs,np.array([s]))[0] +# ttp[..., p] = eikonal(x,y,z,gvp,stn[s][np.newaxis,:])[0] +# tts[..., s] = eikonal(x,y,z,gvs,stn[s][np.newaxis,:])[0] - self.maps = {'TIME_P': ttp1, 'TIME_S': tts} +# self.maps = {'TIME_P': ttp1, 'TIME_S': tts} @@ -776,12 +838,12 @@ def compute_3DVelocity(self,INPUT_FILE): def compute_3DNLLoc(self,PATH,RedefineCoord=False,Decimate=[1,1,1]): ''' - Function to read in NonLinLoc Tables to be used for the Travel-Time - tables. + Function to read in NonLinLoc Tables to be used for the Travel-Time + tables. 
INPUTS: PATH - Full path to where the .buf and .hdr files can be found from - the NonLinLoc output files + the NonLinLoc output files @@ -790,13 +852,13 @@ def compute_3DNLLoc(self,PATH,RedefineCoord=False,Decimate=[1,1,1]): for st in range(nstn): name = self.station_data['Name'][st] print('Loading TTp and TTs for {}'.format(name)) - + # Reading in P-wave self.NLLOC_LoadFile('{}.P.{}.time'.format(PATH,name)) if (RedefineCoord == False): self.NLLOC_ProjectGrid() - else: + else: self.NLLOC_RedefineGrid(Decimate) if ('map_p1' not in locals()) and ('map_s1' not in locals()): @@ -821,7 +883,7 @@ def compute_3DNLLoc(self,PATH,RedefineCoord=False,Decimate=[1,1,1]): self.maps = {'TIME_P':map_p1, 'TIME_S':map_s1} - + def save(self,FILENAME): ''' @@ -843,7 +905,7 @@ def load(self,FILENAME): def plot_station(self): - ''' + ''' Function to plot a 2D representation of the station locations ''' @@ -925,4 +987,4 @@ def plot_station(self): # else: # plt.show() - + diff --git a/build/lib/CMS/io/mseed.py b/build/lib/CMS/io/mseed.py index 09b8b2b..e178c4d 100644 --- a/build/lib/CMS/io/mseed.py +++ b/build/lib/CMS/io/mseed.py @@ -4,6 +4,8 @@ # ---- Import Packages ----- import obspy from obspy import UTCDateTime +import CMS.core.model as cmod + from datetime import datetime from datetime import timedelta @@ -26,11 +28,7 @@ def _downsample(st,sr): class MSEED(): - def __init__(self,lut,HOST_PATH='/PATH/MSEED'): - - - self.lookup_table = lut - + def __init__(self,LUT,HOST_PATH='/PATH/MSEED'): self.startTime = None self.endTime = None self.sampling_rate = None @@ -41,7 +39,12 @@ def __init__(self,lut,HOST_PATH='/PATH/MSEED'): self.signal = None self.FilteredSignal = None self.StationAvaliability = None + + lut = cmod.LUT() + lut.load(LUT) self.StationInformation = lut.station_data + del lut + self.st = None def _stationAvaliability(self,st): @@ -53,17 +56,16 @@ def _stationAvaliability(self,st): # Since the traces are the same sample-rates then the stations can be selected based #on the start and end time - exSamples = (endT-stT).total_seconds()*self.sampling_rate + 1 - + exSamples = round((endT-stT).total_seconds()*self.sampling_rate + 1) - stationAva = np.zeros((len(self.lookup_table.station_data['Name']),1)) - signal = np.zeros((3,len(self.lookup_table.station_data['Name']),int(exSamples))) + stationAva = np.zeros((len(self.StationInformation['Name']),1)) + signal = np.zeros((3,len(self.StationInformation['Name']),int(exSamples))) - for i in range(0,len(self.lookup_table.station_data['Name'])): + for i in range(0,len(self.StationInformation['Name'])): - tmp_st = st.select(station=self.lookup_table.station_data['Name'][i]) + tmp_st = st.select(station=self.StationInformation['Name'][i]) if len(tmp_st) == 3: - if tmp_st[0].stats.npts == exSamples and tmp_st[1].stats.npts == exSamples and tmp_st[2].stats.npts == exSamples: + if tmp_st[0].stats.npts <= exSamples and tmp_st[1].stats.npts == exSamples and tmp_st[2].stats.npts == exSamples: # Defining the station as avaliable stationAva[i] = 1 @@ -82,9 +84,6 @@ def _stationAvaliability(self,st): # Trace not completly active during this period continue - - - return signal,stationAva @@ -98,7 +97,8 @@ def path_structure(self,TYPE='YEAR/JD/STATION'): if TYPE == 'YEAR/JD/STATION': self.Type = 'YEAR/JD/STATION' - + if TYPE == 'STATION.YEAR.JULIANDAY': + self.Type = 'STATION.YEAR.JULIANDAY' def _load_fromPath(self): @@ -113,15 +113,32 @@ def _load_fromPath(self): if self.Type == 'YEAR/JD/STATION': dy = 0 FILES = [] - while (self.endTime.timetuple().tm_yday) >= 
(self.startTime + timedelta(days=dy)).timetuple().tm_yday:
+            while self.endTime.timetuple().tm_yday >= (self.startTime + timedelta(days=dy)).timetuple().tm_yday:
                 # Determine current time
                 ctime = self.startTime + timedelta(days=dy)
+                for st in self.StationInformation['Name'].tolist():
+                    FILES.extend(glob('{}/{}/{}/*{}*'.format(self.MSEED_path,ctime.year,str(ctime.timetuple().tm_yday).zfill(3),st)))
+
+                dy += 1

-                for st in self.lookup_table.station_data['Name'].tolist():
-                    FILES.extend(glob('{}/{}/{}/*{}*'.format(self.MSEED_path,ctime.year,ctime.timetuple().tm_yday,st)))
+        if self.Type == 'STATION.YEAR.JULIANDAY':
+            dy = 0
+            FILES = []
+            while self.endTime >= (self.startTime + timedelta(days=dy)):
+                # Determine current time
+                ctime = self.startTime + timedelta(days=dy)
+                for st in self.StationInformation['Name'].tolist():
+                    FILES.extend(glob('{}/*{}.*.{}.{}'.format(self.MSEED_path,st,ctime.year,str(ctime.timetuple().tm_yday).zfill(3))))

                 dy += 1
+
         self.FILES = FILES

@@ -134,52 +151,49 @@ def read_mseed(self,starttime,endtime,sampling_rate):
         self.startTime = datetime.strptime(starttime,'%Y-%m-%dT%H:%M:%S.%f')
         self.endTime = datetime.strptime(endtime,'%Y-%m-%dT%H:%M:%S.%f')
-
-        self._load_fromPath()
-
-        #print('Loading the MSEED')
-
-        # Loading the required mseed data
-        c=0
-        for f in self.FILES:
-            try:
-                if c==0:
-                    st = obspy.read(f,starttime=UTCDateTime(self.startTime),endtime=UTCDateTime(self.endTime))
-                    c +=1
-                else:
-                    st += obspy.read(f,starttime=UTCDateTime(self.startTime),endtime=UTCDateTime(self.endTime))
-            except:
-                continue
-                #print('Station File not MSEED - {}'.format(f))
-
-        # Removing all the stations with gaps
-        if len(st.get_gaps()) > 0:
-            stationRem = np.unique(np.array(st.get_gaps())[:,1]).tolist()
-            for sa in stationRem:
-                tr = st.select(station=sa)
-                for tra in tr:
-
-                    st.remove(tra)
-
-
-        # Combining the mseed and determining station avaliability
-        #print('Detrending and Merging MSEED')
-        #st.detrend()
-        st.merge()
-
-
-        # Downsample the mseed to the same level
-        #print('Downsampling MSEED')
-        st = _downsample(st,sampling_rate)
         self.sampling_rate = sampling_rate
+        self._load_fromPath()

-        # Checking the station Avaliability for each of the stations across this time period
-        #print('stationAvaliability MSEED')
-        signal,stA = self._stationAvaliability(st)
-
-
+        if len(self.FILES) > 0:
+            # Loading the required mseed data
+            c=0
+            for f in self.FILES:
+                try:
+                    if c==0:
+                        self.st = obspy.read(f,starttime=UTCDateTime(self.startTime),endtime=UTCDateTime(self.endTime))
+                        c +=1
+                    else:
+                        self.st += obspy.read(f,starttime=UTCDateTime(self.startTime),endtime=UTCDateTime(self.endTime))
+                except:
+                    print('Station File not MSEED - {}'.format(f))
+                    continue
+
+            # Removing all the stations with gaps greater than 10.0 milliseconds
+            if len(self.st.get_gaps()) > 0 and np.max(np.array(self.st.get_gaps())[:,4]-np.array(self.st.get_gaps())[:,5] > 10.0):
+                stationRem = np.unique(np.array(self.st.get_gaps())[:,1]).tolist()
+                for sa in stationRem:
+                    tr = self.st.select(station=sa)
+                    for tra in tr:
+                        self.st.remove(tra)
+
+            # Combining the mseed and determining station availability
+
+            self.st.merge(fill_value='interpolate')
+            self.st.detrend('demean')
+            self.st = _downsample(self.st,sampling_rate)
+
+            signal,stA = self._stationAvaliability(self.st)
+
+        else:
+            print('Data does not exist for this time period - creating blank')
+            # Files do not exist, so creating zero-filled arrays instead
+            exSamples = round((self.endTime-self.startTime).total_seconds()*self.sampling_rate + 1)
+            stA = np.zeros((len(self.StationInformation['Name']),1))
+            signal = np.zeros((3,len(self.StationInformation['Name']),int(exSamples)))
+
+
+#        self.st = None
         self.signal = signal
         self.FilteredSignal = np.empty((self.signal.shape))
         self.FilteredSignal[:] = np.nan
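+# Example driver (sketch; the paths and LUT file below are hypothetical):
+#   data = MSEED('./lut/example.lut', HOST_PATH='./mseed')
+#   data.path_structure(TYPE='STATION.YEAR.JULIANDAY')
+#   data.read_mseed('2014-06-29T18:00:00.0', '2014-06-29T18:30:00.0', 50.0)
+#   data.signal.shape    # -> (3, n_stations, n_samples)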
diff --git a/build/lib/CMS/lib/cmslib.so b/build/lib/CMS/lib/cmslib.so
index d90dace..266775d 100755
Binary files a/build/lib/CMS/lib/cmslib.so and b/build/lib/CMS/lib/cmslib.so differ
diff --git a/build/lib/CMS/signal/scan.py b/build/lib/CMS/signal/scan.py
index 0c0677e..a3b0e05 100644
--- a/build/lib/CMS/signal/scan.py
+++ b/build/lib/CMS/signal/scan.py
@@ -6,6 +6,7 @@
 import numpy as np

 from CMS.core.time import UTCDateTime
+import CMS.core.model as cmod
 from datetime import datetime
 from datetime import timedelta
@@ -27,6 +28,9 @@
 import pandas as pd

+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Rectangle
+
 import matplotlib
 matplotlib.use('Agg')
 import matplotlib.pylab as plt
@@ -54,11 +58,18 @@ def onset(sig, stw, ltw):
     # assert isinstance(snr, object)
     nchan, nsamp = sig.shape
     snr = np.copy(sig)
+    snr_raw = np.copy(sig)
     for ch in range(0, nchan):
-        snr[ch, :] = classic_sta_lta(sig[ch, :], stw, ltw)
-        np.clip(1+snr[ch,:],0.8,np.inf,snr[ch, :])
-        np.log(snr[ch, :], snr[ch, :])
-    return snr
+        if np.sum(sig[ch,:]) == 0.0:
+            snr[ch, :] = 0.0
+            snr_raw[ch, :] = snr[ch,:]
+        else:
+            snr[ch, :] = classic_sta_lta(sig[ch, :]+1.0, stw, ltw)
+            snr_raw[ch, :] = snr[ch,:]
+            np.clip(1+snr[ch,:],0.8,np.inf,snr[ch, :])
+            np.log(snr[ch, :], snr[ch, :])
+
+    return snr_raw,snr

 def filter(sig,srate,lc,hc,order=3):
@@ -149,9 +160,22 @@ def write_log(self, message):
             fp.write(message + '\n')

-    def write_event(self, EVENT):
-        fname = path.join(self.path,self.name + '_Event.txt')
-        EVENT.to_csv(fname,index=False)
+
+    def cut_mseed(self,DATA,EventName):
+        fname = path.join(self.path,self.name + '_{}.mseed'.format(EventName))
+
+        st = DATA.st
+        st.write(fname, format='MSEED')
+
+
+    def del_scan(self):
+        fname = path.join(self.path,self.name + '.scn')
+        if path.exists(fname):
+            print('Filename {} already exists. Deleting!'.format(fname))
+            os.remove(fname)

     def write_scan(self,daten,dsnr,dloc):
         # Defining the output filename
@@ -236,7 +260,7 @@ def write_coal4D(self,map4D,EVENT,stT,enT):
         np.save(fname,map4D)

-    def write_coalVideo(self,MAP,lookup_table,DATA,EventCoaVal,EventName):
+    def write_coalVideo(self,MAP,lookup_table,DATA,EventCoaVal,EventName,AdditionalOptions=None):
         '''
             Writing the coalescence video to file for each event
         '''

         filename = path.join(self.path,self.name)

         SeisPLT = SeisPlot(MAP,lookup_table,DATA,EventCoaVal)
+
         SeisPLT.CoalescenceVideo(SaveFilename='{}_{}'.format(filename,EventName))
         SeisPLT.CoalescenceMarginal(SaveFilename='{}_{}'.format(filename,EventName))

-    def write_stationsfile(self,STATIONS,EventName):
+    def write_stationsfile(self,STATION_pickS,EventName):
         '''
             Writing the stations file
         '''
         fname = path.join(self.path,self.name + '_{}.stn'.format(EventName))
-        STATIONS.to_csv(fname,index=False)
+        STATION_pickS.to_csv(fname,index=False)

     def write_event(self,EVENT,EventName):
         '''
@@ -273,19 +298,36 @@ class SeisPlot:
         CoalescenceMarginalizeLocation

     '''
-    def __init__(self,lut,MAP,DATA,EVENT):
+    def __init__(self,lut,MAP,DATA,EVENT,StationPick,PlotOptions=None):
         '''
             Initialisation of the plotting class variables
         '''
-        self.LUT = lut
-        self.DATA = DATA
-        self.EVENT = EVENT
-        self.MAP = MAP
+        self.LUT = lut
+        self.DATA = DATA
+        self.EVENT = EVENT
+        self.MAP = MAP
+        self.StationPick = StationPick
+
+        if PlotOptions == None:
+            self.TraceScaling = 1
+            self.CMAP = 'magma'
+            self.LineStationColor = 'white'
+            self.Plot_Stations = True
+            self.FilteredSignal = True
+            self.XYFiles = None
+            self.RangeOrder = False
+        else:
+            try:
+                self.TraceScaling = PlotOptions.TraceScaling
+                self.CMAP = PlotOptions.MAPColor
+                self.LineStationColor = PlotOptions.LineStationColor
+                self.Plot_Stations = PlotOptions.Plot_Stations
+                self.FilteredSignal = PlotOptions.FilteredSignal
+                self.XYFiles = PlotOptions.XYFiles
+                self.RangeOrder = PlotOptions.RangeOrder

-        self.TraceScaling = 1
-        self.CMAP = 'jet'
-        self.Plot_Stations=True
-        self.FilteredSignal=True
+            except:
+                print('Error - Please define all plot options, see function ... 
for details.') @@ -299,13 +341,12 @@ def __init__(self,lut,MAP,DATA,EVENT): self.MAPmax = np.max(MAP) - self.CoaTraceVLINE = None - self.CoaValVLINE = None - - # Updating the Coalescence Maps - self.CoaXYPlt = None - self.CoaYZPlt = None - self.CoaXZPlt = None + self.CoaTraceVLINE = None + self.CoaValVLINE = None + + self.CoaXYPlt = None + self.CoaYZPlt = None + self.CoaXZPlt = None self.CoaXYPlt_VLINE = None self.CoaXYPlt_HLINE = None self.CoaYZPlt_VLINE = None @@ -324,7 +365,7 @@ def CoalescenceImage(self,TimeSliceIndex): TimeSlice = self.times[TimeSliceIndex] index = np.where(self.EVENT['DT'] == TimeSlice)[0][0] - indexVal = np.array(self.LUT.xyz2loc(self.LUT.coord2xyz(np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]]))[0])).astype(int) + indexVal = self.LUT.coord2loc(np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]])).astype(int)[0] indexCoord = np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]])[0,:] @@ -343,15 +384,18 @@ def CoalescenceImage(self,TimeSliceIndex): STIn = np.where(self.times == self.EVENT['DT'].iloc[0])[0][0] ENIn = np.where(self.times == self.EVENT['DT'].iloc[-1])[0][0] + # ------------ Defining the stations in alphabetical order -------- + StaInd = np.argsort(self.DATA.StationInformation['Name'])[::-1] + for ii in range(self.DATA.signal.shape[1]): if self.FilteredSignal == False: - Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.signal[0,ii,STIn:ENIn]/np.max(abs(self.DATA.signal[0,ii,STIn:ENIn])))*self.TraceScaling+(ii+1),'r',linewidth=0.5) - Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.signal[1,ii,STIn:ENIn]/np.max(abs(self.DATA.signal[1,ii,STIn:ENIn])))*self.TraceScaling+(ii+1),'b',linewidth=0.5) - Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.signal[2,ii,STIn:ENIn]/np.max(abs(self.DATA.signal[2,ii,STIn:ENIn])))*self.TraceScaling+(ii+1),'g',linewidth=0.5) + Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.signal[0,ii,STIn:ENIn]/np.max(abs(self.DATA.signal[0,ii,STIn:ENIn])))*self.TraceScaling+(StaInd[ii]+1),'r',linewidth=0.5) + Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.signal[1,ii,STIn:ENIn]/np.max(abs(self.DATA.signal[1,ii,STIn:ENIn])))*self.TraceScaling+(StaInd[ii]+1),'b',linewidth=0.5) + Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.signal[2,ii,STIn:ENIn]/np.max(abs(self.DATA.signal[2,ii,STIn:ENIn])))*self.TraceScaling+(StaInd[ii]+1),'g',linewidth=0.5) else: - Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.FilteredSignal[0,ii,STIn:ENIn]/np.max(abs(self.DATA.FilteredSignal[0,ii,STIn:ENIn])))*self.TraceScaling+(ii+1),'r',linewidth=0.5) - Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.FilteredSignal[1,ii,STIn:ENIn]/np.max(abs(self.DATA.FilteredSignal[1,ii,STIn:ENIn])))*self.TraceScaling+(ii+1),'b',linewidth=0.5) - Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.FilteredSignal[2,ii,STIn:ENIn]/np.max(abs(self.DATA.FilteredSignal[2,ii,STIn:ENIn])))*self.TraceScaling+(ii+1),'g',linewidth=0.5) + Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.FilteredSignal[0,ii,STIn:ENIn]/np.max(abs(self.DATA.FilteredSignal[0,ii,STIn:ENIn])))*self.TraceScaling+(StaInd[ii]+1),'r',linewidth=0.5) + Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.FilteredSignal[1,ii,STIn:ENIn]/np.max(abs(self.DATA.FilteredSignal[1,ii,STIn:ENIn])))*self.TraceScaling+(StaInd[ii]+1),'b',linewidth=0.5) + 
Coa_Trace.plot(np.arange(STIn,ENIn),(self.DATA.FilteredSignal[2,ii,STIn:ENIn]/np.max(abs(self.DATA.FilteredSignal[2,ii,STIn:ENIn])))*self.TraceScaling+(StaInd[ii]+1),'g',linewidth=0.5)

         # ---------------- Plotting the Station Travel Times -----------
         for i in range(self.LUT.get_value_at('TIME_P',np.array([indexVal]))[0].shape[0]):
@@ -366,15 +410,15 @@
                 TS = np.append(TS,ts)

-        self.CoaArriavalTP = Coa_Trace.scatter(TP,(np.arange(self.DATA.signal.shape[1])+1),15,'r',marker='v')
-        self.CoaArriavalTS = Coa_Trace.scatter(TS,(np.arange(self.DATA.signal.shape[1])+1),15,'b',marker='v')
+        self.CoaArriavalTP = Coa_Trace.scatter(TP,(StaInd+1),15,'r',marker='v')
+        self.CoaArriavalTS = Coa_Trace.scatter(TS,(StaInd+1),15,'b',marker='v')

         Coa_Trace.set_ylim([0,ii+2])
         Coa_Trace.set_xlim([STIn,ENIn])
         Coa_Trace.get_xaxis().set_ticks([])
         Coa_Trace.yaxis.tick_right()
-        Coa_Trace.yaxis.set_ticks(np.arange(self.DATA.signal.shape[1])+1)
-        Coa_Trace.yaxis.set_ticklabels(self.DATA.StationInformation['Name'])
+        Coa_Trace.yaxis.set_ticks(np.arange(self.DATA.signal.shape[1])+1)
+        Coa_Trace.yaxis.set_ticklabels(np.sort(self.DATA.StationInformation['Name'])[::-1])
         self.CoaTraceVLINE = Coa_Trace.axvline(TimeSlice,0,1000,linestyle='--',linewidth=2,color='r')
@@ -427,6 +471,17 @@
             Coa_XYSlice.annotate(txt,[self.LUT.station_data['Longitude'][i],self.LUT.station_data['Latitude'][i]])

+        # ------------- Plotting the XYFiles -----------
+        if self.XYFiles != None:
+            XYFiles = pd.read_csv(self.XYFiles,names=['File','Color','Linewidth','Linestyle'])
+            c=0
+            for ff in XYFiles['File']:
+                XYF = pd.read_csv(ff,names=['X','Y'])
+                Coa_XYSlice.plot(XYF['X'],XYF['Y'],linestyle=XYFiles['Linestyle'].iloc[c],linewidth=XYFiles['Linewidth'].iloc[c],color=XYFiles['Color'].iloc[c])
+                c+=1
+
+
         # ------------- Plotting the station Locations -----------
         try:
             Coa_Logo.axis('off')
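+# XYFiles format sketch (inferred from the read_csv calls above; the file
+# name is hypothetical): XYFiles is a CSV of File,Color,Linewidth,Linestyle
+# rows, e.g.
+#   ./faults.xy,k,0.5,--
+# where each listed file is itself a two-column CSV of X,Y points to overlay.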
@@ -440,10 +495,11 @@

     def _CoalescenceVideo_update(self,frame):
+        frame = int(frame)
         STIn = np.where(self.times == self.EVENT['DT'].iloc[0])[0][0]
         TimeSlice = self.times[int(frame)]
         index = np.where(self.EVENT['DT'] == TimeSlice)[0][0]
-        indexVal = np.array(self.LUT.xyz2loc(self.LUT.coord2xyz(np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]]))[0])).astype(int)
+        indexVal = self.LUT.coord2loc(np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]])).astype(int)[0]
         indexCoord = np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]])[0,:]

         # Updating the Coalescence Value and Trace Lines
@@ -451,9 +507,9 @@
         self.CoaValVLINE.set_xdata(TimeSlice)

         # Updating the Coalescence Maps
-        self.CoaXYPlt.set_array((self.MAP[:,:,int(indexVal[2]),int(frame-STIn)]/self.MAPmax)[:-1,:-1].ravel())
-        self.CoaYZPlt.set_array((self.MAP[:,int(indexVal[1]),:,int(frame-STIn)]/self.MAPmax)[:-1,:-1].ravel())
-        self.CoaXZPlt.set_array((np.transpose(self.MAP[int(indexVal[0]),:,:,int(frame-STIn)])/self.MAPmax)[:-1,:-1].ravel())
+        self.CoaXYPlt.set_array((self.MAP[:,:,indexVal[2],int(frame-STIn)]/self.MAPmax)[:-1,:-1].ravel())
+        self.CoaYZPlt.set_array((self.MAP[:,indexVal[1],:,int(frame-STIn)]/self.MAPmax)[:-1,:-1].ravel())
+        self.CoaXZPlt.set_array((np.transpose(self.MAP[indexVal[0],:,:,int(frame-STIn)])/self.MAPmax)[:-1,:-1].ravel())

         # Updating the Coalescence Lines
         self.CoaXYPlt_VLINE.set_xdata(indexCoord[0])
@@ -465,9 +521,7 @@ def _CoalescenceVideo_update(self,frame):

         # Updating the station travel-times
-
         for i in range(self.LUT.get_value_at('TIME_P',np.array([indexVal]))[0].shape[0]):
-
             try:
                 tp = np.argmin(abs((self.times.astype(datetime) - (TimeSlice.astype(datetime) + timedelta(seconds=self.LUT.get_value_at('TIME_P',np.array([indexVal]))[0][i])))/timedelta(seconds=1)))
                 ts = np.argmin(abs((self.times.astype(datetime) - (TimeSlice.astype(datetime) + timedelta(seconds=self.LUT.get_value_at('TIME_S',np.array([indexVal]))[0][i])))/timedelta(seconds=1)))
@@ -488,8 +542,6 @@
         self.CoaArriavalTS.set_offsets(np.c_[TS,(np.arange(self.DATA.signal.shape[1])+1)])

-
-
         # # Updating the station travel-times
         # self.CoaArriavalTP.remove()
         # self.CoaArriavalTS.remove()
@@ -507,6 +559,126 @@
         # self.CoaArriavalTS = Coa_Trace.scatter(TS,(np.arange(self.DATA.signal.shape[1])+1),15,'b',marker='v')

+    def CoalescenceTrace(self,SaveFilename=None):
+
+        # Determining the marginal window value from the coalescence function
+        mMAP = self.MAP
+        mMAP = np.log(np.sum(np.exp(mMAP),axis=-1))
+        mMAP = mMAP/np.max(mMAP)
+        mMAP_Cutoff = np.percentile(mMAP,95)
+        mMAP[mMAP < mMAP_Cutoff] = mMAP_Cutoff
+        mMAP = mMAP - mMAP_Cutoff
+        mMAP = mMAP/np.max(mMAP)
+        indexVal = np.where(mMAP == np.max(mMAP))
+        indexCoord = self.LUT.xyz2coord(self.LUT.loc2xyz(np.array([[indexVal[0][0],indexVal[1][0],indexVal[2][0]]])))
+
+        # Looping through all stations
+        ii=0
+        while ii < self.DATA.signal.shape[1]:
+            fig = plt.figure(figsize=(30,15))
+
+            # Defining the plot
+            fig.patch.set_facecolor('white')
+            XTrace_Seis = plt.subplot(322)
+            YTrace_Seis = plt.subplot(324)
+            ZTrace_Seis = plt.subplot(321)
+            P_Onset = plt.subplot(323)
+            S_Onset = plt.subplot(326)
+
+            # --- If trace is blank then remove and don't plot ---
+
+            # Plotting the X-, Y- and Z-traces and the P/S onset functions
+            XTrace_Seis.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.FilteredSignal[0,ii,:]/np.max(abs(self.DATA.FilteredSignal[0,ii,:])))*self.TraceScaling,'r',linewidth=0.5)
+            YTrace_Seis.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.FilteredSignal[1,ii,:]/np.max(abs(self.DATA.FilteredSignal[1,ii,:])))*self.TraceScaling,'b',linewidth=0.5)
+            ZTrace_Seis.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.FilteredSignal[2,ii,:]/np.max(abs(self.DATA.FilteredSignal[2,ii,:])))*self.TraceScaling,'g',linewidth=0.5)
+            P_Onset.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),self.DATA.SNR_P[ii,:],'r',linewidth=0.5)
+            S_Onset.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),self.DATA.SNR_S[ii,:],'b',linewidth=0.5)
+
+            # Defining Pick and Error
+            PICKS_df = self.StationPick['Pick']
+            STATION_pick = PICKS_df[PICKS_df['Name'] == self.LUT.station_data['Name'][ii]].reset_index(drop=True)
+            if len(STATION_pick) > 0:
+                STATION_pick = STATION_pick.replace('-1.0',np.nan)
+
+                for jj in range(len(STATION_pick)):
+                    if np.isnan(STATION_pick['PickError'].iloc[jj]):
+                        continue
+
+                    if STATION_pick['Phase'].iloc[jj] == 'P': 
ZTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=-STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + ZTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=+STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + ZTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])) + + # S_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=-STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + # S_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=+STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + # S_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])) + + yy = gaussian_func(self.StationPick['GAU_P'][ii]['xdata'],self.StationPick['GAU_P'][ii]['popt'][0],self.StationPick['GAU_P'][ii]['popt'][1],self.StationPick['GAU_P'][ii]['popt'][2]) + P_Onset.plot(self.StationPick['GAU_P'][ii]['xdata_dt'],yy) + P_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=-STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + P_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=+STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + P_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])) + + + + + else: + YTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=-STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + YTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=+STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + YTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])) + + XTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=-STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + XTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=+STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + XTrace_Seis.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])) + + yy = gaussian_func(self.StationPick['GAU_S'][ii]['xdata'],self.StationPick['GAU_S'][ii]['popt'][0],self.StationPick['GAU_S'][ii]['popt'][1],self.StationPick['GAU_S'][ii]['popt'][2]) + S_Onset.plot(self.StationPick['GAU_S'][ii]['xdata_dt'],yy) + + S_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=-STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + S_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])+timedelta(seconds=+STATION_pick['PickError'].iloc[jj]/2),linestyle='--') + S_Onset.axvline(pd.to_datetime(STATION_pick['PickTime'].iloc[jj])) + + ZTrace_Seis.axvline(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=self.LUT.get_value_at('TIME_P',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]),color='red') + P_Onset.axvline(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=self.LUT.get_value_at('TIME_P',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]),color='red') + YTrace_Seis.axvline(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=self.LUT.get_value_at('TIME_S',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]),color='red') + XTrace_Seis.axvline(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=self.LUT.get_value_at('TIME_S',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]),color='red') + S_Onset.axvline(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + 
timedelta(seconds=self.LUT.get_value_at('TIME_S',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]),color='red')
+
+            # Refining the window around the pick time
+            MINT = pd.to_datetime(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=0.5*self.LUT.get_value_at('TIME_P',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]))
+            MAXT = pd.to_datetime(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=1.5*self.LUT.get_value_at('TIME_S',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][ii]))
+
+            XTrace_Seis.set_xlim([MINT,MAXT])
+            YTrace_Seis.set_xlim([MINT,MAXT])
+            ZTrace_Seis.set_xlim([MINT,MAXT])
+            P_Onset.set_xlim([MINT,MAXT])
+            S_Onset.set_xlim([MINT,MAXT])
+
+            fig.suptitle('Trace for Station {} - PPick = {}, SPick = {}'.format(self.LUT.station_data['Name'][ii],self.StationPick['GAU_P'][ii]['PickValue'],self.StationPick['GAU_S'][ii]['PickValue']))
+
+            if SaveFilename == None:
+                plt.show()
+            else:
+                plt.savefig('{}_CoalescenceTrace_{}.pdf'.format(SaveFilename,self.LUT.station_data['Name'][ii]))
+                plt.close("all")
+
+            ii+=1
+

     def CoalescenceVideo(self,SaveFilename=None):
         STIn = np.where(self.times == self.EVENT['DT'].iloc[0])[0][0]
         ENIn = np.where(self.times == self.EVENT['DT'].iloc[-1])[0][0]
@@ -515,9 +687,8 @@ def CoalescenceVideo(self,SaveFilename=None):
         writer = Writer(fps=4, metadata=dict(artist='Ulvetanna'), bitrate=1800)

-
         FIG = self.CoalescenceImage(STIn)
-        ani = animation.FuncAnimation(FIG, self._CoalescenceVideo_update, frames=np.arange(STIn,ENIn,int(self.DATA.sampling_rate/20)),blit=False,repeat=False)
+        ani = animation.FuncAnimation(FIG, self._CoalescenceVideo_update, frames=np.linspace(STIn,ENIn-1,200),blit=False,repeat=False)

         if SaveFilename == None:
             plt.show()
@@ -533,15 +704,11 @@ def CoalescenceMarginal(self,SaveFilename=None):
         '''

-        # Gaussian fit about the time period
-
-        fig = plt.figure(figsize=(15,15))
-        fig.patch.set_facecolor('white')
-        Coa_XYSlice = plt.subplot2grid((3, 3), (0, 0), colspan=2,rowspan=2)
-        Coa_YZSlice = plt.subplot2grid((3, 3), (2, 0), colspan=2)
-        Coa_XZSlice = plt.subplot2grid((3, 3), (0, 2), rowspan=2)
-        Coa_Logo = plt.subplot2grid((3, 3), (2, 2))
-
+        TimeSliceIndex = np.where(self.times == self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])])[0][0]
+        TimeSlice = self.times[TimeSliceIndex]
+        index = np.where(self.EVENT['DT'] == TimeSlice)[0][0]
+        indexVal1 = self.LUT.coord2loc(np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]])).astype(int)[0]
+        indexCoord = np.array([[self.EVENT['X'].iloc[index],self.EVENT['Y'].iloc[index],self.EVENT['Z'].iloc[index]]])[0,:]

         # Determining the marginal window value from the coalescence function
         mMAP = self.MAP
@@ -554,87 +721,138 @@
         indexVal = np.where(mMAP == np.max(mMAP))
         indexCoord = self.LUT.xyz2coord(self.LUT.loc2xyz(np.array([[indexVal[0][0],indexVal[1][0],indexVal[2][0]]])))

-        # # Determining the location optimal location and error ellipse
-        # samples_weights = mMAP.flatten()
-        # lc = self.LUT.cell_count
-        # ly, lx, lz = np.meshgrid(np.arange(lc[1]), np.arange(lc[0]), np.arange(lc[2]))
-        # x_samples = lx.flatten()*self.LUT.cell_size[0]
-        # y_samples = ly.flatten()*self.LUT.cell_size[1]
-        # z_samples = lz.flatten()*self.LUT.cell_size[2]
-        # SumSW = np.sum(samples_weights)
-        # x_expect = np.sum(samples_weights*x_samples)/SumSW
-        # y_expect = np.sum(samples_weights*y_samples)/SumSW
-        # z_expect = np.sum(samples_weights*z_samples)/SumSW
-        # expect_vector = np.array([x_expect/self.LUT.cell_size[0], y_expect/self.LUT.cell_size[1], z_expect/self.LUT.cell_size[2]], dtype=float)
-        # cov_matrix = np.zeros((3,3))
-        # cov_matrix[0,0] = np.sum(samples_weights*(x_samples-x_expect)*(x_samples-x_expect))/SumSW
-        # cov_matrix[1,1] = np.sum(samples_weights*(y_samples-y_expect)*(y_samples-y_expect))/SumSW
-        # cov_matrix[2,2] = np.sum(samples_weights*(z_samples-z_expect)*(z_samples-z_expect))/SumSW
-        # cov_matrix[0,1] = np.sum(samples_weights*(x_samples-x_expect)*(y_samples-y_expect))/SumSW
-        # cov_matrix[1,0] = cov_matrix[0,1]
-        # cov_matrix[0,2] = np.sum(samples_weights*(x_samples-x_expect)*(z_samples-z_expect))/SumSW
-        # cov_matrix[2,0] = cov_matrix[0,2]
-        # cov_matrix[1,2] = np.sum(samples_weights*(y_samples-y_expect)*(z_samples-z_expect))/SumSW
-        # cov_matrix[2,1] = cov_matrix[1,2]
-        # expect_vector = self.lookup_table.xyz2coord(self.LUT.loc2xyz(np.array([[expect_vector[0],expect_vector[1],expect_vector[2]]])))[0]
-
-
-        # lambda_, v = np.linalg.eig(cov_matrix)
-        # lambda_ = np.sqrt(lambda_)
+
+        # Defining the plots to be represented
+        fig = plt.figure(figsize=(30,15))
+        fig.patch.set_facecolor('white')
+        Coa_XYSlice = plt.subplot2grid((3, 5), (0, 0), colspan=2,rowspan=2)
+        Coa_YZSlice = plt.subplot2grid((3, 5), (2, 0), colspan=2)
+        Coa_XZSlice = plt.subplot2grid((3, 5), (0, 2), rowspan=2)
+        Coa_Trace = plt.subplot2grid((3, 5), (0, 3), colspan=2,rowspan=2)
+        Coa_Logo = plt.subplot2grid((3, 5), (2, 2))
+        Coa_CoaVal = plt.subplot2grid((3, 5), (2, 3), colspan=2)
+
+        # ---------------- Plotting the Traces -----------
+        STIn = np.where(self.times == self.EVENT['DT'].iloc[0])[0][0]
+        ENIn = np.where(self.times == self.EVENT['DT'].iloc[-1])[0][0]
+
+        # --------------- Ordering by distance to event --------------
+        if self.RangeOrder == True:
+            StaInd = np.argsort(self.LUT.get_value_at('TIME_P',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0])[::-1]
+        else:
+            StaInd = np.argsort(self.DATA.StationInformation['Name'])[::-1]
+
+        for ii in range(self.DATA.signal.shape[1]):
+            if self.FilteredSignal == False:
+                Coa_Trace.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.signal[0,ii,:]/np.max(abs(self.DATA.signal[0,ii,:])))*self.TraceScaling+(StaInd[ii]+1),'r',linewidth=0.5)
+                Coa_Trace.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.signal[1,ii,:]/np.max(abs(self.DATA.signal[1,ii,:])))*self.TraceScaling+(StaInd[ii]+1),'b',linewidth=0.5)
+                Coa_Trace.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.signal[2,ii,:]/np.max(abs(self.DATA.signal[2,ii,:])))*self.TraceScaling+(StaInd[ii]+1),'g',linewidth=0.5)
+            else:
+                Coa_Trace.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.FilteredSignal[0,ii,:]/np.max(abs(self.DATA.FilteredSignal[0,ii,:])))*self.TraceScaling+(StaInd[ii]+1),'r',linewidth=0.5)
+                
Coa_Trace.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.FilteredSignal[1,ii,:]/np.max(abs(self.DATA.FilteredSignal[1,ii,:])))*self.TraceScaling+(StaInd[ii]+1),'b',linewidth=0.5) + Coa_Trace.plot(np.arange(self.DATA.startTime,self.DATA.endTime+timedelta(seconds=1/self.DATA.sampling_rate),timedelta(seconds=1/self.DATA.sampling_rate)),(self.DATA.FilteredSignal[2,ii,:]/np.max(abs(self.DATA.FilteredSignal[2,ii,:])))*self.TraceScaling+(StaInd[ii]+1),'g',linewidth=0.5) + + # ---------------- Plotting the Station Travel Times ----------- + for i in range(self.LUT.get_value_at('TIME_P',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0].shape[0]): + tp = self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=self.LUT.get_value_at('TIME_P',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][i]) + ts = self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])] + timedelta(seconds=self.LUT.get_value_at('TIME_S',np.array([indexVal[0][0],indexVal[1][0],indexVal[2][0]]))[0][i]) + + if i == 0: + TP = tp + TS = ts + else: + TP = np.append(TP,tp) + TS = np.append(TS,ts) + + + self.CoaArriavalTP = Coa_Trace.scatter(TP,(StaInd+1),40,'pink',marker='v') + self.CoaArriavalTS = Coa_Trace.scatter(TS,(StaInd+1),40,'purple',marker='v') + +# Coa_Trace.set_ylim([0,ii+2]) + Coa_Trace.set_xlim([self.DATA.startTime+timedelta(seconds=1.6),np.max(TS)]) + #Coa_Trace.get_xaxis().set_ticks([]) + Coa_Trace.yaxis.tick_right() + Coa_Trace.yaxis.set_ticks(StaInd+1) + Coa_Trace.yaxis.set_ticklabels(self.DATA.StationInformation['Name']) + self.CoaTraceVLINE = Coa_Trace.axvline(self.EVENT['DT'].iloc[np.argmax(self.EVENT['COA'])],0,1000,linestyle='--',linewidth=2,color='r') + + # ------------- Plotting the Coalescence Function ----------- + Coa_CoaVal.plot(self.EVENT['DT'],self.EVENT['COA']) + Coa_CoaVal.set_ylabel('Coalescence Value') + Coa_CoaVal.set_xlabel('Date-Time') + Coa_CoaVal.yaxis.tick_right() + Coa_CoaVal.yaxis.set_label_position("right") + Coa_CoaVal.set_xlim([self.EVENT['DT'].iloc[0],self.EVENT['DT'].iloc[-1]]) + Coa_CoaVal.format_xdate = mdates.DateFormatter('%Y-%m-%d') #FIX - Not working + for tick in Coa_CoaVal.get_xticklabels(): + tick.set_rotation(45) + self.CoaValVLINE = Coa_CoaVal.axvline(TimeSlice,0,1000,linestyle='--',linewidth=2,color='r') + + + + # ------------- Spatial Function ----------- # Plotting the marginal window gridX,gridY = np.mgrid[min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]):max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]):(max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]) - min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]))/self.LUT.cell_count[0], min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]):max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]):(max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]) - min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]))/self.LUT.cell_count[1]] - Coa_XYSlice.pcolormesh(gridX,gridY,mMAP[:,:,int(indexVal[2][0])],cmap=self.CMAP,linewidth=0) + rect = Rectangle((np.min(gridX), np.min(gridY)), np.max(gridX)-np.min(gridX),np.max(gridY)-np.min(gridY)) + pc = PatchCollection([rect], facecolor='k') + Coa_XYSlice.add_collection(pc) + Coa_XYSlice.pcolormesh(gridX,gridY,mMAP[:,:,int(indexVal[2][0])],cmap=self.CMAP,edgecolors='face') CS = Coa_XYSlice.contour(gridX,gridY,mMAP[:,:,int(indexVal[2][0])],levels=[0.65,0.75,0.95],colors=('g','m','k')) Coa_XYSlice.clabel(CS, inline=1, fontsize=10) 
Coa_XYSlice.set_xlim([min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]),max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0])]) Coa_XYSlice.set_ylim([min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]),max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1])]) - #Coa_XYSlice_ell = Ellipse(xy=(expect_vector[0], expect_vector[1]),width=lambda_[0]*2, height=lambda_[1]*2,angle=np.rad2deg(np.arccos(v[0, 0]))) - #Coa_XYSlice_ell.set_facecolor('none') - #Coa_XYSlice_ell.set_edgecolor('r') - #Coa_XYSlice.add_artist(Coa_XYSlice_ell) - #Coa_XYSlice.scatter(expect_vector[0], expect_vector[1], c="r", marker="x") - Coa_XYSlice.axvline(x=indexCoord[0][0],linestyle='--',linewidth=2,color='k') - Coa_XYSlice.axhline(y=indexCoord[0][1],linestyle='--',linewidth=2,color='k') - + Coa_XYSlice.axvline(x=indexCoord[0][0],linestyle='--',linewidth=2,color=self.LineStationColor) + Coa_XYSlice.axhline(y=indexCoord[0][1],linestyle='--',linewidth=2,color=self.LineStationColor) gridX,gridY = np.mgrid[min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]):max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]):(max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]) - min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]))/self.LUT.cell_count[0], min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]):max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]):(max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]) - min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]))/self.LUT.cell_count[2]] - Coa_YZSlice.pcolormesh(gridX,gridY,mMAP[:,int(indexVal[1][0]),:],cmap=self.CMAP,linewidth=0) + rect = Rectangle((np.min(gridX), np.min(gridY)), np.max(gridX)-np.min(gridX),np.max(gridY)-np.min(gridY)) + pc = PatchCollection([rect], facecolor='k') + Coa_YZSlice.add_collection(pc) + Coa_YZSlice.pcolormesh(gridX,gridY,mMAP[:,int(indexVal[1][0]),:],cmap=self.CMAP,edgecolors='face') CS = Coa_YZSlice.contour(gridX,gridY,mMAP[:,int(indexVal[1][0]),:], levels=[0.65,0.75,0.95],colors=('g','m','k')) Coa_YZSlice.clabel(CS, inline=1, fontsize=10) - #Coa_YZSlice_ell = Ellipse(xy=(expect_vector[1], expect_vector[2]),width=lambda_[1]*2, height=lambda_[2]*2,angle=np.rad2deg(np.arccos(v[1, 0]))) - #Coa_YZSlice_ell.set_facecolor('none') - #Coa_YZSlice_ell.set_edgecolor('r') - #Coa_YZSlice.add_artist(Coa_YZSlice_ell) - #Coa_YZSlice.scatter(expect_vector[1], expect_vector[2], c="r", marker="x") Coa_YZSlice.set_xlim([min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0]),max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,0])]) Coa_YZSlice.set_ylim([max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]),min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2])]) - Coa_YZSlice.axvline(x=indexCoord[0][0],linestyle='--',linewidth=2,color='k') - Coa_YZSlice.axhline(y=indexCoord[0][2],linestyle='--',linewidth=2,color='k') + Coa_YZSlice.axvline(x=indexCoord[0][0],linestyle='--',linewidth=2,color=self.LineStationColor) + Coa_YZSlice.axhline(y=indexCoord[0][2],linestyle='--',linewidth=2,color=self.LineStationColor) Coa_YZSlice.invert_yaxis() gridX,gridY = np.mgrid[min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]):max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]):(max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]) - min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]))/self.LUT.cell_count[2], min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]):max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]):(max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]) - min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]))/self.LUT.cell_count[1]] - 
+        rect = Rectangle((np.min(gridX), np.min(gridY)), np.max(gridX)-np.min(gridX),np.max(gridY)-np.min(gridY))
+        pc = PatchCollection([rect], facecolor='k')
+        Coa_XZSlice.add_collection(pc)
+        Coa_XZSlice.pcolormesh(gridX,gridY,mMAP[int(indexVal[0][0]),:,:].transpose(),cmap=self.CMAP,edgecolors='face')
         CS = Coa_XZSlice.contour(gridX,gridY,mMAP[int(indexVal[0][0]),:,:].transpose(),levels=[0.65,0.75,0.95],colors=('g','m','k'))
-        #Coa_XZSlice.clabel(CS, inline=1, fontsize=10)
-        #Coa_XZSlice_ell = Ellipse(xy=(expect_vector[0], expect_vector[2]),width=lambda_[0]*2, height=lambda_[2]*2,angle=np.rad2deg(np.arccos(v[0, 0])))
-        #Coa_XZSlice_ell.set_facecolor('none')
-        #Coa_XZSlice_ell.set_edgecolor('r')
-        #Coa_XZSlice.add_artist(Coa_XZSlice_ell)
-        #Coa_XZSlice.scatter(expect_vector[0], expect_vector[2], c="r", marker="x")
         Coa_XZSlice.set_xlim([max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2]),min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,2])])
         Coa_XZSlice.set_ylim([min(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1]),max(self.LUT.xyz2coord(self.LUT.get_grid_xyz())[:,1])])
-        Coa_XZSlice.axvline(x=indexCoord[0][2],linestyle='--',linewidth=2,color='k')
-        Coa_XZSlice.axhline(y=indexCoord[0][1],linestyle='--',linewidth=2,color='k')
+        Coa_XZSlice.axvline(x=indexCoord[0][2],linestyle='--',linewidth=2,color=self.LineStationColor)
+        Coa_XZSlice.axhline(y=indexCoord[0][1],linestyle='--',linewidth=2,color=self.LineStationColor)

-        Coa_XYSlice.scatter(self.LUT.station_data['Longitude'],self.LUT.station_data['Latitude'],15,'k',marker='^')
-        Coa_YZSlice.scatter(self.LUT.station_data['Longitude'],self.LUT.station_data['Elevation'],15,'k',marker='^')
-        Coa_XZSlice.scatter(self.LUT.station_data['Elevation'],self.LUT.station_data['Latitude'],15,'k',marker='<')
+        # Plotting the station locations
+        Coa_XYSlice.scatter(self.LUT.station_data['Longitude'],self.LUT.station_data['Latitude'],15,marker='^',color=self.LineStationColor)
+        Coa_YZSlice.scatter(self.LUT.station_data['Longitude'],self.LUT.station_data['Elevation'],15,marker='^',color=self.LineStationColor)
+        Coa_XZSlice.scatter(self.LUT.station_data['Elevation'],self.LUT.station_data['Latitude'],15,marker='<',color=self.LineStationColor)
         for i,txt in enumerate(self.LUT.station_data['Name']):
-            Coa_XYSlice.annotate(txt,[self.LUT.station_data['Longitude'][i],self.LUT.station_data['Latitude'][i]])
-
+            Coa_XYSlice.annotate(txt,[self.LUT.station_data['Longitude'][i],self.LUT.station_data['Latitude'][i]],color=self.LineStationColor)
+
+        # Plotting the XYFiles
+        if self.XYFiles is not None:
+            XYFiles = pd.read_csv(self.XYFiles,names=['File','Color','Linewidth','Linestyle'])
+            c=0
+            for ff in XYFiles['File']:
+                XYF = pd.read_csv(ff,names=['X','Y'])
+                Coa_XYSlice.plot(XYF['X'],XYF['Y'],linestyle=XYFiles['Linestyle'].iloc[c],linewidth=XYFiles['Linewidth'].iloc[c],color=XYFiles['Color'].iloc[c])
+                c+=1

         # Plotting the logo
         try:
@@ -650,6 +868,7 @@ def CoalescenceMarginal(self,SaveFilename=None):
         else:
             plt.savefig('{}_EventLocationError.pdf'.format(SaveFilename),dpi=400)

+        plt.close('all')


@@ -681,7 +900,7 @@ def __init__(self, param = None):
         self.time_step = 10
         self.StartDateTime=None
         self.EndDateTime=None
-        self.Decimate=10
+        self.Decimate=[1,1,1]

         if param:
             self.load(param)
@@ -759,7 +978,10 @@ def load(self, file):


 class SeisScan:

-    def __init__(self, DATA, lut, reader=None, param=None, output_path=None, output_name=None):
+    def __init__(self, DATA, LUT, reader=None, param=None, output_path=None, output_name=None):
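+        # `LUT` is now a path to a saved lookup table, loaded below via
+        # cmod.LUT().load(), rather than a pre-built LUT object. A hypothetical
+        # call site (paths and parameter names illustrative only):
+        #
+        #   scan = SeisScan(DATA, '/path/to/lookup_table.LUT', param=params,
+        #                   output_path='outdir', output_name='run1')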
+
+        lut = cmod.LUT()
+        lut.load(LUT)
         self.sample_rate = 1000.0
         self.seis_reader = None
         self.lookup_table = lut
@@ -771,13 +993,16 @@
         self.keep_map = False

-        self.pre_pad = 0.0
-        self.post_pad = 0.0
+        ttmax = np.max(lut.fetch_map('TIME_S'))
+        self.pre_pad = None
+        self.post_pad = round(ttmax)
         self.time_step = 10.0

         self.daten = None
         self.dsnr = None
         self.dloc = None
+
+        self.PickThreshold = 1.0

         self.bp_filter_p1 = param.bp_filter_p1
@@ -817,6 +1042,9 @@
         self.MarginalWindow = 30
         self.CoalescenceGrid = False
         self.CoalescenceVideo = False
+        self.CoalescencePicture = False
+        self.CoalescenceTrace = False
+        self.CutMSEED = False

         self.PickingType = 'Gaussian'
         self.LocationError = 0.95
@@ -827,6 +1055,7 @@
         self.MAP = None
         self.EVENT = None
+        self.XYFiles = None


     def _pre_proc_p1(self, sig_z, srate):
@@ -849,9 +1078,9 @@ def _compute_onset_p1(self, sig_z, srate):
         ltw = int(ltw * srate) + 1                      # Convert the onset window to actual samples
         sig_z = self._pre_proc_p1(sig_z, srate)         # Apply the pre-processing definition
         self.filt_data['sigz'] = sig_z                  # Defining the data to pass
-        sig_z = onset(sig_z, stw, ltw)                  # Determine the onset function using definition
+        sig_z_raw,sig_z = onset(sig_z, stw, ltw)        # Determine the onset function using definition
         self.onset_data['sigz'] = sig_z                 # Define the onset function from the data
-        return sig_z
+        return sig_z_raw,sig_z

     def _compute_onset_s1(self, sig_e, sig_n, srate):
         stw, ltw = self.onset_win_s1                    # Define the STW and LTW for the onset function
@@ -860,13 +1089,14 @@
         sig_e, sig_n = self._pre_proc_s1(sig_e, sig_n, srate)   # Apply the pre-processing definition
         self.filt_data['sige'] = sig_e                  # Defining filtered signal to pass
         self.filt_data['sign'] = sig_n                  # Defining filtered signal to pass
-        sig_e = onset(sig_e, stw, ltw)                  # Determine the onset function from the filtered signal
-        sig_n = onset(sig_n, stw, ltw)                  # Determine the onset function from the filtered signal
+        sig_e_raw,sig_e = onset(sig_e, stw, ltw)        # Determine the onset function from the filtered signal
+        sig_n_raw,sig_n = onset(sig_n, stw, ltw)        # Determine the onset function from the filtered signal
         self.onset_data['sige'] = sig_e                 # Define the onset function from the data
         self.onset_data['sign'] = sig_n                 # Define the onset function from the data
-        snr = np.sqrt(sig_e * sig_e + sig_n * sig_n)    # Define the combined onset function from E & N
+        snr = np.sqrt(sig_e * sig_e + sig_n * sig_n)
+        snr_raw = np.sqrt(sig_e_raw * sig_e_raw + sig_n_raw * sig_n_raw)    # Define the combined onset function from E & N
         self.onset_data['sigs'] = snr
-        return snr
+        return snr_raw,snr


     def _compute(self, cstart,cend, samples,station_avaliability):
@@ -878,15 +1108,23 @@
         sige = samples[0]
         sign = samples[1]
         sigz = samples[2]
-
-        snr_p1 = self._compute_onset_p1(sigz, srate)
-        snr_s1 = self._compute_onset_s1(sige, sign, srate)
+        # Demeaning the data
+        #sige = sige - np.mean(sige,axis=1)
+        #sign = sign - np.mean(sign,axis=1)
+        #sigz = sigz - np.mean(sigz,axis=1)
+
+        snr_p1_raw,snr_p1 = self._compute_onset_p1(sigz, srate)
+        snr_s1_raw,snr_s1 = self._compute_onset_s1(sige, sign, srate)
         self.DATA.SNR_P = snr_p1
         self.DATA.SNR_S = snr_s1
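+        # `onset` now returns a (raw, smoothed) pair. A minimal sketch of an
+        # onset function with that contract, built on obspy's classic_sta_lta
+        # (which this module already references); the real `onset` and its
+        # smoothing may differ:
+        #
+        #   from obspy.signal.trigger import classic_sta_lta
+        #   def onset_sketch(sig, stw, ltw):
+        #       raw = classic_sta_lta(sig, stw, ltw)               # STA/LTA ratio
+        #       smoothed = np.convolve(raw, np.ones(stw)/stw, mode='same')
+        #       return raw, smoothed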
+        self.DATA.SNR_P_raw = snr_p1_raw
+        self.DATA.SNR_S_raw = snr_s1_raw
+
+        #self._Gaussian_Coalescence()

-        snr = np.concatenate((snr_p1, snr_s1))
+        snr = np.concatenate((self.DATA.SNR_P, self.DATA.SNR_S))

         snr[np.isnan(snr)] = 0
@@ -896,8 +1134,8 @@
         nchan, tsamp = snr.shape

-        pre_smp = int(self.pre_pad * srate)
-        pos_smp = int(self.post_pad * srate)
+        pre_smp = int(self.pre_pad * int(srate))
+        pos_smp = int(self.post_pad * int(srate))
         nsamp = tsamp - pre_smp - pos_smp
         daten = 0.0 - pre_smp / srate
@@ -905,19 +1143,42 @@
         if self._map is None:
             #print('  Allocating memory: {}'.format(ncell + (tsamp,)))
-            self._map = np.zeros(ncell + (tsamp,), dtype=np.float64)
-
-        dind = np.zeros(tsamp, np.int64)
-        dsnr = np.zeros(tsamp, np.double)
+            self._map = np.zeros(ncell + (nsamp,), dtype=np.float64)
+
+        dind = np.zeros(nsamp, np.int64)
+        dsnr = np.zeros(nsamp, np.double)
+
+        # ilib.scan(snr, tt, 0, pre_smp + nsamp +pos_smp, self._map, self.NumberOfCores)
+        # ilib.detect(self._map, dsnr, dind, 0, pre_smp + nsamp +pos_smp, self.NumberOfCores)
+        # daten = np.arange((cstart+timedelta(seconds=self.pre_pad)), (cend + timedelta(seconds=-self.post_pad) + timedelta(seconds=1/srate)),timedelta(seconds=1/srate))
+        # dsnr = np.exp((dsnr / nchan) - 1.0)
+        # #dsnr = classic_sta_lta(np.exp((dsnr / nchan) - 1.0),self.onset_win_p1[0]*self.sample_rate*0.5,self.onset_win_p1[1]*self.sample_rate*0.5)
+        # dsnr = dsnr[pre_smp:pre_smp + nsamp]
+        # dloc = self.lookup_table.index2xyz(dind[pre_smp:pre_smp + nsamp])
+        # MAP = self._map[:,:,:,(pre_smp+1):pre_smp + nsamp]
+
+        print(snr.shape)
+        print(nsamp)
+        print(self.pre_pad)
+        print(self.post_pad)
+        ilib.scan(snr, tt, pre_smp, pos_smp, nsamp, self._map, self.NumberOfCores)
+        ilib.detect(self._map, dsnr, dind, 0, nsamp, self.NumberOfCores)
+        daten = np.arange((cstart+timedelta(seconds=self.pre_pad)), (cend + timedelta(seconds=-self.post_pad) + timedelta(seconds=1/srate)),timedelta(seconds=1/srate))
+        dsnr = np.exp((dsnr / nchan) - 1.0)
+        #dsnr = classic_sta_lta(np.exp((dsnr / nchan) - 1.0),self.onset_win_p1[0]*self.sample_rate*0.5,self.onset_win_p1[1]*self.sample_rate*0.5)
+        dloc = self.lookup_table.index2xyz(dind)
+        MAP = self._map

-        ilib.scan(snr, tt, 0, pre_smp + nsamp +pos_smp, self._map, self.NumberOfCores)
-        ilib.detect(self._map, dsnr, dind, 0, pre_smp + nsamp +pos_smp, self.NumberOfCores)
-        daten = np.arange((cstart+timedelta(seconds=self.pre_pad)), (cend + timedelta(seconds=-self.post_pad) + timedelta(seconds=1/srate)),timedelta(seconds=1/srate))
-        dsnr = np.exp((dsnr[pre_smp:pre_smp + nsamp] / nchan) - 1.0)
-        dloc = self.lookup_table.index2xyz(dind[pre_smp:pre_smp + nsamp])
+        self._map = None

-        MAP = self._map[:,:,:,(pre_smp+1):pre_smp + nsamp]
+        print(daten)
+        print(daten.shape)
+        print(dsnr.shape)
+        print(dloc.shape)
+        print(MAP.shape)

         return daten, dsnr, dloc, MAP

@@ -931,16 +1192,15 @@ def _continious_compute(self,starttime,endtime):

         # 2. Defining the pre- and post- padding
         # 3.
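+        # Worked example of the padding arithmetic in _compute above: with
+        # srate = 1000.0 Hz, pre_pad = 10 s and post_pad = 20 s, a 60 s chunk
+        # gives tsamp = 60000 samples, pre_smp = 10000 and pos_smp = 20000,
+        # so the coalescence map is only allocated for the unpadded interior,
+        # nsamp = tsamp - pre_smp - pos_smp = 30000 samples.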
+        CoaV = 1.0

         self.StartDateTime = datetime.strptime(starttime,'%Y-%m-%dT%H:%M:%S.%f')
         self.EndDateTime = datetime.strptime(endtime,'%Y-%m-%dT%H:%M:%S.%f')

+        # Deleting the scan if it already exists
+        self.output.del_scan()

-        if self.pre_pad == 0.0 and self.post_pad == 0.0:
-            self.pre_pad = sum(np.max(np.array([self.onset_win_p1,self.onset_win_s1]),0))
-
+        # ------- Continuous Seismic Detection ------
         print('==============================================================================================================================')
         print('   Coalescence Microseismic Scanning : PATH:{} - NAME:{}'.format(self.output.path, self.output.name))
@@ -959,12 +1219,19 @@
             #daten, dsnr, dloc = self._compute_s1(0.0, DATA.signal)
             daten, dsnr, dloc, map = self._compute(cstart,cend, self.DATA.signal,self.DATA.station_avaliability)
-
-
             dcoord = self.lookup_table.xyz2coord(dloc)
             self.output.FileSampleRate = self.Output_SampleRate
-            self.output.write_scan(daten,dsnr,dcoord)
+
+#            self.output.write_scan(daten[:-1],CoaVp[:-1],dcoord[:-1,:])
+
+            # Shifting this chunk so it joins continuously onto the previous
+            # one, then writing it out (the first-chunk and later-chunk cases
+            # reduce to the same operation)
+            CoaVp = dsnr + (CoaV - dsnr[0])
+            self.output.write_scan(daten[:-1],CoaVp[:-1],dcoord[:-1,:])
+            CoaV = CoaVp[-1]

             i += 1

@@ -977,40 +1244,171 @@ def _Trigger_scn(self,CoaVal,starttime,endtime):

-        CoaVal = CoaVal[CoaVal['COA'] > self.DetectionThreshold]
+
+        # Keeping only the samples that exceed the detection threshold
+        CoaVal = CoaVal[CoaVal['COA'] > self.DetectionThreshold]
         CoaVal = CoaVal[(CoaVal['DT'] >= datetime.strptime(starttime,'%Y-%m-%dT%H:%M:%S.%f')) & (CoaVal['DT'] <= datetime.strptime(endtime,'%Y-%m-%dT%H:%M:%S.%f'))]

-        CoaVal = CoaVal.sort_values(['COA'],ascending=False).reset_index(drop=True)
-        CoaVal['EventID'] = 0
-        c=1
-        for i in range(len(CoaVal)):
-            if CoaVal['EventID'].iloc[i] == 0:
-                tmpDT_MIN = CoaVal['DT'].iloc[i] + timedelta(seconds=-self.MarginalWindow/2)
-                tmpDT_MAX = CoaVal['DT'].iloc[i] + timedelta(seconds=self.MarginalWindow/2)
-
-                tmpDATA = CoaVal[(CoaVal['DT'] >= tmpDT_MIN) & (CoaVal['DT'] <= tmpDT_MAX)]
-
-                CoaVal['EventID'].iloc[tmpDATA.index] = c
-                c+=1
+        CoaVal = CoaVal.reset_index(drop=True)

+        # ----------- Determining the initial triggered events, not inspecting overlaps ------
+        c = 0
+        e = 1
+        while c < len(CoaVal):
+            # Walking forward while the samples remain contiguous in time; the
+            # array bound is enforced in the loop condition
+            d = c
+            while d+1 < len(CoaVal) and CoaVal['DT'].iloc[d] + timedelta(seconds=1/self.sample_rate) == CoaVal['DT'].iloc[d+1]:
+                d += 1
+
+            indmin = c
+            indmax = d
+            indVal = c + np.argmax(CoaVal['COA'].values[c:d+1])
+
+            # Determining the times of the window start, the window end and
+            # the maximum coalescence value
+            TimeMin = CoaVal['DT'].iloc[indmin]
+            TimeMax = CoaVal['DT'].iloc[indmax]
+            TimeVal = CoaVal['DT'].iloc[indVal]
+
+            COA_V = CoaVal['COA'].iloc[indVal]
+            COA_X = CoaVal['X'].iloc[indVal]
+            COA_Y = CoaVal['Y'].iloc[indVal]
+            COA_Z = CoaVal['Z'].iloc[indVal]
+
+            # Expanding the window to at least half a marginal window either
+            # side of the peak
+            if (TimeVal-TimeMin) < timedelta(seconds=0.5*self.MarginalWindow):
+                TimeMin = CoaVal['DT'].iloc[indmin] + timedelta(seconds=-0.5*self.MarginalWindow)
+            if (TimeMax - TimeVal) < timedelta(seconds=0.5*self.MarginalWindow):
+                TimeMax = CoaVal['DT'].iloc[indmax] + timedelta(seconds=0.5*self.MarginalWindow)
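+            # The run-detection above can equivalently be done vectorised; an
+            # illustrative (not drop-in) sketch using numpy:
+            #
+            #   gaps = np.where(np.diff(CoaVal['DT'].values) >
+            #                   np.timedelta64(int(1e9/self.sample_rate),'ns'))[0]
+            #   runs = np.split(np.arange(len(CoaVal)), gaps+1)
+            #   # each element of `runs` is one contiguous above-threshold window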
+            # Appending these triggers to the set of candidate events
+            if 'IntEvents' not in vars():
+                IntEvents = pd.DataFrame([[e,TimeVal,COA_V,COA_X,COA_Y,COA_Z,TimeMin,TimeMax]],columns=['EventNum','CoaTime','COA_V','COA_X','COA_Y','COA_Z','MinTime','MaxTime'])
             else:
-                continue
+                dat = pd.DataFrame([[e,TimeVal,COA_V,COA_X,COA_Y,COA_Z,TimeMin,TimeMax]],columns=['EventNum','CoaTime','COA_V','COA_X','COA_Y','COA_Z','MinTime','MaxTime'])
+                IntEvents = IntEvents.append(dat,ignore_index=True)

-        EVENTS = pd.DataFrame(columns=['DT','COA','X','Y','Z'])
-        if max(CoaVal['EventID']) > 0:
-            for j in range(1,max(CoaVal['EventID'])+1):
-                tmpDATA = CoaVal[CoaVal['EventID'] == j].sort_values(['COA'],ascending=False).reset_index(drop=True)
-                EVENTS = EVENTS.append(tmpDATA.iloc[0])
-        EVENTS.reset_index(drop=True)
+            c = d+1
+            e += 1

+        # ----------- Merging triggers whose windows overlap into single events ------
+        EventNum = np.ones((len(IntEvents)),dtype=int)
+        d=1
+        for ee in range(len(IntEvents)):
+            if (ee+1 < len(IntEvents)) and ((IntEvents['MaxTime'].iloc[ee] - IntEvents['MinTime'].iloc[ee+1]).total_seconds() < 0):
+                EventNum[ee] = d
+                d+=1
+            else:
+                EventNum[ee] = d
+        IntEvents['EventNum'] = EventNum
+
+        d=0
+        for ee in range(1,np.max(IntEvents['EventNum'])+1):
+            tmp = IntEvents[IntEvents['EventNum'] == ee].reset_index(drop=True)
+            imax = np.argmax(tmp['COA_V'].values)
+            dat = pd.DataFrame([[ee, tmp['CoaTime'].iloc[imax], np.max(tmp['COA_V']), tmp['COA_X'].iloc[imax], tmp['COA_Y'].iloc[imax], tmp['COA_Z'].iloc[imax], tmp['CoaTime'].iloc[imax] + timedelta(seconds=-self.MarginalWindow), tmp['CoaTime'].iloc[imax] + timedelta(seconds=self.MarginalWindow)]],columns=['EventNum','CoaTime','COA_V','COA_X','COA_Y','COA_Z','MinTime','MaxTime'])
+            if d==0:
+                EVENTS = dat
+                d+=1
+            else:
+                EVENTS = EVENTS.append(dat,ignore_index=True)

+        # Defining an event id from the time of maximum coalescence
+        EVID = np.chararray(len(EVENTS),17)
         for e in range(len(EVENTS)):
-            EVENTS['EventID'].iloc[e] = re.sub(r"\D", "",EVENTS['DT'].astype(str).iloc[e])
+            EVID[e] = str(re.sub(r"\D", "",EVENTS['CoaTime'].astype(str).iloc[e]))
+        EVENTS['EventID'] = EVID

+        return EVENTS

+    def plot_scn(self,starttime,endtime,stations=None,savefile=None):
+        '''
+            Plots the continuous coalescence trace saved in the .scn file
+            between starttime and endtime, marking the triggered events.
+        '''
+
+        # Defining the filename of the trace
+        fname = path.join(self.output.path,self.output.name + '.scn')
+
+        if path.exists(fname):
+            # Loading the .scn file
+            DATA = pd.read_csv(fname,names=['DT','COA','X','Y','Z'])
+            DATA['DT'] = pd.to_datetime(DATA['DT'])
+
+            # Plotting the .scn file
+            fig = plt.figure(figsize=(30,15))
+            fig.patch.set_facecolor('white')
+            plt.plot(DATA['DT'],DATA['COA'],color='blue')
+            plt.xlabel('Datetime')
+            plt.ylabel('Maximum Coalescence')
+            plt.axhline(self.DetectionThreshold,color='green')
+
+            EVENTS = self._Trigger_scn(DATA,DATA['DT'].iloc[0].strftime('%Y-%m-%dT%H:%M:%S.%f'),DATA['DT'].iloc[-1].strftime('%Y-%m-%dT%H:%M:%S.%f'))
+
+            for ee in range(len(EVENTS['MinTime'])):
+                plt.axvline(x=pd.to_datetime(EVENTS['MinTime'].iloc[ee]),linestyle='--',color='red')
+                plt.axvline(x=pd.to_datetime(EVENTS['MaxTime'].iloc[ee]),linestyle='--',color='red')
+                plt.axvline(x=pd.to_datetime(EVENTS['CoaTime'].iloc[ee]),color='red')
+
+            plt.xlim([pd.to_datetime(starttime),pd.to_datetime(endtime)])
+
+            # NOTE: the `stations` argument is accepted but an overlay of the
+            # station availability is not implemented yet; the same figure is
+            # produced either way.
+
+            # Saving figure if defined
+            if savefile is None:
+                plt.show()
+            else:
+                plt.savefig(savefile)
+
+        else:
+            print('Please run scn.Detect to generate a .scn file !')
+

@@ -1022,15 +1420,196 @@ def Detect(self,starttime,endtime):
         '''
         # Conduct the continuous compute on the decimated grid
-        lut = self.lookup_table
-        lut_decimate = lut.decimate([self.Decimate,self.Decimate,self.Decimate])
-        self.lookup_table = lut_decimate
-
+        self.lookup_table = self.lookup_table.decimate(self.Decimate)
+
+        # Define the pre-pad as a function of the onset windows
+        if self.pre_pad is None:
+            self.pre_pad = max(self.onset_win_p1[1],self.onset_win_s1[1]) + 3*max(self.onset_win_p1[0],self.onset_win_s1[0])
+
         # Detect the possible events from the decimated grid
         self._continious_compute(starttime,endtime)

-    def _GaussianTrigger(self,SNR,PHASE,cstart,eventT):
+    def _Gaussian_Coalescence(self):
+        '''
+            Function to fit a Gaussian to each above-threshold window of the
+            station onset functions, replacing the raw onset data.
+        '''
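+        # The fitting below relies on gaussian_func and scipy's curve_fit; an
+        # illustrative standalone round-trip of that step (gaussian_func is
+        # assumed to be a*exp(-(x-mu)**2/(2*sigma**2))):
+        #
+        #   import numpy as np
+        #   from scipy.optimize import curve_fit
+        #
+        #   def gaussian_func(x, a, mu, sigma):
+        #       return a * np.exp(-(x - mu)**2 / (2.0 * sigma**2))
+        #
+        #   x = np.arange(20, dtype=float)
+        #   y = gaussian_func(x, 3.0, 10.0, 2.0)
+        #   p0 = [y.max(), x[np.argmax(y)], 2.0]   # amplitude, centre, width
+        #   popt, pcov = curve_fit(gaussian_func, x, y, p0)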
+
+        SNR_P = self.DATA.SNR_P
+        SNR_S = self.DATA.SNR_S
+        X = np.arange(SNR_P.shape[1])
+
+        GAU_THRESHOLD = 1.4
+
+        # ---- Selecting only the data above a predefined threshold ----
+        # (values below the threshold are set to NaN)
+        SNR_P[np.where(SNR_P < GAU_THRESHOLD)] = np.nan
+        SNR_S[np.where(SNR_S < GAU_THRESHOLD)] = np.nan
+
+        # Blank arrays holding, for each station, the label of the
+        # above-threshold window that each sample belongs to
+        SNR_P_GauNum = np.zeros(SNR_P.shape)*np.nan
+        SNR_S_GauNum = np.zeros(SNR_S.shape)*np.nan
+
+        # --- Determining the indices to fit Gaussians about (P) ---
+        for s in range(len(SNR_P)):
+            c=0
+            e=1
+            ValInd = np.where(~np.isnan(SNR_P[s,:]))[0]
+            while c < len(ValInd):
+                # Walking forward while the indices remain contiguous; the
+                # array bound is enforced in the loop condition
+                d=c
+                while d+1 < len(ValInd) and ValInd[d]+1 == ValInd[d+1]:
+                    d+=1
+
+                SNR_P_GauNum[s,ValInd[c]:ValInd[d]+1] = e
+
+                c=d+1
+                e+=1
+
+        self.DATA.SNR_P_GauNum = SNR_P_GauNum
+
+        # --- Determining the indices to fit Gaussians about (S) ---
+        for s in range(len(SNR_S)):
+            c=0
+            e=1
+            ValInd = np.where(~np.isnan(SNR_S[s,:]))[0]
+            while c < len(ValInd):
+                d=c
+                while d+1 < len(ValInd) and ValInd[d]+1 == ValInd[d+1]:
+                    d+=1
+
+                SNR_S_GauNum[s,ValInd[c]:ValInd[d]+1] = e
+
+                c=d+1
+                e+=1
+
+        self.DATA.SNR_S_GauNum = SNR_S_GauNum
+
+        # --- Fitting a Gaussian to each labelled window (P) ---
+        SNR_PGAU = np.zeros(SNR_P.shape)
+        for s in range(SNR_P.shape[0]):
+            SNR_P_GAU = np.zeros(X.shape)
+            if ~np.isnan(np.nanmax(SNR_P_GauNum[s,:])):
+                for ee in range(1,int(round(np.nanmax(SNR_P_GauNum[s,:])))):
+
+                    XSig = X[np.where((SNR_P_GauNum[s,:] == ee))[0]]
+                    YSig = SNR_P[s,np.where((SNR_P_GauNum[s,:] == ee))[0]]
+
+                    if len(YSig) > 8:
+                        try:
+                            lowfreq = float(self.bp_filter_p1[0])
+                            p0 = [np.max(YSig), np.argmax(YSig) + np.min(XSig), 1./(lowfreq/4.)]
+
+                            # Fitting the Gaussian to this window of the onset
+                            popt, pcov = curve_fit(gaussian_func, XSig, YSig, p0)
+                            tmp_PGau = gaussian_func(XSig.astype(float),float(popt[0]),float(popt[1]),float(popt[2]))
+                            SNR_P_GAU[np.where((SNR_P_GauNum[s,:] == ee))[0]] = tmp_PGau
+                        except Exception:
+                            print('Error with {}'.format(ee))
+                    else:
+                        continue
+
+            SNR_PGAU[s,:] = SNR_P_GAU
+
+        self.DATA.SNR_P = SNR_PGAU
+
+        # --- Fitting a Gaussian to each labelled window (S) ---
+        SNR_SGAU = np.zeros(SNR_S.shape)
+        for s in range(SNR_S.shape[0]):
+            SNR_S_GAU = np.zeros(X.shape)
+            if ~np.isnan(np.nanmax(SNR_S_GauNum[s,:])):
+                for ee in range(1,int(round(np.nanmax(SNR_S_GauNum[s,:])))):
+
+                    XSig = X[np.where((SNR_S_GauNum[s,:] == ee))[0]]
+                    YSig = SNR_S[s,np.where((SNR_S_GauNum[s,:] == ee))[0]]
+
+                    if len(YSig) > 8:
+                        try:
+                            lowfreq = float(self.bp_filter_p1[0])
+                            p0 = [np.max(YSig), np.argmax(YSig) + np.min(XSig), 1./(lowfreq/4.)]
+
+                            # Fitting the Gaussian to this window of the onset
+                            popt, pcov = curve_fit(gaussian_func, XSig, YSig, p0)
+                            tmp_SGau = gaussian_func(XSig.astype(float),float(popt[0]),float(popt[1]),float(popt[2]))
+                            SNR_S_GAU[np.where((SNR_S_GauNum[s,:] == ee))[0]] = tmp_SGau
+                        except Exception:
+                            print('Error with {}'.format(ee))
+                    else:
+                        continue
+
+            SNR_SGAU[s,:] = SNR_S_GAU
+
+        self.DATA.SNR_S = SNR_SGAU

@@ -1038,26 +1617,130 @@
+    def _GaussianTrigger(self,SNR,PHASE,cstart,eventTP,eventTS,Name):
         '''
         Function to fit a Gaussian to the onset function, based on knowledge of the approximate
         trigger index, the lowest frequency within the signal and the signal sampling rate. Will
         fit a Gaussian and return the fit parameters, the pick mean and the pick standard deviation.
         '''
+        #print('Fitting Gaussian for {} - {} - {}'.format(PHASE,cstart,eventTP))
+
         sampling_rate = self.sample_rate
-        trig_idx = int(((eventT-cstart).seconds + (eventT-cstart).microseconds/10.**6) *sampling_rate)
+
+        # Determining the trigger indices from the modelled P and S arrival times
+        trig_idx_P = int(((eventTP-cstart).seconds + (eventTP-cstart).microseconds/10.**6) *sampling_rate)
+        trig_idx_S = int(((eventTS-cstart).seconds + (eventTS-cstart).microseconds/10.**6) *sampling_rate)
+
+        # Search windows of half the S-P time either side of each modelled
+        # arrival, clamped to the bounds of the onset function
+        P_idxmin = max(int(trig_idx_P - (trig_idx_S-trig_idx_P)/2), 0)
+        P_idxmax = min(int(trig_idx_P + (trig_idx_S-trig_idx_P)/2), len(SNR))
+        S_idxmin = max(int(trig_idx_S - (trig_idx_S-trig_idx_P)/2), 0)
+        S_idxmax = min(int(trig_idx_S + (trig_idx_S-trig_idx_P)/2), len(SNR))
+
+        Pidx = np.argmax(SNR[P_idxmin:P_idxmax]) + P_idxmin
+        Sidx = np.argmax(SNR[S_idxmin:S_idxmax]) + S_idxmin
+
         if PHASE == 'P':
             lowfreq = self.bp_filter_p1[0]
+            trig_idx = Pidx
         if PHASE == 'S':
             lowfreq = self.bp_filter_s1[0]
+            trig_idx = Sidx

         data_half_range = int(1.25*sampling_rate/(lowfreq))    # half-range number of indices to fit the Gaussian over (one wavelength of the lowest frequency component)
         x_data = np.arange(trig_idx-data_half_range, trig_idx+data_half_range,dtype=float)/sampling_rate    # x data, in seconds
+        y_data = SNR[int(trig_idx-data_half_range):int(trig_idx+data_half_range)]    # +/- one wavelength of lowest frequency around trigger
         p0 = [np.amax(SNR), float(trig_idx)/sampling_rate, 1./(lowfreq/4.)]    # Initial guess (should work for any sampling rate and frequency)
+
+        # Datetime values corresponding to x_data
+        XDATA = np.array([cstart + timedelta(seconds=float(xx)) for xx in x_data])
+
         try:
             popt, pcov = curve_fit(gaussian_func, x_data, y_data, p0)    # Fit Gaussian to data
             sigma = np.absolute(popt[2])    # Get standard deviation from Gaussian fit
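+
+            # A sketch of the acceptance test applied below: the fitted
+            # amplitude popt[0] is located on the empirical CDF of the whole
+            # onset trace, and the pick is kept only if it sits far enough up
+            # the tail (illustrative only):
+            #
+            #   n, bins = np.histogram(SNR, bins=100)
+            #   ecdf = np.cumsum(n) / np.sum(n)
+            #   mids = 0.5 * (bins[1:] + bins[:-1])
+            #   tail = ecdf[np.where((mids - popt[0]) >= 0)[0]]
+            #   accept = (len(tail) == 0) or (tail.min() >= self.PickThreshold)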
+            # Mean is popt[1]; the absolute pick time is cstart + popt[1] (in seconds)
+            mean = cstart + timedelta(seconds=float(popt[1]))
+
+            maxSNR = popt[0]
+
+            # Determining if the pick amplitude is significant relative to the
+            # distribution of the onset function
+            n, bins = np.histogram(SNR,bins=np.arange(0,np.max(SNR),7/5000))
+            mids = 0.5*(bins[1:] + bins[:-1])
+            ncum = np.cumsum(n)/np.sum(n)
+
+            if (len(np.where((mids-popt[0]) >= 0)[0]) == 0):
+                # The fitted amplitude exceeds every bin centre: accept the pick
+                GAU_FITS = {}
+                GAU_FITS['popt'] = popt
+                GAU_FITS['xdata'] = x_data
+                GAU_FITS['xdata_dt'] = XDATA
+                GAU_FITS['PickValue'] = 1.0
+            else:
+                if (np.min(ncum[np.where((mids-popt[0]) >= 0)[0]]) >= self.PickThreshold):
+                    GAU_FITS = {}
+                    GAU_FITS['popt'] = popt
+                    GAU_FITS['xdata'] = x_data
+                    GAU_FITS['xdata_dt'] = XDATA
+                    GAU_FITS['PickValue'] = np.min(ncum[np.where((mids-popt[0]) >= 0)[0]])
+                else:
+                    GAU_FITS = {}
+                    GAU_FITS['popt'] = np.zeros((3))
+                    GAU_FITS['xdata'] = np.zeros(x_data.shape)
+                    GAU_FITS['xdata_dt'] = np.zeros(XDATA.shape)
+                    GAU_FITS['PickValue'] = np.min(ncum[np.where((mids-popt[0]) >= 0)[0]])
+                    sigma = -1
+                    mean = -1
+                    maxSNR = -1

-        except:
+        except Exception:
+            GAU_FITS = {}
+            GAU_FITS['popt'] = np.zeros((3))
+            GAU_FITS['xdata'] = np.zeros(x_data.shape)
+            GAU_FITS['xdata_dt'] = np.zeros(XDATA.shape)
+            GAU_FITS['PickValue'] = -1
+
             sigma = -1
+            mean = -1
+            maxSNR = -1

+        return GAU_FITS,maxSNR,sigma,mean

-        return sigma

@@ -1070,39 +1753,52 @@ def _ArrivalTrigger(self,EVENT_MaxCoa,EventName):

         SNR_P = self.DATA.SNR_P
         SNR_S = self.DATA.SNR_S

-        ttp = self.lookup_table.value_at('TIME_P', np.array(self.lookup_table.coord2xyz(np.array([EVENT_MaxCoa[['X','Y','Z']].tolist()]))).astype(int))[0]
-        tts = self.lookup_table.value_at('TIME_S', np.array(self.lookup_table.coord2xyz(np.array([EVENT_MaxCoa[['X','Y','Z']].tolist()]))).astype(int))[0]
+        #print(EVENT_MaxCoa[['X','Y','Z']].iloc[0])

-        print(ttp)
+        ttp = self.lookup_table.value_at('TIME_P', np.array(self.lookup_table.coord2xyz(np.array([EVENT_MaxCoa[['X','Y','Z']].values]))).astype(int))[0]
+        tts = self.lookup_table.value_at('TIME_S', np.array(self.lookup_table.coord2xyz(np.array([EVENT_MaxCoa[['X','Y','Z']].values]))).astype(int))[0]

+        # Determining the stations that can be picked on, and the phases
-        STATIONS=pd.DataFrame(columns=['Name','Phase','Pick','PickError'])
+        STATION_pickS=pd.DataFrame(columns=['Name','Phase','ModelledTime','PickTime','PickError'])
+        c=0
+        d=0
         for s in range(len(SNR_P)):
-            if np.nansum(SNR_P[s]) != 0:
-                stationEventPT = EVENT_MaxCoa['DT'] + timedelta(seconds=ttp[s])
+            stationEventPT = EVENT_MaxCoa['DT'] + timedelta(seconds=ttp[s])
+            stationEventST = EVENT_MaxCoa['DT'] + timedelta(seconds=tts[s])

-                if self.PickingType == 'Gaussian':
-                    Err = self._GaussianTrigger(SNR_P[s],'P',self.DATA.startTime,stationEventPT.to_pydatetime())
+            if self.PickingType == 'Gaussian':
+                GauInfoP,maxSNR_P,Err,Mn = self._GaussianTrigger(SNR_P[s],'P',self.DATA.startTime,stationEventPT.to_pydatetime(),stationEventST.to_pydatetime(),self.lookup_table.station_data['Name'][s])
+                if c==0:
+                    GAUP = GauInfoP
+                    c+=1
+                else:
+                    GAUP = np.hstack((GAUP,GauInfoP))
+
+            tmpSTATION_pick = pd.DataFrame([[self.lookup_table.station_data['Name'][s],'P',stationEventPT,Mn,Err,maxSNR_P]],columns=['Name','Phase','ModelledTime','PickTime','PickError','PickSNR'])
+            STATION_pickS = STATION_pickS.append(tmpSTATION_pick)
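+            # GAUP (and GAUS below) accumulate the per-station Gaussian-fit
+            # dictionaries returned by _GaussianTrigger; they are handed to
+            # SeisPlot later (as StationPick['GAU_P'] / StationPick['GAU_S'])
+            # so the fitted onset curves can be drawn alongside the picks.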
-
-                tmpSTATION = pd.DataFrame([[self.lookup_table.station_data['Name'][s],'P',stationEventPT,Err]],columns=['Name','Phase','Pick','PickError'])
-                STATIONS = STATIONS.append(tmpSTATION)
-            if np.nansum(SNR_S[s]) != 0:
-
-                stationEventST = EVENT_MaxCoa['DT'] + timedelta(seconds=tts[s])
-
-                if self.PickingType == 'Gaussian':
-                    Err = self._GaussianTrigger(SNR_P[s],'S',self.DATA.startTime,stationEventPT.to_pydatetime())
-                tmpSTATION = pd.DataFrame([[self.lookup_table.station_data['Name'][s],'S',stationEventST,Err]],columns=['Name','Phase','Pick','PickError'])
-                STATIONS = STATIONS.append(tmpSTATION)
+            if self.PickingType == 'Gaussian':
+                GauInfoS,maxSNR_S,Err,Mn = self._GaussianTrigger(SNR_S[s],'S',self.DATA.startTime,stationEventPT.to_pydatetime(),stationEventST.to_pydatetime(),self.lookup_table.station_data['Name'][s])
+                if d==0:
+                    GAUS = GauInfoS
+                    d+=1
+                else:
+                    GAUS = np.hstack((GAUS,GauInfoS))
+
+            tmpSTATION_pick = pd.DataFrame([[self.lookup_table.station_data['Name'][s],'S',stationEventST,Mn,Err,maxSNR_S]],columns=['Name','Phase','ModelledTime','PickTime','PickError','PickSNR'])
+            STATION_pickS = STATION_pickS.append(tmpSTATION_pick)

-        #print(STATIONS)
+        #print(STATION_pickS)
         # Saving the output from the triggered events
-        self.output.write_stationsfile(STATIONS,EventName)
+        STATION_pickS = STATION_pickS[['Name','Phase','ModelledTime','PickTime','PickError']]
+        self.output.write_stationsfile(STATION_pickS,EventName)
+
+        return STATION_pickS,GAUP,GAUS

     def _ErrorEllipse(self,COA3D):
         """
@@ -1165,7 +1861,7 @@ def _LocationError(self,Map4D):

         CoaMap = np.log(np.sum(np.exp(Map4D),axis=-1))
         CoaMap = CoaMap/np.max(CoaMap)
-        CoaMap_Cutoff = np.percentile(CoaMap,95)
+        CoaMap_Cutoff = 0.8
         CoaMap[CoaMap < CoaMap_Cutoff] = CoaMap_Cutoff
         CoaMap = CoaMap - CoaMap_Cutoff
         CoaMap = CoaMap/np.max(CoaMap)
@@ -1189,80 +1885,77 @@ def Trigger(self,starttime,endtime):
         '''
-
+            Triggers events from the continuous .scn coalescence trace between
+            starttime and endtime, then relocates and picks each one.
         '''
         # Initial detection of the events from the .scn file
         CoaVal = self.output.read_scan()
         EVENTS = self._Trigger_scn(CoaVal,starttime,endtime)

-        #
+        # Conduct the compute on the decimated grid
+        self.lookup_table = self.lookup_table.decimate(self.Decimate)
+
+        # Define the pre-pad as a function of the onset windows
+        if self.pre_pad is None:
+            self.pre_pad = max(self.onset_win_p1[1],self.onset_win_s1[1]) + 3*max(self.onset_win_p1[0],self.onset_win_s1[0])
+
         Triggered = pd.DataFrame(columns=['DT','COA','X','Y','Z','ErrX','ErrY','ErrZ'])
         for e in range(len(EVENTS)):
-            print('--Processing for Event {} of {} - {}'.format(e+1,len(EVENTS),EVENTS['EventID'].iloc[e]))
+            print('--Processing for Event {} of {} - {}'.format(e+1,len(EVENTS),(EVENTS['EventID'].iloc[e]).astype(str)))

             # Determining the seismic event location
-            cstart = EVENTS['DT'].iloc[e].to_pydatetime() + timedelta(seconds = -self.MarginalWindow/2) + timedelta(seconds = -self.pre_pad)
-            cend = EVENTS['DT'].iloc[e].to_pydatetime() + timedelta(seconds = self.MarginalWindow/2) + timedelta(seconds = self.post_pad)
+            cstart = EVENTS['MinTime'].iloc[e] + timedelta(seconds=-self.pre_pad)
+            cend = EVENTS['MaxTime'].iloc[e] + timedelta(seconds=self.post_pad)
             self.DATA.read_mseed(cstart.strftime('%Y-%m-%dT%H:%M:%S.%f'),cend.strftime('%Y-%m-%dT%H:%M:%S.%f'),self.sample_rate)

-            self._map = None
-            daten, dsnr, dloc, MAP = self._compute(cstart,cend,self.DATA.signal,self.DATA.station_avaliability)
+            daten, dsnr, dloc, self.MAP = self._compute(cstart,cend,self.DATA.signal,self.DATA.station_avaliability)
+            dcoord = self.lookup_table.xyz2coord(np.array(dloc).astype(int))
             EventCoaVal = pd.DataFrame(np.array((daten,dsnr,dcoord[:,0],dcoord[:,1],dcoord[:,2])).transpose(),columns=['DT','COA','X','Y','Z'])
             EventCoaVal['DT'] = pd.to_datetime(EventCoaVal['DT'])
-
-            EVENT = EventCoaVal.sort_values(by=['COA'],ascending=False).iloc[0]
-
-            pickle.dump(self.DATA,open("ONSET.p", "wb" ))
-
-            self.MAP = MAP
-            self.EVENT = EVENT
-            self.cstart = cstart
-            self.cend = cend
+            self.EVENT = EventCoaVal
+            self.EVENT_max = self.EVENT.iloc[EventCoaVal['COA'].astype('float').idxmax()]

             # Determining the hypocentral location from the maximum over the marginal window.
-            self._ArrivalTrigger(EVENT,EVENTS['EventID'].iloc[e])
+            Picks,GAUP,GAUS = self._ArrivalTrigger(self.EVENT_max,(EVENTS['EventID'].iloc[e].astype(str)))

+            StationPick = {}
+            StationPick['Pick'] = Picks
+            StationPick['GAU_P'] = GAUP
+            StationPick['GAU_S'] = GAUS

-            # Determining earthquake location error
-            LOC,LOC_ERR = self._LocationError(MAP)
-
-            EVENT['X_ErrE'] = LOC[0]
-            EVENT['Y_ErrE'] = LOC[1]
-            EVENT['Z_ErrE'] = LOC[2]
+            # Determining the earthquake location and the location error
+            LOC,LOC_ERR = self._LocationError(self.MAP)

-            EVENT['ErrX'] = LOC_ERR[0]
-            EVENT['ErrY'] = LOC_ERR[1]
-            EVENT['ErrZ'] = LOC_ERR[2]
+            EV = pd.DataFrame([np.append(self.EVENT_max.as_matrix(),[LOC[0],LOC[1],LOC[2],LOC_ERR[0],LOC_ERR[1],LOC_ERR[2]])],columns=['DT','COA','X','Y','Z','X_ErrE','Y_ErrE','Z_ErrE','ErrX','ErrY','ErrZ'])
+            self.output.write_event(EV,str(EVENTS['EventID'].iloc[e].astype(str)))

-            self.output.write_event(EVENT,EVENTS['EventID'].iloc[e])
+            if self.CutMSEED == True:
+                self.output.cut_mseed(self.DATA,str(EVENTS['EventID'].iloc[e].astype(str)))

             # Outputting coalescence grids and triggered events
-            if self.CoalescenceGrid == True:
+            if self.CoalescenceTrace == True:
+                SeisPLT = SeisPlot(self.lookup_table,self.MAP,self.DATA,self.EVENT,StationPick)
+                SeisPLT.CoalescenceTrace(SaveFilename='{}_{}'.format(path.join(self.output.path, self.output.name),EVENTS['EventID'].iloc[e].astype(str)))

-                # CoaMap = {}
-                # CoaMap['MAP'] = MAP
-                # CoaMap['CoaDATA'] = tmpEvent
-                self.output.write_coal4D(MAP,e,cstart,cend)
+            if self.CoalescenceGrid == True:
+                self.output.write_coal4D(self.MAP,e,cstart,cend)

             if self.CoalescenceVideo == True:
-                self.MAP = MAP
-                self.EVENT = EventCoaVal
-                self.output.write_coalVideo(self.lookup_table,MAP,self.DATA,EventCoaVal,EVENTS['EventID'].iloc[e])
+                SeisPLT = SeisPlot(self.lookup_table,self.MAP,self.DATA,self.EVENT,StationPick)
+                SeisPLT.CoalescenceVideo(SaveFilename='{}_{}'.format(path.join(self.output.path, self.output.name),EVENTS['EventID'].iloc[e].astype(str)))

-            Triggered = Triggered.append(EVENT)
+            if self.CoalescencePicture == True:
+                SeisPLT = SeisPlot(self.lookup_table,self.MAP,self.DATA,self.EVENT,StationPick)
+                SeisPLT.CoalescenceMarginal(SaveFilename='{}_{}'.format(path.join(self.output.path, self.output.name),EVENTS['EventID'].iloc[e].astype(str)))

-        return Triggered
+            self.MAP = None
+            self.EVENT = None
+            self.cstart = None
+            self.cend = None

diff --git a/dist/CMS-0.2.18.11.1-py3.5.egg b/dist/CMS-0.2.18.11.1-py3.5.egg
new file mode 100644
index 0000000..a858610
Binary files /dev/null and b/dist/CMS-0.2.18.11.1-py3.5.egg differ
diff --git a/dist/CMS-0.2.18.11.2-py3.5.egg b/dist/CMS-0.2.18.11.2-py3.5.egg
new file mode 100644
index 0000000..db10daa
Binary files /dev/null and b/dist/CMS-0.2.18.11.2-py3.5.egg differ
diff --git a/src/CMS.egg-info/PKG-INFO b/src/CMS.egg-info/PKG-INFO
index 1152858..dacfeac 100755
--- a/src/CMS.egg-info/PKG-INFO
+++ b/src/CMS.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: CMS
-Version: 0.2.18.10.30
+Version: 0.2.18.11.2
 Summary: Coalescence Microseismic Scanning - CMScanning
 Home-page: UNKNOWN
 Author: Jonathan Smith, Julian Drew, Tom Hudson, and Tom Winder
diff --git a/src/CMS/core/__pycache__/cmslib.cpython-35.pyc b/src/CMS/core/__pycache__/cmslib.cpython-35.pyc
index 70dc889..1900cee 100644
Binary files a/src/CMS/core/__pycache__/cmslib.cpython-35.pyc and b/src/CMS/core/__pycache__/cmslib.cpython-35.pyc differ
diff --git a/src/CMS/core/cmslib.py b/src/CMS/core/cmslib.py
index 6d107e5..203d202 100755
--- a/src/CMS/core/cmslib.py
+++ b/src/CMS/core/cmslib.py
@@ -91,12 +91,12 @@ def nlevinson(acc):

-_cmslib.scan4d.argtypes = [c_dPt, c_i32Pt, c_dPt,c_int32, c_int32, c_int32, c_int32, c_int64, c_int64]
-_cmslib.detect4d.argtypes = [c_dPt, c_dPt,c_i64Pt,c_int32, c_int32, c_int32, c_int64, c_int64]
-_cmslib.detect4d_t.argtypes = [c_dPt, c_dPt, c_i64Pt,c_int32, c_int32, c_int32, c_int64, c_int64]
+_cmslib.scan4d.argtypes = [c_dPt, c_i32Pt, c_dPt, c_int32, c_int32, c_int32, c_int32, c_int64, c_int64]
+_cmslib.detect4d.argtypes = [c_dPt, c_dPt, c_i64Pt, c_int32, c_int32, c_int32, c_int64, c_int64]
+# _cmslib.detect4d_t.argtypes = [c_dPt, c_dPt, c_i64Pt, c_int32, c_int32, c_int32, c_int64, c_int64]

-def scan(sig, tt, fsmp, lsmp, map4d,threads):
-    nstn, nsamp = sig.shape
+def scan(sig, tt, fsmp, lsmp, nsamp, map4d, threads):
+    nstn, ssmp = sig.shape
     if not tt.shape[-1] == nstn:
         raise ValueError('Mismatch between number of stations for data and LUT, {} - {}.'.format(
             nstn, tt.shape[-1]))
@@ -104,24 +104,27 @@ def scan(sig, tt, fsmp, lsmp, map4d,threads):
     tcell = np.prod(ncell)
     if map4d.size < nsamp*tcell:
         raise ValueError('4D-Array is too small.')
-    _cmslib.scan4d(sig, tt, map4d, c_int32(fsmp), c_int32(lsmp),
-                   c_int32(nsamp), c_int32(nstn), c_int64(tcell), c_int64(threads))
+    if sig.size < nsamp + fsmp:
+        raise ValueError('Data array smaller than Coalescence array')

-def detect(mmap, dsnr, dind, fsmp, lsmp, threads):
+    _cmslib.scan4d(sig, tt, map4d, c_int32(fsmp), c_int32(lsmp), c_int32(nsamp), c_int32(nstn), c_int64(tcell), c_int64(threads))
+
+
+def detect(mmap, dsnr, dind, fsmp, lsmp, threads):
     nsamp = mmap.shape[-1]
     ncell = np.prod(mmap.shape[:-1])
     if dsnr.size < nsamp or dind.size < nsamp:
         raise ValueError('Output array size too small, sample count = {}.'.format(nsamp))
-    _cmslib.detect4d(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
-                     c_int32(nsamp), c_int64(ncell), c_int64(threads))
+    _cmslib.detect4d(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp), c_int32(nsamp), c_int64(ncell), c_int64(threads))

-def detect_t(mmap, dsnr, dind, fsmp, lsmp, threads):
-    nsamp = mmap.shape[0]
-    ncell = np.prod(mmap.shape[1:])
-    if dsnr.size < nsamp or dind.size < nsamp:
-        raise ValueError('Output array size too small, sample count = {}.'.format(nsamp))
-    _cmslib.detect4d_t(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
-                       c_int32(nsamp), c_int64(ncell), c_int64(threads))
+# def detect_t(mmap, dsnr, dind, fsmp, lsmp, threads):
+#     nsamp = mmap.shape[0]
+#     ncell = np.prod(mmap.shape[1:])
+#     if dsnr.size < nsamp or dind.size < nsamp:
+#         raise ValueError('Output array size too small, sample count = {}.'.format(nsamp))
+#     _cmslib.detect4d_t(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
+#                        c_int32(nsamp), c_int64(ncell), c_int64(threads))
diff --git a/src/CMS/lib/cmslib.so b/src/CMS/lib/cmslib.so
index 0a33ee6..266775d 100755
Binary files a/src/CMS/lib/cmslib.so and b/src/CMS/lib/cmslib.so differ
diff --git a/src/CMS/lib/src/cmscan.c b/src/CMS/lib/src/cmscan.c
index 84279a5..94f3188 100755
--- a/src/CMS/lib/src/cmscan.c
+++ b/src/CMS/lib/src/cmscan.c
@@ -39,28 +39,24 @@ EXPORT void scan4d(double *sigPt, int32_t *indPt, double *mapPt, int32_t fsmp, i
 	omp_set_num_threads(threads);

-	/* stack data.... */
-	to   = MAX(0,MIN(fsmp,nsamp - 1));
-	lsmp = MAX(to,MIN(lsmp,nsamp));
-
 	#pragma omp parallel for private(cell,tm,st,stnPt,stkPt,ttpPt,ttp,tend) /* shared(mapPt) */
 	for (cell=0; cell<ncell; cell++)

-EXPORT void detect4d_t(double *mapPt, double *snrPt, int64_t *indPt, int32_t fsmp, int32_t lsmp, int32_t nsamp, int64_t ncell, int64_t threads)
-{
-	double *rPt, mv, cv;
-	int32_t tm;
-	int64_t cell, ix;
-
-	/* stack data.... */
-
-	omp_set_num_threads(threads);
-
-	#pragma omp parallel for private(cell,tm,mv,ix,cv,rPt) shared(mapPt)
-	for (tm=fsmp; tm<lsmp; tm++)
-	{
-		mv = 0.0;
-		ix = 0;
-		rPt = mapPt + tm*ncell;
-		for (cell=0; cell<ncell; cell++)
-		{
-			cv = rPt[cell];
-			if (cv > mv)
-			{
-				mv = cv;
-				ix = cell;
-			}
-		}
-		snrPt[tm] = mv;
-		indPt[tm] = ix;
-	}
-}
+// EXPORT void detect4d_t(double *mapPt, double *snrPt, int64_t *indPt, int32_t fsmp, int32_t lsmp, int32_t nsamp, int64_t ncell, int64_t threads)
+// {
+// 	double *rPt, mv, cv;
+// 	int32_t tm;
+// 	int64_t cell, ix;
+
+// 	/* stack data.... */
+
+// 	omp_set_num_threads(threads);
+
+// 	#pragma omp parallel for private(cell,tm,mv,ix,cv,rPt) shared(mapPt)
+// 	for (tm=fsmp; tm<lsmp; tm++)
+// 	{
+// 		mv = 0.0;
+// 		ix = 0;
+// 		rPt = mapPt + tm*ncell;
+// 		for (cell=0; cell<ncell; cell++)
+// 		{
+// 			cv = rPt[cell];
+// 			if (cv > mv)
+// 			{
+// 				mv = cv;
+// 				ix = cell;
+// 			}
+// 		}
+// 		snrPt[tm] = mv;
+// 		indPt[tm] = ix;
+// 	}
+// }
diff --git a/src/CMS/signal/__pycache__/scan.cpython-35.pyc b/src/CMS/signal/__pycache__/scan.cpython-35.pyc
index f312a60..ef20015 100644
Binary files a/src/CMS/signal/__pycache__/scan.cpython-35.pyc and b/src/CMS/signal/__pycache__/scan.cpython-35.pyc differ
diff --git a/src/CMS/signal/scan.py b/src/CMS/signal/scan.py
index cd63e3f..a3b0e05 100755
--- a/src/CMS/signal/scan.py
+++ b/src/CMS/signal/scan.py
@@ -1143,26 +1143,42 @@ def _compute(self, cstart,cend, samples,station_avaliability):

         if self._map is None:
             #print('  Allocating memory: {}'.format(ncell + (tsamp,)))
-            self._map = np.zeros(ncell + (tsamp,), dtype=np.float64)
-
-        dind = np.zeros(tsamp, np.int64)
-        dsnr = np.zeros(tsamp, np.double)
-
-        ilib.scan(snr, tt, 0, pre_smp + nsamp +pos_smp, self._map, self.NumberOfCores)
-        ilib.detect(self._map, dsnr, dind, 0, pre_smp + nsamp +pos_smp, self.NumberOfCores)
-
+            self._map = np.zeros(ncell + (nsamp,), dtype=np.float64)
+
+        dind = np.zeros(nsamp, np.int64)
+        dsnr = np.zeros(nsamp, np.double)
+
+        # ilib.scan(snr, tt, 0, pre_smp + nsamp +pos_smp, self._map, self.NumberOfCores)
+        # ilib.detect(self._map, dsnr, dind, 0, pre_smp + nsamp +pos_smp, self.NumberOfCores)
+        # daten = np.arange((cstart+timedelta(seconds=self.pre_pad)), (cend + timedelta(seconds=-self.post_pad) + timedelta(seconds=1/srate)),timedelta(seconds=1/srate))
+        # dsnr = np.exp((dsnr / nchan) - 1.0)
+        # #dsnr = classic_sta_lta(np.exp((dsnr / nchan) - 1.0),self.onset_win_p1[0]*self.sample_rate*0.5,self.onset_win_p1[1]*self.sample_rate*0.5)
+        # dsnr = dsnr[pre_smp:pre_smp + nsamp]
+        # dloc = self.lookup_table.index2xyz(dind[pre_smp:pre_smp + nsamp])
+        # MAP = self._map[:,:,:,(pre_smp+1):pre_smp + nsamp]
+
+        print(snr.shape)
+        print(nsamp)
+        print(self.pre_pad)
+        print(self.post_pad)
+        ilib.scan(snr, tt, pre_smp, pos_smp, nsamp, self._map, self.NumberOfCores)
+        ilib.detect(self._map, dsnr, dind, 0, nsamp, self.NumberOfCores)
         daten = np.arange((cstart+timedelta(seconds=self.pre_pad)), (cend + timedelta(seconds=-self.post_pad) + timedelta(seconds=1/srate)),timedelta(seconds=1/srate))
         dsnr = np.exp((dsnr / nchan) - 1.0)
         #dsnr = classic_sta_lta(np.exp((dsnr / nchan) - 1.0),self.onset_win_p1[0]*self.sample_rate*0.5,self.onset_win_p1[1]*self.sample_rate*0.5)
-        dsnr = dsnr[pre_smp:pre_smp + nsamp]
-
-        dloc = self.lookup_table.index2xyz(dind[pre_smp:pre_smp + nsamp])
+        dloc = self.lookup_table.index2xyz(dind)
+        MAP = self._map

-        MAP = self._map[:,:,:,(pre_smp+1):pre_smp + nsamp]
         self._map = None
+
+        print(daten)
+        print(daten.shape)
+        print(dsnr.shape)
+        print(dloc.shape)
+        print(MAP.shape)

         return daten, dsnr, dloc, MAP

@@ -1891,8 +1907,8 @@ def Trigger(self,starttime,endtime):
             print('--Processing for Event {} of {} - {}'.format(e+1,len(EVENTS),(EVENTS['EventID'].iloc[e]).astype(str)))

             # Determining the seismic event location
-            cstart = EVENTS['MinTime'].iloc[e] + timedelta(seconds = -2*(self.pre_pad+self.MarginalWindow))
-            cend = EVENTS['MaxTime'].iloc[e] + timedelta(seconds = 2*(self.post_pad+self.MarginalWindow))
+            cstart = EVENTS['MinTime'].iloc[e] + timedelta(seconds=-self.pre_pad)
+            cend = EVENTS['MaxTime'].iloc[e] + timedelta(seconds=self.post_pad)
             self.DATA.read_mseed(cstart.strftime('%Y-%m-%dT%H:%M:%S.%f'),cend.strftime('%Y-%m-%dT%H:%M:%S.%f'),self.sample_rate)

             daten, dsnr, dloc, self.MAP = self._compute(cstart,cend,self.DATA.signal,self.DATA.station_avaliability)
@@ -1900,20 +1916,8 @@ def Trigger(self,starttime,endtime):
             dcoord = self.lookup_table.xyz2coord(np.array(dloc).astype(int))
             EventCoaVal = pd.DataFrame(np.array((daten,dsnr,dcoord[:,0],dcoord[:,1],dcoord[:,2])).transpose(),columns=['DT','COA','X','Y','Z'])
             EventCoaVal['DT'] = pd.to_datetime(EventCoaVal['DT'])
-
-            # ----- Clipping the Coalescence and Values to the marginal window size ----
-            indMAX = EventCoaVal['COA'].astype('float').idxmax()
-            indVal_min = int(indMAX - self.MarginalWindow*float(self.sample_rate))
-            indVal_max = round(indMAX + self.MarginalWindow*float(self.sample_rate))
-            EventCoaVal = EventCoaVal[['DT','COA','X','Y','Z']].iloc[indVal_min:indVal_max].reset_index(drop=True)
-
-            #print(indVal_min,indMAX,indVal_max)
-
-            self.MAP = self.MAP[:,:,:,indVal_min:indVal_max]
             self.EVENT = EventCoaVal
-            self.EVENT_max = self.EVENT.iloc[indMAX-indVal_min]
+            self.EVENT_max = self.EVENT.iloc[EventCoaVal['COA'].astype('float').idxmax()]

             # Determining the hypocentral location from the maximum over the marginal window.
             Picks,GAUP,GAUS = self._ArrivalTrigger(self.EVENT_max,(EVENTS['EventID'].iloc[e].astype(str)))