Merge pull request #82 from trustimaging/iso-acoustic-masking
Iso acoustic masking and improved vp updates
ccuetom authored Jul 19, 2024
2 parents 223c2d7 + a0818bc commit a821bfd
Showing 23 changed files with 325 additions and 158 deletions.
6 changes: 3 additions & 3 deletions docker/README.md
@@ -81,19 +81,19 @@ docker build --network=host --file docker/Dockerfile.stride --tag stride .
And to build the GPU image with `openacc` offloading and the `nvc` compiler, simply run:

```bash
docker build --build-arg base=devitocodes/base:nvidia-nvc --network=host --file docker/Dockerfile.stride --tag stride .
docker build --build-arg base=devitocodes/bases:nvidia-nvc --network=host --file docker/Dockerfile.stride --tag stride .
```

or if you wish to use the `llvm-15` (clang) compiler with `openmp` offloading:

```bash
docker build --build-arg base=devitocodes/base:nvidia-clang --network=host --file docker/Dockerfile.stride --tag stride .
docker build --build-arg base=devitocodes/bases:nvidia-clang --network=host --file docker/Dockerfile.stride --tag stride .
```

and finally for AMD architectures:

```bash
docker build --build-arg base=devitocodes/base:amd --network=host --file docker/Dockerfile.stride --tag stride .
docker build --build-arg base=devitocodes/bases:amd --network=host --file docker/Dockerfile.stride --tag stride .
```


4 changes: 2 additions & 2 deletions mosaic/cli/mprof.py
@@ -25,11 +25,11 @@ def __init__(self, label_id, label):
# EVENTS
#
# * Tessera
# Remote: init --> (listening --> running) --> collected
# Remote: init --> listening --> collected
# Proxy: pending --> init --> listening --> collected
#
# * Task
# Remote: init --> pending --> ready --> running --> done --> collected
# Remote: init --> pending --> done/failed --> collected
# Proxy: pending --> init --> queued --> (done --> result) --> collected


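The revised lifecycles above read as two small state machines. A purely illustrative sketch (mosaic tracks these states as plain strings; the `Enum` classes here are hypothetical, not mosaic's API):

```python
from enum import Enum

class RemoteTesseraState(Enum):
    INIT = 'init'            # created on the remote worker
    LISTENING = 'listening'  # waiting for tasks (replaces the listening/running pair)
    COLLECTED = 'collected'  # garbage collected

class RemoteTaskState(Enum):
    INIT = 'init'
    PENDING = 'pending'
    DONE = 'done'            # terminal success...
    FAILED = 'failed'        # ...or terminal failure, then collected
    COLLECTED = 'collected'
```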
2 changes: 1 addition & 1 deletion mosaic/cli/mrun.py
@@ -146,7 +146,7 @@ def start_runtime(*args, **extra_kwargs):
_runtime.init_file(runtime_config)

def run_head():
process = cmd_subprocess.run(cmd,
process = cmd_subprocess.run(' '.join(cmd), shell=True,
stdout=_stdout,
stderr=_stderr)

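For context on this change: joining the argument list and passing `shell=True` hands the composed command to the shell instead of exec'ing it directly, so quoting and environment expansion are honoured. A generic sketch, not Stride-specific:

```python
import subprocess

cmd = ['echo', '$HOME']

# argv form: executed directly, '$HOME' stays a literal string
subprocess.run(cmd)

# shell form: the joined string is interpreted by the shell,
# so '$HOME' is expanded before the command runs
subprocess.run(' '.join(cmd), shell=True)
```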
17 changes: 9 additions & 8 deletions mosaic/comms/comms.py
@@ -771,19 +771,20 @@ def _process_send(self, method, cmd=None, reply=False, **kwargs):
'cmd': cmd,
}

msg = serialise(msg)
msg_size = sizeof(msg)

if not method.startswith('log') and not method.startswith('update_monitored_node'):
if method == 'cmd':
method = '%s:%s.%s' % (method, cmd['type'], cmd['method'])

self.logger.debug('Sending cmd %s %s to %s (%s) from %s' % (method, cmd['method'],
self.uid, cmd['uid'],
self._runtime.uid))
self.logger.debug('Sending cmd %s %s to %s (%s) from %s '
'(size %.2f MB)' % (method, cmd['method'], self.uid, cmd['uid'],
self._runtime.uid, msg_size/1024**2))
else:
self.logger.debug('Sending msg %s to %s from %s' % (method, self.uid,
self._runtime.uid))

msg = serialise(msg)
msg_size = sizeof(msg)
self.logger.debug('Sending msg %s to %s from %s '
'(size %.2f MB)' % (method, self.uid, self._runtime.uid,
msg_size/1024**2))

compression = []
compressed_msg = []
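The reordering above serialises the message once, before logging, so the byte size can be reported and the same payload reused for the send. A stand-in sketch (`serialise` and `sizeof` here are simplified stand-ins for mosaic's helpers):

```python
import pickle

def serialise(msg):           # stand-in for mosaic's serialise
    return pickle.dumps(msg)

def sizeof(payload):          # stand-in: payload size in bytes
    return len(payload)

msg = {'method': 'cmd', 'args': list(range(10000))}
payload = serialise(msg)                 # serialise once...
size_mb = sizeof(payload) / 1024**2      # ...then log its size
print('Sending cmd (size %.2f MB)' % size_mb)
# ...and reuse `payload` for the actual send
```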
2 changes: 1 addition & 1 deletion mosaic/comms/serialisation.py
@@ -35,7 +35,7 @@ def serialise(data):
"""
try:
return pickle5_dumps(data)
except pickle.PicklingError:
except (pickle.PicklingError, AttributeError):
return cloudpickle.dumps(data), []


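The broadened `except` matters because `pickle` does not always fail with `PicklingError`: resolving an object by qualified name can raise `AttributeError` instead (e.g. "Can't pickle local object" for functions defined inside other functions). A minimal sketch of the fallback pattern, simplified from the diff's `pickle5_dumps` helper:

```python
import pickle
import cloudpickle

def serialise(data):
    try:
        return pickle.dumps(data, protocol=5), []
    except (pickle.PicklingError, AttributeError):
        # cloudpickle serialises by value, so objects that the standard
        # pickle cannot resolve by qualified name still round-trip
        return cloudpickle.dumps(data), []

def make_closure():
    def inner(x):
        return x + 1
    return inner

# pickle.dumps(make_closure()) raises AttributeError, so serialise()
# falls back to cloudpickle
payload, buffers = serialise(make_closure())
print(cloudpickle.loads(payload)(2))   # -> 3
```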
1 change: 0 additions & 1 deletion mosaic/core/base.py
@@ -439,7 +439,6 @@ def __deepcopy__(self, memo):
async def deregister(self):
try:
self.logger.debug('Garbage collected object %s' % self)
self.state_changed('collected')
except AttributeError:
pass

8 changes: 3 additions & 5 deletions mosaic/core/tessera.py
@@ -262,6 +262,8 @@ async def listen_async(self):
if self._state != 'init':
return

self.state_changed('listening')

while True:
sender_id, task = await self._task_queue.get()
# Make sure that the loop does not keep implicit references to the task until the
@@ -288,12 +290,10 @@ async def run_async(self):
-------
"""
if self._state != 'init':
if self._state != 'listening':
return

while True:
self.state_changed('listening')

sender_id, task, future = await self._run_queue.get()
# Make sure that the loop does not keep implicit references to the task until the
# next task arrives in the queue
@@ -319,8 +319,6 @@ async def run_async(self):
self.obj.__class__.__name__))

await asyncio.sleep(0)
self.state_changed('running')
task.state_changed('running')
await self.logger.send()
await self.call_safe(sender_id, method, task)

4 changes: 2 additions & 2 deletions stride/__init__.py
@@ -108,7 +108,7 @@ async def forward(problem, pde, *args, **kwargs):
published_args = await asyncio.gather(*published_args)

platform = kwargs.get('platform', 'cpu')
using_gpu = platform in ['nvidia-acc', 'gpu']
using_gpu = 'nvidia' in platform or 'gpu' in platform
if using_gpu:
devices = kwargs.pop('devices', None)
num_gpus = gpu_count() if devices is None else len(devices)
@@ -251,7 +251,7 @@ async def adjoint(problem, pde, loss, optimisation_loop, optimiser, *args, **kwa
keep_residual = isinstance(step_size, LineSearch)

platform = kwargs.get('platform', 'cpu')
using_gpu = platform in ['nvidia-acc', 'gpu']
using_gpu = 'nvidia' in platform or 'gpu' in platform
if using_gpu:
devices = kwargs.pop('devices', None)
num_gpus = gpu_count() if devices is None else len(devices)
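The substring test broadens GPU detection from an exact allow-list to any platform name mentioning `nvidia` or `gpu`, so a hypothetical `nvidia-cuda` target would also match. A sketch of the predicate:

```python
def uses_gpu(platform: str) -> bool:
    # matches 'nvidia-acc', 'gpu', and any other nvidia/gpu variant
    return 'nvidia' in platform or 'gpu' in platform

assert uses_gpu('nvidia-acc')
assert uses_gpu('gpu')
assert not uses_gpu('cpu')
```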
4 changes: 4 additions & 0 deletions stride/core.py
@@ -288,6 +288,7 @@ def __init__(self, *args, **kwargs):

self.grad = None
self.prec = None
self.transform = kwargs.pop('transform', None)

self.graph = Graph()
self.prev_op = None
@@ -384,6 +385,7 @@ def detach(self, *args, **kwargs):
"""
kwargs['name'] = kwargs.pop('name', self._init_name)
kwargs['needs_grad'] = kwargs.pop('needs_grad', self.needs_grad)
kwargs['transform'] = kwargs.pop('transform', self.transform)

if hasattr(self, 'has_tessera') and self.has_tessera:
cpy = self.__class__.parameter(*args, **kwargs)
Expand Down Expand Up @@ -411,6 +413,7 @@ def as_parameter(self, *args, **kwargs):
"""
kwargs['name'] = kwargs.pop('name', self._init_name)
kwargs['needs_grad'] = kwargs.pop('needs_grad', self.needs_grad)
kwargs['transform'] = kwargs.pop('transform', self.transform)

cpy = self.__class__.parameter(*args, **kwargs)

@@ -437,6 +440,7 @@ def copy(self, *args, **kwargs):
"""
kwargs['name'] = kwargs.pop('name', self._init_name)
kwargs['needs_grad'] = kwargs.pop('needs_grad', self.needs_grad)
kwargs['transform'] = kwargs.pop('transform', self.transform)

propagate_tessera = kwargs.pop('propagate_tessera', True)

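The new `transform` kwarg rides along whenever a variable is detached, re-parameterised, or copied. A minimal sketch of the propagation pattern under assumed, simplified classes (not Stride's actual `Variable`):

```python
import numpy as np

class Variable:
    def __init__(self, data, transform=None, needs_grad=False):
        self.data = np.asarray(data, dtype=np.float32)
        self.transform = transform    # e.g. a callable mapping vp <-> slowness
        self.needs_grad = needs_grad

    def copy(self, **kwargs):
        # mirror the kwargs.pop pattern in the diff: an explicit
        # transform wins, otherwise the existing one is propagated
        kwargs['transform'] = kwargs.pop('transform', self.transform)
        return Variable(self.data.copy(), needs_grad=self.needs_grad, **kwargs)

reciprocal = lambda x: 1.0 / x            # hypothetical transform
vp = Variable([1500.0, 1550.0], transform=reciprocal)
assert vp.copy().transform is reciprocal  # survives the copy
```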
4 changes: 2 additions & 2 deletions stride/optimisation/loss/l2_distance.py
@@ -24,7 +24,7 @@ def __init__(self, **kwargs):

self.residual = None

async def forward(self, modelled, observed, **kwargs):
def forward(self, modelled, observed, **kwargs):
problem = kwargs.pop('problem', None)
shot_id = problem.shot_id if problem is not None \
else kwargs.pop('shot_id', 0)
Expand All @@ -38,7 +38,7 @@ async def forward(self, modelled, observed, **kwargs):

return fun

async def adjoint(self, d_fun, modelled, observed, **kwargs):
def adjoint(self, d_fun, modelled, observed, **kwargs):
grad_modelled = None
if modelled.needs_grad:
grad_modelled = +np.asarray(d_fun) * self.residual.copy(name='modelledresidual')
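For reference, the functional pair above is, assuming the standard form, the L2 misfit `f = 0.5 * ||modelled - observed||^2`, whose adjoint source is the residual scaled by the incoming derivative. A minimal numpy sketch:

```python
import numpy as np

def l2_forward(modelled, observed):
    residual = modelled - observed
    return 0.5 * np.sum(residual ** 2), residual

def l2_adjoint(d_fun, residual):
    # gradient w.r.t. modelled: d_fun * residual, as in the diff
    return np.asarray(d_fun) * residual

fun, res = l2_forward(np.array([1.0, 2.0]), np.array([0.5, 2.5]))
grad = l2_adjoint(1.0, res)   # -> [0.5, -0.5]
```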
6 changes: 3 additions & 3 deletions stride/optimisation/optimisers/gradient_descent.py
@@ -30,6 +30,6 @@ async def pre_process(self, grad=None, processed_grad=None, **kwargs):
**kwargs)
return processed_grad

async def update_variable(self, step_size, direction):
self.variable.data[:] -= step_size * direction.data
return self.variable
def update_variable(self, step_size, variable, direction):
variable.data[:] -= step_size * direction.data
return variable
35 changes: 29 additions & 6 deletions stride/optimisation/optimisers/optimiser.py
@@ -77,13 +77,14 @@ async def pre_process(self, grad=None, processed_grad=None, **kwargs):
logger = mosaic.logger()
logger.perf('Updating variable %s,' % self.variable.name)

problem = kwargs.pop('problem', None)
iteration = kwargs.pop('iteration', None)

if processed_grad is None:
if grad is None:
if hasattr(self.variable, 'is_proxy') and self.variable.is_proxy:
await self.variable.pull(attr='grad')

problem = kwargs.pop('problem', None)
iteration = kwargs.pop('iteration', None)
dump_grad = kwargs.pop('dump_grad', self.dump_grad)
dump_prec = kwargs.pop('dump_prec', self.dump_prec)
if dump_grad and problem is not None:
@@ -95,6 +96,7 @@ async def pre_process(self, grad=None, processed_grad=None, **kwargs):
if dump_prec and self.variable.grad.prec is not None and problem is not None:
self.variable.grad.prec.dump(path=problem.output_folder,
project_name=problem.name,
parameter='raw_%s' % self.variable.grad.prec.name,
version=iteration.abs_id+1)

grad = self.variable.process_grad(**kwargs)
@@ -104,6 +106,11 @@ async def pre_process(self, grad=None, processed_grad=None, **kwargs):
project_name=problem.name,
version=iteration.abs_id+1)

if dump_prec and self.variable.grad.prec is not None and problem is not None:
self.variable.grad.prec.dump(path=problem.output_folder,
project_name=problem.name,
version=iteration.abs_id+1)

min_dir = np.min(grad.data)
max_dir = np.max(grad.data)

@@ -112,6 +119,13 @@ async def pre_process(self, grad=None, processed_grad=None, **kwargs):

processed_grad = await self._process_grad(grad, variable=self.variable, **kwargs)

dump_processed_grad = kwargs.pop('dump_processed_grad', self.dump_grad)
if dump_processed_grad and problem is not None:
processed_grad.dump(path=problem.output_folder,
project_name=problem.name,
parameter='processed_%s' % self.variable.grad.name,
version=iteration.abs_id + 1)

test_step_size = kwargs.pop('test_step_size', self.test_step_size)
processed_grad.data[:] *= test_step_size

@@ -163,7 +177,7 @@ async def step(self, step_size=None, grad=None, processed_grad=None, **kwargs):
step_size = self.step_size if step_size is None else step_size
step_loop = kwargs.pop('step_loop', None)
if isinstance(step_size, LineSearch):
await step_size.init_search(
step_size.init_search(
variable=self.variable,
direction=direction,
**kwargs
@@ -177,7 +191,7 @@ async def step(self, step_size=None, grad=None, processed_grad=None, **kwargs):
next_step = 1.
done_search = True
else:
next_step, done_search = await step_size.next_step(
next_step, done_search = step_size.next_step(
variable=self.variable,
direction=direction,
**kwargs
@@ -208,7 +222,14 @@ async def step(self, step_size=None, grad=None, processed_grad=None, **kwargs):
self.variable.data[:] = variable_before.data.copy()

# update variable
await self.update_variable(next_step, direction)
if self.variable.transform is not None:
variable = self.variable.transform(self.variable)
else:
variable = self.variable
upd_variable = self.update_variable(next_step, variable, direction)
if self.variable.transform is not None:
upd_variable = self.variable.transform(upd_variable)
self.variable.data[:] = upd_variable.data.copy()

# post-process variable after update
await self.post_process(**kwargs)
@@ -254,13 +275,15 @@ async def post_process(self, **kwargs):
self.variable.release_grad()

@abstractmethod
async def update_variable(self, step_size, direction):
def update_variable(self, step_size, variable, direction):
"""
Parameters
----------
step_size : float
Step size to use for updating the variable.
variable : Data
Variable to update.
direction : Data
Direction in which to update the variable.
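The update block in `step` applies the transform before and after `update_variable` (which, as in the gradient-descent optimiser, now receives the possibly-transformed variable explicitly). Round-tripping through the same callable only works if the transform is its own inverse (an involution), e.g. `x -> 1/x` between velocity and slowness. A hedged sketch of the scheme, with illustrative names:

```python
import numpy as np

def step_in_transform_space(data, direction, step_size, transform=None):
    v = transform(data) if transform is not None else data
    v = v - step_size * direction          # gradient-descent update
    if transform is not None:
        v = transform(v)                   # same map again: assumes an involution
    return v

vp = np.array([1500.0, 1550.0])
direction = np.array([1e-6, -2e-6])        # update applied in slowness space
new_vp = step_in_transform_space(vp, direction, step_size=10.0,
                                 transform=lambda x: 1.0 / x)
```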
36 changes: 34 additions & 2 deletions stride/optimisation/pipelines/steps/mask_field.py
@@ -5,6 +5,33 @@
from ....core import Operator


def _rampoff_mask(shape, ramp_size):
mask = np.ones(shape, dtype=np.float32)

for dim_i in range(len(shape)):
if 2*ramp_size > shape[dim_i]:
continue
for index in range(ramp_size):
pos = np.abs((ramp_size - index - 1) / float(ramp_size - 1))
val = 1 - np.cos(np.pi / 2 * (1 - pos))

# Slices covering the interior at this ring depth
all_ind = [slice(index, s - index + 1) for s in shape]

# Left slice
all_ind[dim_i] = index
mask[tuple(all_ind)] = val

# Slices covering the interior at this ring depth
all_ind = [slice(index, s - index + 1) for s in shape]

# Right slice
all_ind[dim_i] = -index
mask[tuple(all_ind)] = val

return mask


class MaskField(Operator):
"""
Mask a StructuredData object to remove values outside inner domain.
@@ -17,13 +44,18 @@ def __init__(self, **kwargs):
def __init__(self, **kwargs):
super().__init__(**kwargs)

self.mask_rampoff = kwargs.pop('mask_rampoff', 10)
self._mask = kwargs.pop('mask', None)

def forward(self, field, **kwargs):
mask = kwargs.pop('mask', self._mask)
mask = kwargs.pop('mask', None)
mask_rampoff = kwargs.pop('mask_rampoff', self.mask_rampoff)
mask = self._mask if mask is None else mask
if mask is None:
mask = np.zeros(field.extended_shape)
mask = np.zeros(field.extended_shape, dtype=np.float32)
mask[field.inner] = 1
mask *= _rampoff_mask(mask.shape, mask_rampoff)
self._mask = mask

out_field = field.alike(name=name_from_op_name(self, field))
out_field.extended_data[:] = field.extended_data
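The cosine ramp in `_rampoff_mask` takes each edge smoothly from 0 to 1 over `ramp_size` cells rather than cutting off abruptly. The one-dimensional taper profile on its own:

```python
import numpy as np

ramp_size = 10
index = np.arange(ramp_size)
pos = np.abs((ramp_size - index - 1) / float(ramp_size - 1))
taper = 1 - np.cos(np.pi / 2 * (1 - pos))   # same formula as the diff
print(np.round(taper, 3))   # 0.0 at the boundary, rising smoothly to 1.0
```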
17 changes: 16 additions & 1 deletion stride/optimisation/pipelines/steps/norm_field.py
@@ -18,17 +18,32 @@ def __init__(self, **kwargs):
super().__init__(**kwargs)

self.global_norm = kwargs.pop('global_norm', False)
self.norm_guess_change = kwargs.pop('norm_guess_change', 0.5)
self.norm_value = None

def forward(self, field, **kwargs):
variable = kwargs.pop('variable', None)
global_norm = kwargs.pop('global_norm', self.global_norm)
norm_guess_change = kwargs.pop('norm_guess_change', self.norm_guess_change)

if self.norm_value is None or not global_norm:
self.norm_value = np.max(np.abs(field.extended_data)) + 1e-31

# work out guess change based on field value
if variable is not None:
min_val = np.min(variable.extended_data)
max_val = np.max(variable.extended_data)

mid_val = (max_val + min_val) / 2.0
if variable.transform is not None:
mid_val = variable.transform(mid_val)
var_corr = mid_val * norm_guess_change / 100
else:
var_corr = 1.

out_field = field.alike(name=name_from_op_name(self, field))
out_field.extended_data[:] = field.extended_data
out_field.extended_data[:] /= self.norm_value
out_field.extended_data[:] *= var_corr / self.norm_value

return out_field

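Numerically, the new scaling sizes the normalised field so its peak equals `norm_guess_change` percent of the variable's mid value (mapped through the transform when one is set). A worked example with illustrative numbers:

```python
import numpy as np

field = np.array([2.0, -8.0, 4.0])          # raw gradient values
variable = np.array([1400.0, 1600.0])       # e.g. vp in m/s

norm_value = np.max(np.abs(field)) + 1e-31          # 8.0
mid_val = (variable.min() + variable.max()) / 2.0   # 1500.0
var_corr = mid_val * 0.5 / 100              # 7.5 for the default 0.5% change

out = field * var_corr / norm_value
print(out)   # peak magnitude 7.5, i.e. 0.5% of the mid value 1500
```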
(Diff truncated: the remaining changed files are not shown.)
