From d3192bb8c284980ddf1522e894d4a01df70ff644 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 09:06:50 -0500
Subject: [PATCH 001/136] Read `Symbol` tick precision fields when no entry in
 `.broker_info`

---
 piker/pp.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/piker/pp.py b/piker/pp.py
index 38ff156647..a01bdc4e71 100644
--- a/piker/pp.py
+++ b/piker/pp.py
@@ -199,8 +199,16 @@ def to_pretoml(self) -> tuple[str, dict]:
             sym_info = s.broker_info[broker]
             d['asset_type'] = sym_info['asset_type']
-            d['price_tick_size'] = sym_info['price_tick_size']
-            d['lot_tick_size'] = sym_info['lot_tick_size']
+            d['price_tick_size'] = (
+                sym_info.get('price_tick_size')
+                or
+                s.tick_size
+            )
+            d['lot_tick_size'] = (
+                sym_info.get('lot_tick_size')
+                or
+                s.lot_tick_size
+            )
 
         if self.expiry is None:
             d.pop('expiry', None)

From aba238e8b1f9533c8085fb2649cd09d49c4e1cbc Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 17:22:13 -0500
Subject: [PATCH 002/136] `kraken`: expect `Pair` in search results..

---
 piker/brokers/kraken/feed.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/piker/brokers/kraken/feed.py b/piker/brokers/kraken/feed.py
index b4a2e666f5..a737aaadba 100644
--- a/piker/brokers/kraken/feed.py
+++ b/piker/brokers/kraken/feed.py
@@ -453,7 +453,7 @@ async def open_symbol_search(
                 score_cutoff=50,
             )
             # repack in dict form
-            await stream.send(
-                {item[0]['altname']: item[0]
-                 for item in matches}
-            )
+            await stream.send({
+                pair[0].altname: pair[0]
+                for pair in matches
+            })

From fe0695fb7b773a01b29051237d20db81c557f8e2 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Sun, 29 Jan 2023 15:17:39 -0500
Subject: [PATCH 003/136] First draft storage layer cli

Adds a `piker storage` subcmd with a `-d` flag to wipe a particular
fqsn's time series (both 1s and 60s). Obviously this needs to be
extended much more but provides a starting point.
---
 piker/data/cli.py         | 73 ++++++++++++++++++++++++++++++++++-----
 piker/data/marketstore.py | 23 +++++-------
 2 files changed, 73 insertions(+), 23 deletions(-)

diff --git a/piker/data/cli.py b/piker/data/cli.py
index 554048a464..994b9da431 100644
--- a/piker/data/cli.py
+++ b/piker/data/cli.py
@@ -19,7 +19,10 @@

 """
 from functools import partial
-from pprint import pformat
+from pprint import (
+    pformat,
+    pprint,
+)
 
 from anyio_marketstore import open_marketstore_client
 import trio
@@ -113,15 +116,11 @@ async def main():
 
 @cli.command()
 @click.option(
-    '--tl',
-    is_flag=True,
-    help='Enable tractor logging')
 @click.option(
-    '--host',
+    '--tsdb_host',
     default='localhost'
 )
 @click.option(
-    '--port',
+    '--tsdb_port',
     default=5993
 )
 @click.argument('symbols', nargs=-1)
@@ -137,7 +136,7 @@ def storesh(
     Start an IPython shell ready to query the local marketstore db.

     '''
-    from piker.data.marketstore import tsdb_history_update
+    from piker.data.marketstore import open_tsdb_client
     from piker._daemon import open_piker_runtime
 
     async def main():
@@ -148,7 +147,63 @@ async def main():
             enable_modules=['piker.data._ahab'],
         ):
             symbol = symbols[0]
-            await tsdb_history_update(symbol)
+
+            async with open_tsdb_client(symbol) as storage:
+                # TODO: ask if user wants to write history for detected
+                # available shm buffers?
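+                # (the embed below drops the user into an `ipython`
+                # REPL with the `storage` client in scope for ad-hoc
+                # time series queries)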
+                from tractor.trionics import ipython_embed
+                await ipython_embed()
 
     trio.run(main)
 
 
+@cli.command()
+@click.option(
+    '--host',
+    default='localhost'
+)
+@click.option(
+    '--port',
+    default=5993
+)
+@click.option(
+    '--delete',
+    '-d',
+    is_flag=True,
+    help='Delete history (1 Min) for symbol(s)',
+)
+@click.argument('symbols', nargs=-1)
+@click.pass_obj
+def storage(
+    config,
+    host,
+    port,
+    symbols: list[str],
+    delete: bool,
+
+):
+    '''
+    Start an IPython shell ready to query the local marketstore db.
+
+    '''
+    from piker.data.marketstore import open_tsdb_client
+    from piker._daemon import open_piker_runtime
+
+    async def main():
+        nonlocal symbols
+
+        async with open_piker_runtime(
+            'tsdb_storage',
+            enable_modules=['piker.data._ahab'],
+        ):
+            symbol = symbols[0]
+            async with open_tsdb_client(symbol) as storage:
+                if delete:
+                    for fqsn in symbols:
+                        syms = await storage.client.list_symbols()
+                        breakpoint()
+                        await storage.delete_ts(fqsn, 60)
+                        await storage.delete_ts(fqsn, 1)
 
     trio.run(main)

diff --git a/piker/data/marketstore.py b/piker/data/marketstore.py
index 190667d63f..792396e38e 100644
--- a/piker/data/marketstore.py
+++ b/piker/data/marketstore.py
@@ -510,7 +510,6 @@ async def delete_ts(
 
         client = self.client
         syms = await client.list_symbols()
-        print(syms)
         if key not in syms:
             raise KeyError(f'`{key}` table key not found in\n{syms}?')
@@ -627,10 +626,10 @@ async def open_storage_client(
         yield Storage(client)
 
 
-async def tsdb_history_update(
-    fqsn: Optional[str] = None,
-
-) -> list[str]:
+@acm
+async def open_tsdb_client(
+    fqsn: str,
+) -> Storage:
 
     # TODO: real-time dedicated task for ensuring
     # history consistency between the tsdb, shm and real-time feed..
@@ -659,7 +658,7 @@
     # - https://github.com/pikers/piker/issues/98
     #
     profiler = Profiler(
-        disabled=False,  # not pg_profile_enabled(),
+        disabled=True,  # not pg_profile_enabled(),
         delayed=False,
     )
@@ -700,14 +699,10 @@
 
         # profiler('Finished db arrays diffs')
 
-        syms = await storage.client.list_symbols()
-        log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
-        profiler(f'listed symbols {syms}')
-
-        # TODO: ask if user wants to write history for detected
-        # available shm buffers?
-        from tractor.trionics import ipython_embed
-        await ipython_embed()
+        syms = await storage.client.list_symbols()
+        # log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
+        # profiler(f'listed symbols {syms}')
+        yield storage
 
         # for array in [to_append, to_prepend]:
         #     if array is None:

From 7b196b1b97578789795a50e95c70fc33b47e63a0 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 12:56:56 -0500
Subject: [PATCH 004/136] Support startup-config overrides to `ahabd` super

With the addition of new `elasticsearch` docker support in
https://github.com/pikers/piker/pull/464, adjustments were made
to container startup sync logic (particularly the `trio` checkpoint
sleep period - which itself is a hack around a sync client api) which
caused a regression in upstream startup logic wherein container error
logs were not being bubbled up correctly causing a silent failure mode:

- the `marketstore` container started with corrupt input config,
- the `ahabd` super code timed out on the startup phase due to a larger
  log polling period, skipped processing startup logs from the
  container, and continued on as though the container was started,
- the history client then failed on its grpc connection with no clear
  error about why the connection failed.

Here we revert to the old poll period (1ms) to avoid any more silent
failures and further extend supervisor control through a configuration
override mechanism. To address the underlying design issue, this patch
adds support for container-endpoint-callbacks to override supervisor
startup configuration parameters via the 2nd value in their returned
tuple: the already delivered configuration `dict` value. The current
exposed values include:

    {
        'startup_timeout': 1.0,
        'startup_query_period': 0.001,
        'log_msg_key': 'msg',
    },

This allows for container-specific control over the startup-sync query
period (the hack mentioned above) as well as the expected log msg key
and of course the startup timeout.
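As a rough sketch (the tuple shape mirrors the `elasticsearch` endpoint
added later in this series; the endpoint name here is made up), an
endpoint opting into custom startup-sync config looks like:

    def start_myservice(client: docker.DockerClient) -> tuple:
        dcntr = client.containers.run(
            'myservice-image',  # hypothetical image name
            detach=True,
        )
        return (
            dcntr,
            # 2nd value: startup config overrides
            {
                'startup_timeout': 240.0,
                'startup_query_period': 0.1,
                'log_msg_key': 'message',
            },
            start_matcher,
            stop_matcher,
        )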
---
 piker/data/_ahab.py | 85 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 25 deletions(-)

diff --git a/piker/data/_ahab.py b/piker/data/_ahab.py
index 39a5b46a1d..66b41f389d 100644
--- a/piker/data/_ahab.py
+++ b/piker/data/_ahab.py
@@ -18,6 +18,7 @@
 Supervisor for docker with included specific-image service helpers.

 '''
+from collections import ChainMap
 import os
 import time
 from typing import (
@@ -124,10 +125,19 @@ def __init__(
 
     async def process_logs_until(
         self,
+        log_msg_key: str,
+
         # this is a predicate func for matching log msgs emitted by the
         # underlying containerized app
         patt_matcher: Callable[[str], bool],
-        bp_on_msg: bool = False,
+
+        # XXX WARNING XXX: do not touch this sleep value unless
+        # you know what you are doing! the value is critical to
+        # making sure the caller code inside the startup context
+        # does not timeout BEFORE we receive a match on the
+        # ``patt_matcher()`` predicate above.
+        checkpoint_period: float = 0.001,
+
     ) -> bool:
         '''
         Attempt to capture container log messages and relay through our
@@ -137,12 +147,14 @@ async def process_logs_until(
         seen_so_far = self.seen_so_far
 
         while True:
+            logs = self.cntr.logs()
             try:
                 logs = self.cntr.logs()
             except (
                 docker.errors.NotFound,
                 docker.errors.APIError
             ):
+                log.exception('Failed to parse logs?')
                 return False
 
             entries = logs.decode().split('\n')
@@ -155,25 +167,23 @@ async def process_logs_until(
                 entry = entry.strip()
                 try:
                     record = json.loads(entry)
-
-                    if 'msg' in record:
-                        msg = record['msg']
-                    elif 'message' in record:
-                        msg = record['message']
-                    else:
-                        raise KeyError(f'Unexpected log format\n{record}')
-
+                    msg = record[log_msg_key]
                     level = record['level']
 
                 except json.JSONDecodeError:
                     msg = entry
                     level = 'error'
 
-                if msg and entry not in seen_so_far:
-                    seen_so_far.add(entry)
-                    if bp_on_msg:
-                        await tractor.breakpoint()
+                # TODO: do we need a more general mechanism
+                # for these kind of "log record entries"?
+                # if 'Error' in entry:
+                #     raise RuntimeError(entry)
+
+                if (
+                    msg
+                    and entry not in seen_so_far
+                ):
+                    seen_so_far.add(entry)
                     getattr(log, level.lower(), log.error)(f'{msg}')
 
                     if level == 'fatal':
@@ -183,7 +193,7 @@
                         return True
 
             # do a checkpoint so we don't block if cancelled B)
-            await trio.sleep(0.1)
+            await trio.sleep(checkpoint_period)
 
         return False
@@ -301,7 +311,6 @@ async def cancel(
 async def open_ahabd(
     ctx: tractor.Context,
     endpoint: str,  # ns-pointer str-msg-type
-    start_timeout: float = 1.0,
 
     **kwargs,
 
) -> None:
@@ -322,16 +331,39 @@ async def open_ahabd(
         ) = ep_func(client)
         cntr = Container(dcntr)
 
-        with trio.move_on_after(start_timeout):
-            found = await cntr.process_logs_until(start_lambda)
+        conf: ChainMap[str, Any] = ChainMap(
 
-            if not found and dcntr not in client.containers.list():
-                for entry in cntr.seen_so_far:
-                    log.info(entry)
+            # container specific
+            cntr_config,
 
-                raise RuntimeError(
-                    f'Failed to start {dcntr.id} check logs deats'
-                )
+            # defaults
+            {
+                'startup_timeout': 1.0,
+                'startup_query_period': 0.001,
+                'log_msg_key': 'msg',
+            },
+        )
+
+        found = False
+        with trio.move_on_after(conf['startup_timeout']):
+            found = await cntr.process_logs_until(
+                conf['log_msg_key'],
+                start_lambda,
+                checkpoint_period=conf['startup_query_period'],
+            )
+
+        # XXX: if we timeout on finding the "startup msg" we expect then
+        # we want to FOR SURE raise an error upwards!
+        if (
+            not found
+            and dcntr not in client.containers.list()
+        ):
+            for entry in cntr.seen_so_far:
+                log.info(entry)
+
+            raise RuntimeError(
+                f'Failed to start {dcntr.id} check logs deats'
+            )
 
         await ctx.started((
             cntr.cntr.id,
             os.getpid(),
             cntr_config,
         ))

From 959e423849bb96192e5542e385158fe174a373a5 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 12:59:20 -0500
Subject: [PATCH 005/136] Add warning around detach flag to docker client

---
 piker/data/marketstore.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/piker/data/marketstore.py b/piker/data/marketstore.py
index 792396e38e..4cfc18268b 100644
--- a/piker/data/marketstore.py
+++ b/piker/data/marketstore.py
@@ -185,7 +185,11 @@ def start_marketstore(
             config_dir_mnt,
             data_dir_mnt,
         ],
+
+        # XXX: this must be set to allow backgrounding/non-blocking
+        # usage interaction with the container's process.
         detach=True,
+
         # stop_signal='SIGINT',
         init=True,
         # remove=True,

From 8c66f066bdab8d9f52c2d5bd35a6632f7afbc050 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 13:00:11 -0500
Subject: [PATCH 006/136] Deliver es specific ahab-super in endpoint startup
 config

---
 piker/data/elastic.py | 24 ++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/piker/data/elastic.py b/piker/data/elastic.py
index 43c6afd082..fadcaa5e8e 100644
--- a/piker/data/elastic.py
+++ b/piker/data/elastic.py
@@ -15,17 +15,11 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 from __future__ import annotations
-from contextlib import asynccontextmanager as acm
-from pprint import pformat
 from typing import (
     Any,
     TYPE_CHECKING,
 )
 
-import pyqtgraph as pg
-import numpy as np
-import tractor
-
 if TYPE_CHECKING:
     import docker
@@ -65,14 +59,14 @@ def start_elasticsearch(
         -itd \
         --rm \
         --network=host \
-        --mount type=bind,source="$(pwd)"/elastic,target=/usr/share/elasticsearch/data \
+        --mount type=bind,source="$(pwd)"/elastic,\
+        target=/usr/share/elasticsearch/data \
         --env "elastic_username=elastic" \
         --env "elastic_password=password" \
        --env "xpack.security.enabled=false" \
        elastic

     '''
-    import docker
     get_console_log('info', name=__name__)
 
     dcntr: DockerContainer = client.containers.run(
@@ -86,7 +80,7 @@ async def start_matcher(msg: str):
         try:
             health = (await asks.get(
-                f'http://localhost:19200/_cat/health',
+                'http://localhost:19200/_cat/health',
                 params={'format': 'json'}
             )).json()
@@ -102,7 +96,17 @@ async def stop_matcher(msg: str):
 
     return (
         dcntr,
-        {},
+        {
+            # apparently we're REALLY tolerant of startup latency
+            # for CI XD
+            'startup_timeout': 240.0,
+
+            # XXX: decrease http poll period bc docker
+            # is shite at handling fast poll rates..
+            'startup_query_period': 0.1,
+
+            'log_msg_key': 'message',
+        },
         # expected startup and stop msgs
         start_matcher,
         stop_matcher,

From 05b67c27d081c4e4ad3b04a8d7b73737658b43de Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 13:01:42 -0500
Subject: [PATCH 007/136] Apply `Services` runtime state **immediately** inside
 startup block

---
 piker/_daemon.py | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/piker/_daemon.py b/piker/_daemon.py
index 8983ecccf0..c0c1df796a 100644
--- a/piker/_daemon.py
+++ b/piker/_daemon.py
@@ -337,7 +337,6 @@ async def open_pikerd(
     alive underling services (see below).
     '''
-
     async with (
 
         open_piker_runtime(
@@ -355,7 +354,13 @@ async def open_pikerd(
         tractor.open_nursery() as actor_nursery,
         trio.open_nursery() as service_nursery,
     ):
-        assert root_actor.accept_addr == reg_addr
+        if root_actor.accept_addr != reg_addr:
+            raise RuntimeError(f'Daemon failed to bind on {reg_addr}!?')
+
+        # assign globally for future daemon/task creation
+        Services.actor_n = actor_nursery
+        Services.service_n = service_nursery
+        Services.debug_mode = debug_mode
 
         if tsdb:
             from piker.data._ahab import start_ahab
@@ -366,6 +371,7 @@ async def open_pikerd(
                 start_ahab,
                 'marketstored',
                 start_marketstore,
+                loglevel,
             )
 
             log.info(
@@ -385,7 +391,6 @@ async def open_pikerd(
                     start_ahab,
                     'elasticsearch',
                     start_elasticsearch,
-                    start_timeout=240.0  # high cause ci
                 )
             )
 
@@ -396,12 +401,6 @@ async def open_pikerd(
             f'config: {pformat(config)}'
         )
 
-        # assign globally for future daemon/task creation
-        Services.actor_n = actor_nursery
-        Services.service_n = service_nursery
-        Services.debug_mode = debug_mode
-
-
         try:
             yield Services
 
@@ -695,7 +694,10 @@ async def maybe_spawn_brokerd(
 
         f'brokerd.{brokername}',
         service_task_target=spawn_brokerd,
-        spawn_args={'brokername': brokername, 'loglevel': loglevel},
+        spawn_args={
+            'brokername': brokername,
+            'loglevel': loglevel,
+        },
         loglevel=loglevel,
         **kwargs,

From b078a066212a24c2ea16465fcc6b1d861288fee8 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 14:22:23 -0500
Subject: [PATCH 008/136] Doc string and types bump in logging mod

---
 piker/log.py | 46 ++++++++++++++++++++++++++++++----------
 1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/piker/log.py b/piker/log.py
index 804e09dc6e..a36beec023 100644
--- a/piker/log.py
+++ b/piker/log.py
@@ -21,7 +21,11 @@
 import json
 
 import tractor
-from pygments import highlight, lexers, formatters
+from pygments import (
+    highlight,
+    lexers,
+    formatters,
+)
 
 # Makes it so we only see the full module name when using ``__name__``
 # without the extra "piker." prefix.
@@ -32,26 +36,48 @@ def get_logger(
     name: str = None,
 ) -> logging.Logger:
-    '''Return the package log or a sub-log for `name` if provided.
+    '''
+    Return the package log or a sub-log for `name` if provided.
 
+    '''
     return tractor.log.get_logger(name=name, _root_name=_proj_name)
 
 
-def get_console_log(level: str = None, name: str = None) -> logging.Logger:
-    '''Get the package logger and enable a handler which writes to stderr.
+def get_console_log(
+    level: str | None = None,
+    name: str | None = None,
+
+) -> logging.Logger:
+    '''
+    Get the package logger and enable a handler which writes to stderr.
 
     Yeah yeah, i know we can use ``DictConfig``. You do it...
+
     '''
     return tractor.log.get_console_log(
-        level, name=name, _root_name=_proj_name)  # our root logger
+        level,
+        name=name,
+        _root_name=_proj_name,
+    )  # our root logger
 
 
-def colorize_json(data, style='algol_nu'):
-    """Colorize json output using ``pygments``.
-    """
-    formatted_json = json.dumps(data, sort_keys=True, indent=4)
+def colorize_json(
+    data: dict,
+    style='algol_nu',
+):
+    '''
+    Colorize json output using ``pygments``.
+
+    '''
+    formatted_json = json.dumps(
+        data,
+        sort_keys=True,
+        indent=4,
+    )
     return highlight(
-        formatted_json, lexers.JsonLexer(),
+        formatted_json,
+        lexers.JsonLexer(),
+
         # likeable styles: algol_nu, tango, monokai
         formatters.TerminalTrueColorFormatter(style=style)
     )

From 7694419e712bc7c11f756a87f1aba1cce5653c80 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 14:28:48 -0500
Subject: [PATCH 009/136] Background docker-container logs processing

Previously we would make the `ahabd` supervisor-actor sync to docker
container startup using pseudo-blocking log message processing. This
has issues:

- we're forced to do a hacky "yield back to `trio`" in order to be
  "fake async" when reading the log stream and further,
- blocking on a message is fragile and often slow.

Instead, run the log processor in a background task and in the parent
task poll for the container to be in the client list using a similar
pseudo-async poll pattern. This allows the super to `Context.started()`
sooner (when the container is actually registered as "up") and thus
unblock its (remote) caller faster whilst still doing full log msg
proxying!

Deatz:
- adds `Container.cuid: str` a unique container id for logging.
- correctly proxy through the `loglevel: str` from `pikerd` caller task.
- shield around `Container.cancel()` in the teardown block and use
  cancel level logging in that method.
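In miniature, the new startup sync then reads roughly as (condensed
from the diff below):

    with trio.move_on_after(conf['startup_timeout']) as cs:
        async with trio.open_nursery() as tn:
            tn.start_soon(...)  # bg log proxying task
            while not cs.cancel_called:
                if dcntr in client.containers.list():
                    break
                await trio.sleep(conf['startup_query_period'])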
---
 piker/data/_ahab.py | 125 ++++++++++++++++++++++++++++++++------------
 1 file changed, 87 insertions(+), 38 deletions(-)

diff --git a/piker/data/_ahab.py b/piker/data/_ahab.py
index 66b41f389d..d2e042e3d2 100644
--- a/piker/data/_ahab.py
+++ b/piker/data/_ahab.py
@@ -19,6 +19,7 @@

 '''
 from collections import ChainMap
+from functools import partial
 import os
 import time
 from typing import (
@@ -46,7 +47,10 @@
     ReadTimeout,
 )
 
-from ..log import get_logger, get_console_log
+from ..log import (
+    get_logger,
+    get_console_log,
+)
 from .. import config
 
 log = get_logger(__name__)
@@ -197,6 +201,11 @@ async def process_logs_until(
 
         return False
 
+    @property
+    def cuid(self) -> str:
+        fqcn: str = self.cntr.attrs['Config']['Image']
+        return f'{fqcn}[{self.cntr.short_id}]'
+
     def try_signal(
         self,
         signal: str = 'SIGINT',
@@ -232,17 +241,23 @@ def hard_kill(self, start: float) -> None:
 
     async def cancel(
         self,
-        stop_msg: str,
+        log_msg_key: str,
+        stop_predicate: Callable[[str], bool],
+
+        hard_kill: bool = False,
 
     ) -> None:
+        '''
+        Attempt to cancel this container gracefully, fail over to
+        a hard kill on timeout.
 
+        '''
         cid = self.cntr.id
 
         # first try a graceful cancel
         log.cancel(
-            f'SIGINT cancelling container: {cid}\n'
-            f'waiting on stop msg: "{stop_msg}"'
+            f'SIGINT cancelling container: {self.cuid}\n'
+            'waiting on stop predicate...'
         )
         self.try_signal('SIGINT')
 
@@ -253,7 +268,10 @@ async def cancel(
             log.cancel('polling for CNTR logs...')
 
             try:
-                await self.process_logs_until(stop_msg)
+                await self.process_logs_until(
+                    log_msg_key,
+                    stop_predicate,
+                )
             except ApplicationLogError:
                 hard_kill = True
             else:
@@ -311,11 +329,16 @@ async def cancel(
 async def open_ahabd(
     ctx: tractor.Context,
     endpoint: str,  # ns-pointer str-msg-type
+    loglevel: str | None = 'cancel',
 
     **kwargs,
 
 ) -> None:
-    get_console_log('info', name=__name__)
+
+    log = get_console_log(
+        loglevel,
+        name=__name__,
+    )
 
     async with open_docker() as client:
@@ -338,40 +361,63 @@ async def open_ahabd(
 
             # defaults
             {
+                # startup time limit which is the max the supervisor
+                # will wait for the container to be registered in
+                # ``client.containers.list()``
                 'startup_timeout': 1.0,
+
+                # how fast to poll for the startup predicate by sleeping
+                # this amount incrementally thus yielding to the
+                # ``trio`` scheduler during sync polling execution.
                 'startup_query_period': 0.001,
+
+                # str-key value expected to contain log message body-contents
+                # when read using:
+                # ``json.loads(entry for entry in DockerContainer.logs())``
                 'log_msg_key': 'msg',
             },
         )
 
-        found = False
-        with trio.move_on_after(conf['startup_timeout']):
-            found = await cntr.process_logs_until(
-                conf['log_msg_key'],
-                start_lambda,
-                checkpoint_period=conf['startup_query_period'],
-            )
+        with trio.move_on_after(conf['startup_timeout']) as cs:
+            async with trio.open_nursery() as tn:
+                tn.start_soon(
+                    partial(
+                        cntr.process_logs_until,
+                        log_msg_key=conf['log_msg_key'],
+                        patt_matcher=start_lambda,
+                        checkpoint_period=conf['startup_query_period'],
+                    )
+                )
 
-        # XXX: if we timeout on finding the "startup msg" we expect then
-        # we want to FOR SURE raise an error upwards!
-        if (
-            not found
-            and dcntr not in client.containers.list()
-        ):
-            for entry in cntr.seen_so_far:
-                log.info(entry)
-
-            raise RuntimeError(
-                f'Failed to start {dcntr.id} check logs deats'
-            )
+                # poll for container startup or timeout
+                while not cs.cancel_called:
+                    if dcntr in client.containers.list():
+                        break
+
+                    await trio.sleep(conf['startup_query_period'])
 
-        await ctx.started((
-            cntr.cntr.id,
-            os.getpid(),
-            cntr_config,
-        ))
+                # sync with remote caller actor-task but allow log
+                # processing to continue running in bg.
+                await ctx.started((
+                    cntr.cntr.id,
+                    os.getpid(),
+                    cntr_config,
+                ))
 
         try:
+            # XXX: if we timeout on finding the "startup msg" we expect then
+            # we want to FOR SURE raise an error upwards!
+            if cs.cancelled_caught:
+                # if dcntr not in client.containers.list():
+                for entry in cntr.seen_so_far:
+                    log.info(entry)
+
+                raise DockerNotStarted(
+                    f'Failed to start container: {dcntr.cuid}\n'
+                    f'due to startup_timeout={conf["startup_timeout"]}s\n\n'
+                    "prolly you should check your container's logs for deats.."
+                )
+
             # TODO: we might eventually want a proxy-style msg-prot here
             # to allow remote control of containers without needing
             # callers to have root perms?
             await trio.sleep_forever()
 
         finally:
+            # TODO: ensure loglevel can be set and teardown logs are
+            # reported if possible on error or cancel..
-            # with trio.CancelScope(shield=True):
-            await cntr.cancel(stop_lambda)
+            with trio.CancelScope(shield=True):
+                await cntr.cancel(
+                    log_msg_key=conf['log_msg_key'],
+                    stop_predicate=stop_lambda,
+                )
 
 
 async def start_ahab(
     service_name: str,
     endpoint: Callable[docker.DockerClient, DockerContainer],
-    start_timeout: float = 1.0,
+    loglevel: str | None = 'cancel',
+
     task_status: TaskStatus[
         tuple[
             trio.Event,
 
     cn_ready = trio.Event()
     try:
-        async with tractor.open_nursery(
-            loglevel='runtime',
-        ) as tn:
+        async with tractor.open_nursery() as an:
 
-            portal = await tn.start_actor(
+            portal = await an.start_actor(
                 service_name,
-                enable_modules=[__name__]
+                enable_modules=[__name__],
+                loglevel=loglevel,
             )
 
             # TODO: we have issues with this on teardown
 
             async with portal.open_context(
                 open_ahabd,
                 endpoint=str(NamespacePath.from_ref(endpoint)),
-                start_timeout=start_timeout
+                loglevel=loglevel,
             ) as (ctx, first):
 
                 cid, pid, cntr_config = first

From bb723abc9d774fa2ce61269c1c97155d1698a20b Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 14:56:21 -0500
Subject: [PATCH 010/136] Always passthrough loglevel to `ahabd` supervisor

---
 piker/_daemon.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/piker/_daemon.py b/piker/_daemon.py
index c0c1df796a..1a8576a138 100644
--- a/piker/_daemon.py
+++ b/piker/_daemon.py
@@ -368,10 +368,12 @@ async def open_pikerd(
 
             log.info('Spawning `marketstore` supervisor')
             ctn_ready, config, (cid, pid) = await service_nursery.start(
-                start_ahab,
-                'marketstored',
-                start_marketstore,
-                loglevel,
+                partial(
+                    start_ahab,
+                    'marketstored',
+                    start_marketstore,
+                    loglevel=loglevel,
+                )
             )
 
             log.info(
@@ -391,6 +393,7 @@
                     start_ahab,
                     'elasticsearch',
                     start_elasticsearch,
+                    loglevel=loglevel,
                 )
             )

From 56629b6b2e584e58fa4dd7167ce2e7c4db43e6f5 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:00:24 -0500
Subject: [PATCH 011/136] Hardcode `cancel` log level for `ahabd` for now

---
 piker/data/_ahab.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/piker/data/_ahab.py b/piker/data/_ahab.py
index d2e042e3d2..2c0230f109 100644
--- a/piker/data/_ahab.py
+++ b/piker/data/_ahab.py
@@ -413,7 +413,7 @@ async def open_ahabd(
                     log.info(entry)
 
                 raise DockerNotStarted(
-                    f'Failed to start container: {dcntr.cuid}\n'
+                    f'Failed to start container: {cntr.cuid}\n'
                     f'due to startup_timeout={conf["startup_timeout"]}s\n\n'
                     "prolly you should check your container's logs for deats.."
                )
@@ -484,7 +484,7 @@
             async with portal.open_context(
                 open_ahabd,
                 endpoint=str(NamespacePath.from_ref(endpoint)),
-                loglevel=loglevel,
+                loglevel='cancel',
             ) as (ctx, first):
 
                 cid, pid, cntr_config = first

From bfe3ea1f59bef5986b2ebb069d1633abfdf90e6b Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:01:06 -0500
Subject: [PATCH 012/136] Set explicit `marketstore` container startup timeout

---
 piker/data/marketstore.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/piker/data/marketstore.py b/piker/data/marketstore.py
index 4cfc18268b..6e3ed78f03 100644
--- a/piker/data/marketstore.py
+++ b/piker/data/marketstore.py
@@ -63,11 +63,12 @@
 log = get_logger(__name__)
 
 
-# container level config
+# ahabd-supervisor and container level config
 _config = {
     'grpc_listen_port': 5995,
     'ws_listen_port': 5993,
     'log_level': 'debug',
+    'startup_timeout': 1,
 }
 
 _yaml_config = '''

From 93c81fa4d1c4dc877dde0c81a5664e78cb4db635 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:14:39 -0500
Subject: [PATCH 013/136] Start `piker.service` sub-package

For now just moves everything that was in `piker._daemon` to a subpkg
module but a reorg is coming pronto!
---
 piker/__init__.py                         |  8 ++++----
 piker/brokers/cli.py                      | 21 +++++++++++++--------
 piker/brokers/core.py                     |  2 +-
 piker/clearing/_client.py                 |  7 +++++--
 piker/cli/__init__.py                     | 14 ++++++++------
 piker/data/_ahab.py                       | 14 +++++++++-----
 piker/data/_sampling.py                   |  8 ++++----
 piker/data/cli.py                         |  4 ++--
 piker/data/feed.py                        |  2 +-
 piker/{_daemon.py => service/__init__.py} | 13 ++++++-------
 piker/ui/_app.py                          |  2 +-
 piker/ui/_exec.py                         |  2 +-
 piker/ui/cli.py                           |  2 +-
 tests/conftest.py                         | 12 ++++++------
 tests/test_databases.py                   |  2 +-
 tests/test_services.py                    |  3 +--
 16 files changed, 64 insertions(+), 52 deletions(-)
 rename piker/{_daemon.py => service/__init__.py} (99%)

diff --git a/piker/__init__.py b/piker/__init__.py
index d08c2dbce2..6ebeec3df6 100644
--- a/piker/__init__.py
+++ b/piker/__init__.py
@@ -1,5 +1,5 @@
 # piker: trading gear for hackers.
-# Copyright 2020-eternity Tyler Goodlet (in stewardship for piker0)
+# Copyright 2020-eternity Tyler Goodlet (in stewardship for pikers)
 
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -14,11 +14,11 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-"""
+'''
 piker: trading gear for hackers.
 
-"""
-from ._daemon import open_piker_runtime
+'''
+from .service import open_piker_runtime
 from .data.feed import open_feed
 
 __all__ = [

diff --git a/piker/brokers/cli.py b/piker/brokers/cli.py
index 0d84384d69..f86c679ebc 100644
--- a/piker/brokers/cli.py
+++ b/piker/brokers/cli.py
@@ -29,8 +29,15 @@
 from ..cli import cli
 from .. import watchlists as wl
 from ..log import get_console_log, colorize_json, get_logger
-from .._daemon import maybe_spawn_brokerd, maybe_open_pikerd
-from ..brokers import core, get_brokermod, data
+from ..service import (
+    maybe_spawn_brokerd,
+    maybe_open_pikerd,
+)
+from ..brokers import (
+    core,
+    get_brokermod,
+    data,
+)
 
 log = get_logger('cli')
 DEFAULT_BROKER = 'questrade'
@@ -60,6 +67,7 @@ def get_method(client, meth_name: str):
     print_ok('found!.')
     return method
 
+
 async def run_method(client, meth_name: str, **kwargs):
     method = get_method(client, meth_name)
     print('running...', end='', flush=True)
@@ -67,19 +75,20 @@ async def run_method(client, meth_name: str, **kwargs):
     print_ok(f'done! result: {type(result)}')
     return result
 
+
 async def run_test(broker_name: str):
     brokermod = get_brokermod(broker_name)
     total = 0
     passed = 0
     failed = 0
 
-    print(f'getting client...', end='', flush=True)
+    print('getting client...', end='', flush=True)
     if not hasattr(brokermod, 'get_client'):
         print_error('fail! no \'get_client\' context manager found.')
         return
 
     async with brokermod.get_client(is_brokercheck=True) as client:
-        print_ok(f'done! inside client context.')
+        print_ok('done! inside client context.')
 
         # check for methods present on brokermod
         method_list = [
@@ -130,7 +139,6 @@
 
             total += 1
 
-
         # check for methods present con brokermod.Client and their
         # results
@@ -180,7 +188,6 @@ async def bcheck_main():
     trio.run(run_test, broker)
 
 
-
 @cli.command()
 @click.option('--keys', '-k', multiple=True,
               help='Return results only for these keys')
@@ -335,8 +342,6 @@ def contracts(ctx, loglevel, broker, symbol, ids):
     brokermod = get_brokermod(broker)
     get_console_log(loglevel)
 
-
-
     contracts = trio.run(partial(core.contracts, brokermod, symbol))
     if not ids:
         # just print out expiry dates which can be used with

diff --git a/piker/brokers/core.py b/piker/brokers/core.py
index af5da3a1e6..3e9e1614a4 100644
--- a/piker/brokers/core.py
+++ b/piker/brokers/core.py
@@ -28,7 +28,7 @@
 
 from ..log import get_logger
 from . import get_brokermod
-from .._daemon import maybe_spawn_brokerd
+from ..service import maybe_spawn_brokerd
 from .._cacheables import open_cached_client

diff --git a/piker/clearing/_client.py b/piker/clearing/_client.py
index 0a40b5480b..7d03406a22 100644
--- a/piker/clearing/_client.py
+++ b/piker/clearing/_client.py
@@ -29,8 +29,11 @@
 
 from ..log import get_logger
 from ..data.types import Struct
-from .._daemon import maybe_open_emsd
-from ._messages import Order, Cancel
+from ..service import maybe_open_emsd
+from ._messages import (
+    Order,
+    Cancel,
+)
 from ..brokers import get_brokermod
 
 if TYPE_CHECKING:

diff --git a/piker/cli/__init__.py b/piker/cli/__init__.py
index 9b6f225ce9..b4d13505b0 100644
--- a/piker/cli/__init__.py
+++ b/piker/cli/__init__.py
@@ -19,16 +19,18 @@
 
 '''
 import os
-from pprint import pformat
-from functools import partial
 
 import click
 import trio
 import tractor
 
-from ..log import get_console_log, get_logger, colorize_json
+from ..log import (
+    get_console_log,
+    get_logger,
+    colorize_json,
+)
 from ..brokers import get_brokermod
-from .._daemon import (
+from ..service import (
     _default_registry_host,
     _default_registry_port,
 )
@@ -68,7 +70,7 @@ def pikerd(
 
     '''
-    from .._daemon import open_pikerd
+    from ..service import open_pikerd
     log = get_console_log(loglevel)
 
     if pdb:
@@ -171,7 +173,7 @@ def cli(
 @click.pass_obj
 def services(config, tl, ports):
 
-    from .._daemon import (
+    from ..service import (
         open_piker_runtime,
         _default_registry_port,
         _default_registry_host,

diff --git a/piker/data/_ahab.py b/piker/data/_ahab.py
index 2c0230f109..38d4a9e7b2 100644
--- a/piker/data/_ahab.py
+++ b/piker/data/_ahab.py
@@ -426,11 +426,15 @@ async def open_ahabd(
         finally:
             # TODO: ensure loglevel can be set and teardown logs are
             # reported if possible on error or cancel..
-            with trio.CancelScope(shield=True):
-                await cntr.cancel(
-                    log_msg_key=conf['log_msg_key'],
-                    stop_predicate=stop_lambda,
-                )
+            # XXX WARNING: currently shielding here can result in hangs
+            # on ctl-c from user.. ideally we can avoid a cancel getting
+            # consumed and not propagating whilst still doing teardown
+            # logging..
+            # with trio.CancelScope(shield=True):
+            await cntr.cancel(
+                log_msg_key=conf['log_msg_key'],
+                stop_predicate=stop_lambda,
+            )

diff --git a/piker/data/_sampling.py b/piker/data/_sampling.py
index a5df96ccac..f44304bf93 100644
--- a/piker/data/_sampling.py
+++ b/piker/data/_sampling.py
@@ -42,7 +42,7 @@
     get_logger,
     get_console_log,
 )
-from .._daemon import maybe_spawn_daemon
+from ..service import maybe_spawn_daemon
 
 if TYPE_CHECKING:
     from ._sharedmem import (
@@ -68,8 +68,8 @@ class Sampler:
 
     This non-instantiated type is meant to be a singleton within
     a `samplerd` actor-service spawned once by the user wishing to
-    time-step sample real-time quote feeds, see
-    ``._daemon.maybe_open_samplerd()`` and the below
+    time-step-sample (real-time) quote feeds, see
+    ``.service.maybe_open_samplerd()`` and the below
     ``register_with_sampler()``.
 
     '''
@@ -379,7 +379,7 @@ async def spawn_samplerd(
     update and increment count write and stream broadcasting.
     '''
-    from piker._daemon import Services
+    from piker.service import Services
 
     dname = 'samplerd'
     log.info(f'Spawning `{dname}`')

diff --git a/piker/data/cli.py b/piker/data/cli.py
index 554048a464..7c8b9a68d9 100644
--- a/piker/data/cli.py
+++ b/piker/data/cli.py
@@ -137,7 +137,7 @@ def storesh(
 
     '''
     from piker.data.marketstore import open_tsdb_client
-    from piker._daemon import open_piker_runtime
+    from piker.service import open_piker_runtime
 
     async def main():
@@ -187,7 +187,7 @@ def storage(
 
     '''
     from piker.data.marketstore import open_tsdb_client
-    from piker._daemon import open_piker_runtime
+    from piker.service import open_piker_runtime
 
     async def main():

diff --git a/piker/data/feed.py b/piker/data/feed.py
index 906f4bb4f7..a31e955ad4 100644
--- a/piker/data/feed.py
+++ b/piker/data/feed.py
@@ -58,7 +58,7 @@
     get_logger,
     get_console_log,
 )
-from .._daemon import (
+from ..service import (
     maybe_spawn_brokerd,
     check_for_service,
 )

diff --git a/piker/_daemon.py b/piker/service/__init__.py
similarity index 99%
rename from piker/_daemon.py
rename to piker/service/__init__.py
index 1a8576a138..6788b7642e 100644
--- a/piker/_daemon.py
+++ b/piker/service/__init__.py
@@ -19,6 +19,8 @@
 
 """
 from __future__ import annotations
+from pprint import pformat
+from functools import partial
 import os
 from typing import (
     Optional,
@@ -35,14 +37,11 @@
 import trio
 from trio_typing import TaskStatus
 
-from .log import (
+from ..log import (
     get_logger,
     get_console_log,
 )
-from .brokers import get_brokermod
-
-from pprint import pformat
-from functools import partial
+from ..brokers import get_brokermod
 
 log = get_logger(__name__)
@@ -669,7 +668,7 @@ async def spawn_brokerd(
     )
 
     # non-blocking setup of brokerd service nursery
-    from .data import _setup_persistent_brokerd
+    from ..data import _setup_persistent_brokerd
 
     await Services.start_service_task(
         dname,
@@ -732,7 +731,7 @@ async def spawn_emsd(
     )
 
     # non-blocking setup of clearing service
-    from .clearing._ems import _setup_persistent_emsd
+    from ..clearing._ems import _setup_persistent_emsd
 
     await Services.start_service_task(
         'emsd',

diff --git a/piker/ui/_app.py b/piker/ui/_app.py
index 3be073e798..9978dbe38c 100644
--- a/piker/ui/_app.py
+++ b/piker/ui/_app.py
@@ -24,7 +24,7 @@
 from PyQt5.QtCore import QEvent
 import trio
 
-from .._daemon import maybe_spawn_brokerd
+from ..service import maybe_spawn_brokerd
 from . import _event
 from ._exec import run_qtractor
 from ..data.feed import install_brokerd_search

diff --git a/piker/ui/_exec.py b/piker/ui/_exec.py
index d8eabb706e..19663cacdf 100644
--- a/piker/ui/_exec.py
+++ b/piker/ui/_exec.py
@@ -49,7 +49,7 @@
 import trio
 from outcome import Error
 
-from .._daemon import (
+from ..service import (
     maybe_open_pikerd,
     get_tractor_runtime_kwargs,
 )

diff --git a/piker/ui/cli.py b/piker/ui/cli.py
index a72c2f5c9e..9b8385f21f 100644
--- a/piker/ui/cli.py
+++ b/piker/ui/cli.py
@@ -24,7 +24,7 @@
 
 from ..cli import cli
 from .. import watchlists as wl
-from .._daemon import maybe_spawn_brokerd
+from ..service import maybe_spawn_brokerd
 
 _config_dir = click.get_app_dir('piker')

diff --git a/tests/conftest.py b/tests/conftest.py
index 8218ec164a..68d392aacc 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,7 +1,6 @@
 from contextlib import asynccontextmanager as acm
 from functools import partial
 import os
-from typing import AsyncContextManager
 from pathlib import Path
 from shutil import rmtree
@@ -11,7 +10,7 @@
     # log,
     config,
 )
-from piker._daemon import (
+from piker.service import (
     Services,
 )
 from piker.clearing._client import open_ems
@@ -88,7 +87,7 @@ async def _open_test_pikerd(
 
     '''
     import random
-    from piker._daemon import maybe_open_pikerd
+    from piker.service import maybe_open_pikerd
 
     if reg_addr is None:
         port = random.randint(6e3, 7e3)
@@ -151,8 +150,9 @@ async def _open_test_pikerd_and_ems(
             fqsn,
             mode=mode,
             loglevel=loglevel,
-        ) as ems_services):
-            yield (services, ems_services)
+        ) as ems_services,
+    ):
+        yield (services, ems_services)
 
 
 @pytest.fixture
@@ -168,7 +168,7 @@ def open_test_pikerd_and_ems(
         mode,
         loglevel,
         open_test_pikerd
-        )
+    )
 
 
 @pytest.fixture(scope='module')

diff --git a/tests/test_databases.py b/tests/test_databases.py
index 4eb444f334..7fcee34ade 100644
--- a/tests/test_databases.py
+++ b/tests/test_databases.py
@@ -3,7 +3,7 @@
 
 from typing import AsyncContextManager
 
-from piker._daemon import Services
+from piker.service import Services
 from piker.log import get_logger
 
 from elasticsearch import Elasticsearch

diff --git a/tests/test_services.py b/tests/test_services.py
index 763b438e50..29e613e34a 100644
--- a/tests/test_services.py
+++ b/tests/test_services.py
@@ -9,8 +9,7 @@
 import trio
 import tractor
 
-from piker.log import get_logger
-from piker._daemon import (
+from piker.service import (
     find_service,
     Services,
 )

From afac553ea2711e6563cb03f1d852cbfd48aaf391 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:25:20 -0500
Subject: [PATCH 014/136] Move all docker and external db code to
 `piker.service`

---
 piker/cli/__init__.py                  | 4 ++--
 piker/data/cli.py                      | 4 ++--
 piker/service/__init__.py              | 4 ++--
 piker/{data => service}/_ahab.py       | 0
 piker/{data => service}/elastic.py     | 0
 piker/{data => service}/marketstore.py | 5 ++---
 6 files changed, 8 insertions(+), 9 deletions(-)
 rename piker/{data => service}/_ahab.py (100%)
 rename piker/{data => service}/elastic.py (100%)
 rename piker/{data => service}/marketstore.py (99%)

diff --git a/piker/cli/__init__.py b/piker/cli/__init__.py
index b4d13505b0..63b8321afe 100644
--- a/piker/cli/__init__.py
+++ b/piker/cli/__init__.py
@@ -206,8 +206,8 @@ async def list_services():
 
 def _load_clis() -> None:
-    from ..data import marketstore  # noqa
-    from ..data import elastic
+    from ..service import marketstore  # noqa
+    from ..service import elastic
     from ..data import cli  # noqa
     from ..brokers import cli  # noqa
     from ..ui import cli  # noqa

diff --git a/piker/data/cli.py b/piker/data/cli.py
index 994b9da431..cb081c6ee4 100644
--- a/piker/data/cli.py
+++ b/piker/data/cli.py
@@ -144,7 +144,7 @@ async def main():
 
         async with open_piker_runtime(
             'storesh',
-            enable_modules=['piker.data._ahab'],
+            enable_modules=['piker.service._ahab'],
         ):
             symbol = symbols[0]
@@ -194,7 +194,7 @@ async def main():
 
         async with open_piker_runtime(
             'tsdb_storage',
-            enable_modules=['piker.data._ahab'],
+            enable_modules=['piker.service._ahab'],
         ):
             symbol = symbols[0]
             async with open_tsdb_client(symbol) as storage:
diff --git a/piker/service/__init__.py b/piker/service/__init__.py
index 6788b7642e..04ec4c289a 100644
--- a/piker/service/__init__.py
+++ b/piker/service/__init__.py
@@ -362,8 +362,8 @@ async def open_pikerd(
         Services.debug_mode = debug_mode
 
         if tsdb:
-            from piker.data._ahab import start_ahab
-            from piker.data.marketstore import start_marketstore
+            from ._ahab import start_ahab
+            from .marketstore import start_marketstore
 
             log.info('Spawning `marketstore` supervisor')
             ctn_ready, config, (cid, pid) = await service_nursery.start(

diff --git a/piker/data/_ahab.py b/piker/service/_ahab.py
similarity index 100%
rename from piker/data/_ahab.py
rename to piker/service/_ahab.py

diff --git a/piker/data/elastic.py b/piker/service/elastic.py
similarity index 100%
rename from piker/data/elastic.py
rename to piker/service/elastic.py

diff --git a/piker/data/marketstore.py b/piker/service/marketstore.py
similarity index 99%
rename from piker/data/marketstore.py
rename to piker/service/marketstore.py
index 6e3ed78f03..5613bd8db8 100644
--- a/piker/data/marketstore.py
+++ b/piker/service/marketstore.py
@@ -26,7 +26,6 @@
 from __future__ import annotations
 from contextlib import asynccontextmanager as acm
 from datetime import datetime
-from pprint import pformat
 from typing import (
     Any,
     Optional,
@@ -55,7 +54,7 @@
     import docker
     from ._ahab import DockerContainer
 
-from .feed import maybe_open_feed
+from ..data.feed import maybe_open_feed
 from ..log import get_logger, get_console_log
 from .._profile import Profiler
@@ -136,7 +135,7 @@ def start_marketstore(
 
     # create dirs when dne
     if not os.path.isdir(config._config_dir):
-       Path(config._config_dir).mkdir(parents=True, exist_ok=True)
+        Path(config._config_dir).mkdir(parents=True, exist_ok=True)
 
     if not os.path.isdir(mktsdir):
         os.mkdir(mktsdir)

From dd87d1142e19e1de6fd3e0719a4cc970bc584576 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:25:38 -0500
Subject: [PATCH 015/136] Bump mkts timeout to 2s

---
 piker/service/marketstore.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/piker/service/marketstore.py b/piker/service/marketstore.py
index 5613bd8db8..e9de95580a 100644
--- a/piker/service/marketstore.py
+++ b/piker/service/marketstore.py
@@ -67,7 +67,7 @@
     'grpc_listen_port': 5995,
     'ws_listen_port': 5993,
     'log_level': 'debug',
-    'startup_timeout': 1,
+    'startup_timeout': 2,
 }
 
 _yaml_config = '''

From b226b678e995eb63f726c45d2ccd36e9fc65d87b Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:48:17 -0500
Subject: [PATCH 016/136] Fix missed `marketstore` mod imports

---
 piker/data/cli.py  | 2 +-
 piker/data/feed.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/piker/data/cli.py b/piker/data/cli.py
index cb081c6ee4..6f4e169de5 100644
--- a/piker/data/cli.py
+++ b/piker/data/cli.py
@@ -30,7 +30,7 @@
 import click
 import numpy as np
 
-from .marketstore import (
+from ..service.marketstore import (
     get_client,
     # stream_quotes,
     ingest_quote_stream,

diff --git a/piker/data/feed.py b/piker/data/feed.py
index a31e955ad4..69d5be7dc9 100644
--- a/piker/data/feed.py
+++ b/piker/data/feed.py
@@ -86,7 +86,7 @@
 )
 
 if TYPE_CHECKING:
-    from .marketstore import Storage
+    from ..service.marketstore import Storage
 
 log = get_logger(__name__)
@@ -865,7 +865,7 @@ async def manage_history(
     ):
         log.info('Found existing `marketstored`')
-        from . import marketstore
+        from ..service import marketstore
         async with (
             marketstore.open_storage_client(fqsn)as storage,
         ):

From 31f2b01c3ecb4e2411d116159bc5bc6f5bbd9c5a Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 15:59:19 -0500
Subject: [PATCH 017/136] Move `Services` api to `.service._mngr` mod

---
 piker/service/__init__.py | 110 ++----------------------
 piker/service/_mngr.py    | 136 ++++++++++++++++++++++++++++++++
 2 files changed, 141 insertions(+), 105 deletions(-)
 create mode 100644 piker/service/_mngr.py

diff --git a/piker/service/__init__.py b/piker/service/__init__.py
index 04ec4c289a..e5e2c1fa1e 100644
--- a/piker/service/__init__.py
+++ b/piker/service/__init__.py
@@ -1,5 +1,5 @@
 # piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
 
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -15,7 +15,7 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 """
-Structured, daemon tree service management.
+Actor-runtime service orchestration machinery.
 
 """
 from __future__ import annotations
@@ -31,17 +31,18 @@
 from contextlib import (
     asynccontextmanager as acm,
 )
-from collections import defaultdict
 
 import tractor
 import trio
-from trio_typing import TaskStatus
 
 from ..log import (
     get_logger,
     get_console_log,
 )
 from ..brokers import get_brokermod
+from ._mngr import (
+    Services,
+)
 
 log = get_logger(__name__)
@@ -142,107 +143,6 @@ def get_tractor_runtime_kwargs() -> dict[str, Any]:
 ]
 
 
-# TODO: factor this into a ``tractor.highlevel`` extension
-# pack for the library.
-class Services:
-
-    actor_n: tractor._supervise.ActorNursery
-    service_n: trio.Nursery
-    debug_mode: bool  # tractor sub-actor debug mode flag
-    service_tasks: dict[
-        str,
-        tuple[
-            trio.CancelScope,
-            tractor.Portal,
-            trio.Event,
-        ]
-    ] = {}
-    locks = defaultdict(trio.Lock)
-
-    @classmethod
-    async def start_service_task(
-        self,
-        name: str,
-        portal: tractor.Portal,
-        target: Callable,
-        **kwargs,
-
-    ) -> (trio.CancelScope, tractor.Context):
-        '''
-        Open a context in a service sub-actor, add to a stack
-        that gets unwound at ``pikerd`` teardown.
-
-        This allows for allocating long-running sub-services in our main
-        daemon and explicitly controlling their lifetimes.
-
-        '''
-        async def open_context_in_task(
-            task_status: TaskStatus[
-                tuple[
-                    trio.CancelScope,
-                    trio.Event,
-                    Any,
-                ]
-            ] = trio.TASK_STATUS_IGNORED,
-
-        ) -> Any:
-
-            with trio.CancelScope() as cs:
-                async with portal.open_context(
-                    target,
-                    **kwargs,
-
-                ) as (ctx, first):
-
-                    # unblock once the remote context has started
-                    complete = trio.Event()
-                    task_status.started((cs, complete, first))
-                    log.info(
-                        f'`pikerd` service {name} started with value {first}'
-                    )
-                    try:
-                        # wait on any context's return value
-                        # and any final portal result from the
-                        # sub-actor.
-                        ctx_res = await ctx.result()
-
-                        # NOTE: blocks indefinitely until cancelled
-                        # either by error from the target context
-                        # function or by being cancelled here by the
-                        # surrounding cancel scope.
-                        return (await portal.result(), ctx_res)
-
-                    finally:
-                        await portal.cancel_actor()
-                        complete.set()
-                        self.service_tasks.pop(name)
-
-        cs, complete, first = await self.service_n.start(open_context_in_task)
-
-        # store the cancel scope and portal for later cancellation or
-        # retstart if needed.
-        self.service_tasks[name] = (cs, portal, complete)
-
-        return cs, first
-
-    @classmethod
-    async def cancel_service(
-        self,
-        name: str,
-
-    ) -> Any:
-        '''
-        Cancel the service task and actor for the given ``name``.
-
-        '''
-        log.info(f'Cancelling `pikerd` service {name}')
-        cs, portal, complete = self.service_tasks[name]
-        cs.cancel()
-        await complete.wait()
-        assert name not in self.service_tasks, \
-            f'Serice task for {name} not terminated?'

diff --git a/piker/service/_mngr.py b/piker/service/_mngr.py
new file mode 100644
index 0000000000..04f396af6f
--- /dev/null
+++ b/piker/service/_mngr.py
@@ -0,0 +1,136 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+daemon-service management API.
+
+"""
+from collections import defaultdict
+from typing import (
+    Callable,
+    Any,
+)
+
+import trio
+from trio_typing import TaskStatus
+import tractor
+
+from ..log import (
+    get_logger,
+)
+
+log = get_logger(__name__)
+
+
+# TODO: factor this into a ``tractor.highlevel`` extension
+# pack for the library.
+class Services:
+
+    actor_n: tractor._supervise.ActorNursery
+    service_n: trio.Nursery
+    debug_mode: bool  # tractor sub-actor debug mode flag
+    service_tasks: dict[
+        str,
+        tuple[
+            trio.CancelScope,
+            tractor.Portal,
+            trio.Event,
+        ]
+    ] = {}
+    locks = defaultdict(trio.Lock)
+
+    @classmethod
+    async def start_service_task(
+        self,
+        name: str,
+        portal: tractor.Portal,
+        target: Callable,
+        **kwargs,
+
+    ) -> (trio.CancelScope, tractor.Context):
+        '''
+        Open a context in a service sub-actor, add to a stack
+        that gets unwound at ``pikerd`` teardown.
+
+        This allows for allocating long-running sub-services in our main
+        daemon and explicitly controlling their lifetimes.
+
+        '''
+        async def open_context_in_task(
+            task_status: TaskStatus[
+                tuple[
+                    trio.CancelScope,
+                    trio.Event,
+                    Any,
+                ]
+            ] = trio.TASK_STATUS_IGNORED,
+
+        ) -> Any:
+
+            with trio.CancelScope() as cs:
+                async with portal.open_context(
+                    target,
+                    **kwargs,
+
+                ) as (ctx, first):
+
+                    # unblock once the remote context has started
+                    complete = trio.Event()
+                    task_status.started((cs, complete, first))
+                    log.info(
+                        f'`pikerd` service {name} started with value {first}'
+                    )
+                    try:
+                        # wait on any context's return value
+                        # and any final portal result from the
+                        # sub-actor.
+                        ctx_res = await ctx.result()
+
+                        # NOTE: blocks indefinitely until cancelled
+                        # either by error from the target context
+                        # function or by being cancelled here by the
+                        # surrounding cancel scope.
+                        return (await portal.result(), ctx_res)
+
+                    finally:
+                        await portal.cancel_actor()
+                        complete.set()
+                        self.service_tasks.pop(name)
+
+        cs, complete, first = await self.service_n.start(open_context_in_task)
+
+        # store the cancel scope and portal for later cancellation or
+        # restart if needed.
+        self.service_tasks[name] = (cs, portal, complete)
+
+        return cs, first
+
+    @classmethod
+    async def cancel_service(
+        self,
+        name: str,
+
+    ) -> Any:
+        '''
+        Cancel the service task and actor for the given ``name``.
+
+        '''
+        log.info(f'Cancelling `pikerd` service {name}')
+        cs, portal, complete = self.service_tasks[name]
+        cs.cancel()
+        await complete.wait()
+        assert name not in self.service_tasks, \
+            f'Service task for {name} not terminated?'

From a2d40937a3b030fd6e24a4abec0f32ab85e23bca Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 16:05:49 -0500
Subject: [PATCH 018/136] Move actor-discovery utils to `.service._registry`

---
 piker/service/__init__.py  | 117 +++---------------
 piker/service/_registry.py | 144 +++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+), 106 deletions(-)
 create mode 100644 piker/service/_registry.py

diff --git a/piker/service/__init__.py b/piker/service/__init__.py
index e5e2c1fa1e..32a05ae5b1 100644
--- a/piker/service/__init__.py
+++ b/piker/service/__init__.py
@@ -43,88 +43,24 @@
 from ._mngr import (
     Services,
 )
-
-
-log = get_logger(__name__)
-
-_root_dname = 'pikerd'
-
-_default_registry_host: str = '127.0.0.1'
-_default_registry_port: int = 6116
-_default_reg_addr: tuple[str, int] = (
+from ._registry import (  # noqa
+    _tractor_kwargs,
+    _default_reg_addr,
     _default_registry_host,
     _default_registry_port,
+    open_registry,
+    find_service,
+    check_for_service,
 )
 
+log = get_logger(__name__)
 
-# NOTE: this value is set as an actor-global once the first endpoint
-# who is capable, spawns a `pikerd` service tree.
-_registry: Registry | None = None
-
-
-class Registry:
-    addr: None | tuple[str, int] = None
-
-    # TODO: table of uids to sockaddrs
-    peers: dict[
-        tuple[str, str],
-        tuple[str, int],
-    ] = {}
-
-
-_tractor_kwargs: dict[str, Any] = {}
-
-
-@acm
-async def open_registry(
-    addr: None | tuple[str, int] = None,
-    ensure_exists: bool = True,
-
-) -> tuple[str, int]:
-
-    global _tractor_kwargs
-    actor = tractor.current_actor()
-    uid = actor.uid
-    if (
-        Registry.addr is not None
-        and addr
-    ):
-        raise RuntimeError(
-            f'`{uid}` registry addr already bound @ {_registry.sockaddr}'
-        )
-
-    was_set: bool = False
-
-    if (
-        not tractor.is_root_process()
-        and Registry.addr is None
-    ):
-        Registry.addr = actor._arb_addr
-
-    if (
-        ensure_exists
-        and Registry.addr is None
-    ):
-        raise RuntimeError(
-            f"`{uid}` registry should already exist bug doesn't?"
-        )
-
-    if (
-        Registry.addr is None
-    ):
-        was_set = True
-        Registry.addr = addr or _default_reg_addr
 
-    _tractor_kwargs['arbiter_addr'] = Registry.addr
+__all__ = [
+    'check_for_service',
+]
 
-    try:
-        yield Registry.addr
-    finally:
-        # XXX: always clear the global addr if we set it so that the
-        # next (set of) calls will apply whatever new one is passed
-        # in.
-        if was_set:
-            Registry.addr = None
+_root_dname = 'pikerd'
 
 
-@acm
-async def find_service(
-    service_name: str,
-) -> tractor.Portal | None:
-
-    async with open_registry() as reg_addr:
-        log.info(f'Scanning for service `{service_name}`')
-        # attach to existing daemon by name if possible
-        async with tractor.find_actor(
-            service_name,
-            arbiter_sockaddr=reg_addr,
-        ) as maybe_portal:
-            yield maybe_portal
-
-
-async def check_for_service(
-    service_name: str,
-
-) -> None | tuple[str, int]:
-    '''
-    Service daemon "liveness" predicate.
- - ''' - async with open_registry(ensure_exists=False) as reg_addr: - async with tractor.query_actor( - service_name, - arbiter_sockaddr=reg_addr, - ) as sockaddr: - return sockaddr - - @acm async def maybe_spawn_daemon( diff --git a/piker/service/_registry.py b/piker/service/_registry.py new file mode 100644 index 0000000000..f487e2a464 --- /dev/null +++ b/piker/service/_registry.py @@ -0,0 +1,144 @@ +# piker: trading gear for hackers +# Copyright (C) Tyler Goodlet (in stewardship for pikers) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +""" +Inter-actor "discovery" (protocol) layer. + +""" +from __future__ import annotations +from contextlib import ( + asynccontextmanager as acm, +) +from typing import ( + Any, +) + +import tractor + + +from ..log import ( + get_logger, +) + +log = get_logger(__name__) + +_default_registry_host: str = '127.0.0.1' +_default_registry_port: int = 6116 +_default_reg_addr: tuple[str, int] = ( + _default_registry_host, + _default_registry_port, +) + + +# NOTE: this value is set as an actor-global once the first endpoint +# who is capable, spawns a `pikerd` service tree. +_registry: Registry | None = None + + +class Registry: + addr: None | tuple[str, int] = None + + # TODO: table of uids to sockaddrs + peers: dict[ + tuple[str, str], + tuple[str, int], + ] = {} + + +_tractor_kwargs: dict[str, Any] = {} + + +@acm +async def open_registry( + addr: None | tuple[str, int] = None, + ensure_exists: bool = True, + +) -> tuple[str, int]: + + global _tractor_kwargs + actor = tractor.current_actor() + uid = actor.uid + if ( + Registry.addr is not None + and addr + ): + raise RuntimeError( + f'`{uid}` registry addr already bound @ {_registry.sockaddr}' + ) + + was_set: bool = False + + if ( + not tractor.is_root_process() + and Registry.addr is None + ): + Registry.addr = actor._arb_addr + + if ( + ensure_exists + and Registry.addr is None + ): + raise RuntimeError( + f"`{uid}` registry should already exist bug doesn't?" + ) + + if ( + Registry.addr is None + ): + was_set = True + Registry.addr = addr or _default_reg_addr + + _tractor_kwargs['arbiter_addr'] = Registry.addr + + try: + yield Registry.addr + finally: + # XXX: always clear the global addr if we set it so that the + # next (set of) calls will apply whatever new one is passed + # in. + if was_set: + Registry.addr = None + + +@acm +async def find_service( + service_name: str, +) -> tractor.Portal | None: + + async with open_registry() as reg_addr: + log.info(f'Scanning for service `{service_name}`') + # attach to existing daemon by name if possible + async with tractor.find_actor( + service_name, + arbiter_sockaddr=reg_addr, + ) as maybe_portal: + yield maybe_portal + + +async def check_for_service( + service_name: str, + +) -> None | tuple[str, int]: + ''' + Service daemon "liveness" predicate. 
+ + ''' + async with open_registry(ensure_exists=False) as reg_addr: + async with tractor.query_actor( + service_name, + arbiter_sockaddr=reg_addr, + ) as sockaddr: + return sockaddr From eca048c0c582019acc2fb23e7c0b463bc4d26fc9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Mar 2023 16:20:45 -0500 Subject: [PATCH 019/136] Move daemon spawning endpoints to `service._daemon` module --- piker/service/__init__.py | 238 ++------------------------------- piker/service/_daemon.py | 271 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 279 insertions(+), 230 deletions(-) create mode 100644 piker/service/_daemon.py diff --git a/piker/service/__init__.py b/piker/service/__init__.py index e365e7fd07..32a05ae5b1 100644 --- a/piker/service/__init__.py +++ b/piker/service/__init__.py @@ -24,7 +24,6 @@ import os from typing import ( Optional, - Callable, Any, ClassVar, ) @@ -39,7 +38,6 @@ get_logger, get_console_log, ) -from ..brokers import get_brokermod from ._mngr import ( Services, ) @@ -52,6 +50,13 @@ find_service, check_for_service, ) +from ._daemon import ( # noqa + maybe_spawn_daemon, + spawn_brokerd, + maybe_spawn_brokerd, + spawn_emsd, + maybe_open_emsd, +) log = get_logger(__name__) @@ -73,6 +78,7 @@ def get_tractor_runtime_kwargs() -> dict[str, Any]: _root_modules = [ __name__, + 'piker.service._daemon', 'piker.clearing._ems', 'piker.clearing._client', 'piker.data._sampling', @@ -337,231 +343,3 @@ async def maybe_open_pikerd( # we return no portal to self. assert service_manager yield service_manager - - -# `brokerd` enabled modules -# NOTE: keeping this list as small as possible is part of our caps-sec -# model and should be treated with utmost care! -_data_mods = [ - 'piker.brokers.core', - 'piker.brokers.data', - 'piker.data', - 'piker.data.feed', - 'piker.data._sampling' -] - - -@acm -async def maybe_spawn_daemon( - - service_name: str, - service_task_target: Callable, - spawn_args: dict[str, Any], - loglevel: Optional[str] = None, - - singleton: bool = False, - **kwargs, - -) -> tractor.Portal: - ''' - If no ``service_name`` daemon-actor can be found, - spawn one in a local subactor and return a portal to it. - - If this function is called from a non-pikerd actor, the - spawned service will persist as long as pikerd does or - it is requested to be cancelled. - - This can be seen as a service starting api for remote-actor - clients. - - ''' - if loglevel: - get_console_log(loglevel) - - # serialize access to this section to avoid - # 2 or more tasks racing to create a daemon - lock = Services.locks[service_name] - await lock.acquire() - - async with find_service(service_name) as portal: - if portal is not None: - lock.release() - yield portal - return - - log.warning(f"Couldn't find any existing {service_name}") - - # TODO: really shouldn't the actor spawning be part of the service - # starting method `Services.start_service()` ? - - # ask root ``pikerd`` daemon to spawn the daemon we need if - # pikerd is not live we now become the root of the - # process tree - async with maybe_open_pikerd( - - loglevel=loglevel, - **kwargs, - - ) as pikerd_portal: - - # we are the root and thus are `pikerd` - # so spawn the target service directly by calling - # the provided target routine. - # XXX: this assumes that the target is well formed and will - # do the right things to setup both a sub-actor **and** call - # the ``_Services`` api from above to start the top level - # service task for that actor. 
- started: bool - if pikerd_portal is None: - started = await service_task_target(**spawn_args) - - else: - # tell the remote `pikerd` to start the target, - # the target can't return a non-serializable value - # since it is expected that service startingn is - # non-blocking and the target task will persist running - # on `pikerd` after the client requesting it's start - # disconnects. - started = await pikerd_portal.run( - service_task_target, - **spawn_args, - ) - - if started: - log.info(f'Service {service_name} started!') - - async with tractor.wait_for_actor(service_name) as portal: - lock.release() - yield portal - await portal.cancel_actor() - - -async def spawn_brokerd( - - brokername: str, - loglevel: Optional[str] = None, - **tractor_kwargs, - -) -> bool: - - log.info(f'Spawning {brokername} broker daemon') - - brokermod = get_brokermod(brokername) - dname = f'brokerd.{brokername}' - - extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {}) - tractor_kwargs.update(extra_tractor_kwargs) - - # ask `pikerd` to spawn a new sub-actor and manage it under its - # actor nursery - modpath = brokermod.__name__ - broker_enable = [modpath] - for submodname in getattr( - brokermod, - '__enable_modules__', - [], - ): - subpath = f'{modpath}.{submodname}' - broker_enable.append(subpath) - - portal = await Services.actor_n.start_actor( - dname, - enable_modules=_data_mods + broker_enable, - loglevel=loglevel, - debug_mode=Services.debug_mode, - **tractor_kwargs - ) - - # non-blocking setup of brokerd service nursery - from ..data import _setup_persistent_brokerd - - await Services.start_service_task( - dname, - portal, - _setup_persistent_brokerd, - brokername=brokername, - ) - return True - - -@acm -async def maybe_spawn_brokerd( - - brokername: str, - loglevel: Optional[str] = None, - **kwargs, - -) -> tractor.Portal: - ''' - Helper to spawn a brokerd service *from* a client - who wishes to use the sub-actor-daemon. - - ''' - async with maybe_spawn_daemon( - - f'brokerd.{brokername}', - service_task_target=spawn_brokerd, - spawn_args={ - 'brokername': brokername, - 'loglevel': loglevel, - }, - loglevel=loglevel, - **kwargs, - - ) as portal: - yield portal - - -async def spawn_emsd( - - loglevel: Optional[str] = None, - **extra_tractor_kwargs - -) -> bool: - """ - Start the clearing engine under ``pikerd``. 
- - """ - log.info('Spawning emsd') - - portal = await Services.actor_n.start_actor( - 'emsd', - enable_modules=[ - 'piker.clearing._ems', - 'piker.clearing._client', - ], - loglevel=loglevel, - debug_mode=Services.debug_mode, # set by pikerd flag - **extra_tractor_kwargs - ) - - # non-blocking setup of clearing service - from ..clearing._ems import _setup_persistent_emsd - - await Services.start_service_task( - 'emsd', - portal, - _setup_persistent_emsd, - ) - return True - - -@acm -async def maybe_open_emsd( - - brokername: str, - loglevel: Optional[str] = None, - **kwargs, - -) -> tractor._portal.Portal: # noqa - - async with maybe_spawn_daemon( - - 'emsd', - service_task_target=spawn_emsd, - spawn_args={'loglevel': loglevel}, - loglevel=loglevel, - **kwargs, - - ) as portal: - yield portal diff --git a/piker/service/_daemon.py b/piker/service/_daemon.py new file mode 100644 index 0000000000..8cb9054fa6 --- /dev/null +++ b/piker/service/_daemon.py @@ -0,0 +1,271 @@ +# piker: trading gear for hackers +# Copyright (C) Tyler Goodlet (in stewardship for pikers) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +""" +Daemon-actor spawning "endpoint-hooks". + +""" +from __future__ import annotations +from typing import ( + Optional, + Callable, + Any, +) +from contextlib import ( + asynccontextmanager as acm, +) + +import tractor + +from ..log import ( + get_logger, + get_console_log, +) +from ..brokers import get_brokermod +from ._mngr import ( + Services, +) +from ._registry import find_service + +log = get_logger(__name__) + +# `brokerd` enabled modules +# NOTE: keeping this list as small as possible is part of our caps-sec +# model and should be treated with utmost care! +_data_mods = [ + 'piker.brokers.core', + 'piker.brokers.data', + 'piker.data', + 'piker.data.feed', + 'piker.data._sampling' +] + + +@acm +async def maybe_spawn_daemon( + + service_name: str, + service_task_target: Callable, + spawn_args: dict[str, Any], + loglevel: Optional[str] = None, + + singleton: bool = False, + **kwargs, + +) -> tractor.Portal: + ''' + If no ``service_name`` daemon-actor can be found, + spawn one in a local subactor and return a portal to it. + + If this function is called from a non-pikerd actor, the + spawned service will persist as long as pikerd does or + it is requested to be cancelled. + + This can be seen as a service starting api for remote-actor + clients. + + ''' + if loglevel: + get_console_log(loglevel) + + # serialize access to this section to avoid + # 2 or more tasks racing to create a daemon + lock = Services.locks[service_name] + await lock.acquire() + + async with find_service(service_name) as portal: + if portal is not None: + lock.release() + yield portal + return + + log.warning(f"Couldn't find any existing {service_name}") + + # TODO: really shouldn't the actor spawning be part of the service + # starting method `Services.start_service()` ? 
+ + # ask root ``pikerd`` daemon to spawn the daemon we need if + # pikerd is not live we now become the root of the + # process tree + from . import maybe_open_pikerd + async with maybe_open_pikerd( + + loglevel=loglevel, + **kwargs, + + ) as pikerd_portal: + + # we are the root and thus are `pikerd` + # so spawn the target service directly by calling + # the provided target routine. + # XXX: this assumes that the target is well formed and will + # do the right things to setup both a sub-actor **and** call + # the ``_Services`` api from above to start the top level + # service task for that actor. + started: bool + if pikerd_portal is None: + started = await service_task_target(**spawn_args) + + else: + # tell the remote `pikerd` to start the target, + # the target can't return a non-serializable value + # since it is expected that service startingn is + # non-blocking and the target task will persist running + # on `pikerd` after the client requesting it's start + # disconnects. + started = await pikerd_portal.run( + service_task_target, + **spawn_args, + ) + + if started: + log.info(f'Service {service_name} started!') + + async with tractor.wait_for_actor(service_name) as portal: + lock.release() + yield portal + await portal.cancel_actor() + + +async def spawn_brokerd( + + brokername: str, + loglevel: Optional[str] = None, + **tractor_kwargs, + +) -> bool: + + log.info(f'Spawning {brokername} broker daemon') + + brokermod = get_brokermod(brokername) + dname = f'brokerd.{brokername}' + + extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {}) + tractor_kwargs.update(extra_tractor_kwargs) + + # ask `pikerd` to spawn a new sub-actor and manage it under its + # actor nursery + modpath = brokermod.__name__ + broker_enable = [modpath] + for submodname in getattr( + brokermod, + '__enable_modules__', + [], + ): + subpath = f'{modpath}.{submodname}' + broker_enable.append(subpath) + + portal = await Services.actor_n.start_actor( + dname, + enable_modules=_data_mods + broker_enable, + loglevel=loglevel, + debug_mode=Services.debug_mode, + **tractor_kwargs + ) + + # non-blocking setup of brokerd service nursery + from ..data import _setup_persistent_brokerd + + await Services.start_service_task( + dname, + portal, + _setup_persistent_brokerd, + brokername=brokername, + ) + return True + + +@acm +async def maybe_spawn_brokerd( + + brokername: str, + loglevel: Optional[str] = None, + **kwargs, + +) -> tractor.Portal: + ''' + Helper to spawn a brokerd service *from* a client + who wishes to use the sub-actor-daemon. + + ''' + async with maybe_spawn_daemon( + + f'brokerd.{brokername}', + service_task_target=spawn_brokerd, + spawn_args={ + 'brokername': brokername, + 'loglevel': loglevel, + }, + loglevel=loglevel, + **kwargs, + + ) as portal: + yield portal + + +async def spawn_emsd( + + loglevel: Optional[str] = None, + **extra_tractor_kwargs + +) -> bool: + """ + Start the clearing engine under ``pikerd``. 
+ + """ + log.info('Spawning emsd') + + portal = await Services.actor_n.start_actor( + 'emsd', + enable_modules=[ + 'piker.clearing._ems', + 'piker.clearing._client', + ], + loglevel=loglevel, + debug_mode=Services.debug_mode, # set by pikerd flag + **extra_tractor_kwargs + ) + + # non-blocking setup of clearing service + from ..clearing._ems import _setup_persistent_emsd + + await Services.start_service_task( + 'emsd', + portal, + _setup_persistent_emsd, + ) + return True + + +@acm +async def maybe_open_emsd( + + brokername: str, + loglevel: Optional[str] = None, + **kwargs, + +) -> tractor._portal.Portal: # noqa + + async with maybe_spawn_daemon( + + 'emsd', + service_task_target=spawn_emsd, + spawn_args={'loglevel': loglevel}, + loglevel=loglevel, + **kwargs, + + ) as portal: + yield portal From f95ea19b21a792bdf36a2c3b10dfa1454f820606 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Mar 2023 16:28:38 -0500 Subject: [PATCH 020/136] Move `pikerd` runtime boostrap to `.service._actor_runtime` --- piker/service/__init__.py | 319 ++----------------------------- piker/service/_actor_runtime.py | 329 ++++++++++++++++++++++++++++++++ 2 files changed, 346 insertions(+), 302 deletions(-) create mode 100644 piker/service/_actor_runtime.py diff --git a/piker/service/__init__.py b/piker/service/__init__.py index 32a05ae5b1..3b9767cdc1 100644 --- a/piker/service/__init__.py +++ b/piker/service/__init__.py @@ -19,28 +19,8 @@ """ from __future__ import annotations -from pprint import pformat -from functools import partial -import os -from typing import ( - Optional, - Any, - ClassVar, -) -from contextlib import ( - asynccontextmanager as acm, -) - -import tractor -import trio -from ..log import ( - get_logger, - get_console_log, -) -from ._mngr import ( - Services, -) +from ._mngr import Services from ._registry import ( # noqa _tractor_kwargs, _default_reg_addr, @@ -57,289 +37,24 @@ spawn_emsd, maybe_open_emsd, ) - -log = get_logger(__name__) +from ._actor_runtime import ( + open_piker_runtime, + maybe_open_pikerd, + open_pikerd, + get_tractor_runtime_kwargs, +) __all__ = [ 'check_for_service', + 'Services', + 'maybe_spawn_daemon', + 'spawn_brokerd', + 'maybe_spawn_brokerd', + 'spawn_emsd', + 'maybe_open_emsd', + 'open_piker_runtime', + 'maybe_open_pikerd', + 'open_pikerd', + 'get_tractor_runtime_kwargs', ] - -_root_dname = 'pikerd' - - -def get_tractor_runtime_kwargs() -> dict[str, Any]: - ''' - Deliver ``tractor`` related runtime variables in a `dict`. - - ''' - return _tractor_kwargs - - -_root_modules = [ - __name__, - 'piker.service._daemon', - 'piker.clearing._ems', - 'piker.clearing._client', - 'piker.data._sampling', -] - - -@acm -async def open_piker_runtime( - name: str, - enable_modules: list[str] = [], - loglevel: Optional[str] = None, - - # XXX NOTE XXX: you should pretty much never want debug mode - # for data daemons when running in production. - debug_mode: bool = False, - - registry_addr: None | tuple[str, int] = None, - - # TODO: once we have `rsyscall` support we will read a config - # and spawn the service tree distributed per that. - start_method: str = 'trio', - - **tractor_kwargs, - -) -> tuple[ - tractor.Actor, - tuple[str, int], -]: - ''' - Start a piker actor who's runtime will automatically sync with - existing piker actors on the local link based on configuration. - - Can be called from a subactor or any program that needs to start - a root actor. 
- - ''' - try: - # check for existing runtime - actor = tractor.current_actor().uid - - except tractor._exceptions.NoRuntime: - - registry_addr = registry_addr or _default_reg_addr - - async with ( - tractor.open_root_actor( - - # passed through to ``open_root_actor`` - arbiter_addr=registry_addr, - name=name, - loglevel=loglevel, - debug_mode=debug_mode, - start_method=start_method, - - # TODO: eventually we should be able to avoid - # having the root have more then permissions to - # spawn other specialized daemons I think? - enable_modules=enable_modules, - - **tractor_kwargs, - ) as _, - - open_registry(registry_addr, ensure_exists=False) as addr, - ): - yield ( - tractor.current_actor(), - addr, - ) - else: - async with open_registry(registry_addr) as addr: - yield ( - actor, - addr, - ) - - -@acm -async def open_pikerd( - - loglevel: str | None = None, - - # XXX: you should pretty much never want debug mode - # for data daemons when running in production. - debug_mode: bool = False, - registry_addr: None | tuple[str, int] = None, - - # db init flags - tsdb: bool = False, - es: bool = False, - -) -> Services: - ''' - Start a root piker daemon who's lifetime extends indefinitely until - cancelled. - - A root actor nursery is created which can be used to create and keep - alive underling services (see below). - - ''' - async with ( - open_piker_runtime( - - name=_root_dname, - # TODO: eventually we should be able to avoid - # having the root have more then permissions to - # spawn other specialized daemons I think? - enable_modules=_root_modules, - - loglevel=loglevel, - debug_mode=debug_mode, - registry_addr=registry_addr, - - ) as (root_actor, reg_addr), - tractor.open_nursery() as actor_nursery, - trio.open_nursery() as service_nursery, - ): - if root_actor.accept_addr != reg_addr: - raise RuntimeError(f'Daemon failed to bind on {reg_addr}!?') - - # assign globally for future daemon/task creation - Services.actor_n = actor_nursery - Services.service_n = service_nursery - Services.debug_mode = debug_mode - - if tsdb: - from ._ahab import start_ahab - from .marketstore import start_marketstore - - log.info('Spawning `marketstore` supervisor') - ctn_ready, config, (cid, pid) = await service_nursery.start( - partial( - start_ahab, - 'marketstored', - start_marketstore, - loglevel=loglevel, - ) - - ) - log.info( - f'`marketstored` up!\n' - f'pid: {pid}\n' - f'container id: {cid[:12]}\n' - f'config: {pformat(config)}' - ) - - if es: - from piker.data._ahab import start_ahab - from piker.data.elastic import start_elasticsearch - - log.info('Spawning `elasticsearch` supervisor') - ctn_ready, config, (cid, pid) = await service_nursery.start( - partial( - start_ahab, - 'elasticsearch', - start_elasticsearch, - loglevel=loglevel, - ) - ) - - log.info( - f'`elasticsearch` up!\n' - f'pid: {pid}\n' - f'container id: {cid[:12]}\n' - f'config: {pformat(config)}' - ) - - try: - yield Services - - finally: - # TODO: is this more clever/efficient? - # if 'samplerd' in Services.service_tasks: - # await Services.cancel_service('samplerd') - service_nursery.cancel_scope.cancel() - - -@acm -async def maybe_open_runtime( - loglevel: Optional[str] = None, - **kwargs, - -) -> None: - ''' - Start the ``tractor`` runtime (a root actor) if none exists. 
- - ''' - name = kwargs.pop('name') - - if not tractor.current_actor(err_on_no_runtime=False): - async with open_piker_runtime( - name, - loglevel=loglevel, - **kwargs, - ) as (_, addr): - yield addr, - else: - async with open_registry() as addr: - yield addr - - -@acm -async def maybe_open_pikerd( - loglevel: Optional[str] = None, - registry_addr: None | tuple = None, - tsdb: bool = False, - es: bool = False, - - **kwargs, - -) -> tractor._portal.Portal | ClassVar[Services]: - ''' - If no ``pikerd`` daemon-root-actor can be found start it and - yield up (we should probably figure out returning a portal to self - though). - - ''' - if loglevel: - get_console_log(loglevel) - - # subtle, we must have the runtime up here or portal lookup will fail - query_name = kwargs.pop('name', f'piker_query_{os.getpid()}') - - # TODO: if we need to make the query part faster we could not init - # an actor runtime and instead just hit the socket? - # from tractor._ipc import _connect_chan, Channel - # async with _connect_chan(host, port) as chan: - # async with open_portal(chan) as arb_portal: - # yield arb_portal - - async with ( - open_piker_runtime( - name=query_name, - registry_addr=registry_addr, - loglevel=loglevel, - **kwargs, - ) as _, - tractor.find_actor( - _root_dname, - arbiter_sockaddr=registry_addr, - ) as portal - ): - # connect to any existing daemon presuming - # its registry socket was selected. - if ( - portal is not None - ): - yield portal - return - - # presume pikerd role since no daemon could be found at - # configured address - async with open_pikerd( - loglevel=loglevel, - debug_mode=kwargs.get('debug_mode', False), - registry_addr=registry_addr, - tsdb=tsdb, - es=es, - - ) as service_manager: - # in the case where we're starting up the - # tractor-piker runtime stack in **this** process - # we return no portal to self. - assert service_manager - yield service_manager diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py new file mode 100644 index 0000000000..257babdf9d --- /dev/null +++ b/piker/service/_actor_runtime.py @@ -0,0 +1,329 @@ +# piker: trading gear for hackers +# Copyright (C) Tyler Goodlet (in stewardship for pikers) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +""" +``tractor`` wrapping + default config to bootstrap the `pikerd`. + +""" +from __future__ import annotations +from pprint import pformat +from functools import partial +import os +from typing import ( + Optional, + Any, + ClassVar, +) +from contextlib import ( + asynccontextmanager as acm, +) + +import tractor +import trio + +from ..log import ( + get_logger, + get_console_log, +) +from ._mngr import ( + Services, +) +from ._registry import ( # noqa + _tractor_kwargs, + _default_reg_addr, + open_registry, +) + +log = get_logger(__name__) + +_root_dname = 'pikerd' + + +def get_tractor_runtime_kwargs() -> dict[str, Any]: + ''' + Deliver ``tractor`` related runtime variables in a `dict`. 
+ + ''' + return _tractor_kwargs + + +_root_modules = [ + __name__, + 'piker.service._daemon', + 'piker.clearing._ems', + 'piker.clearing._client', + 'piker.data._sampling', +] + + +@acm +async def open_piker_runtime( + name: str, + enable_modules: list[str] = [], + loglevel: Optional[str] = None, + + # XXX NOTE XXX: you should pretty much never want debug mode + # for data daemons when running in production. + debug_mode: bool = False, + + registry_addr: None | tuple[str, int] = None, + + # TODO: once we have `rsyscall` support we will read a config + # and spawn the service tree distributed per that. + start_method: str = 'trio', + + **tractor_kwargs, + +) -> tuple[ + tractor.Actor, + tuple[str, int], +]: + ''' + Start a piker actor who's runtime will automatically sync with + existing piker actors on the local link based on configuration. + + Can be called from a subactor or any program that needs to start + a root actor. + + ''' + try: + # check for existing runtime + actor = tractor.current_actor().uid + + except tractor._exceptions.NoRuntime: + + registry_addr = registry_addr or _default_reg_addr + + async with ( + tractor.open_root_actor( + + # passed through to ``open_root_actor`` + arbiter_addr=registry_addr, + name=name, + loglevel=loglevel, + debug_mode=debug_mode, + start_method=start_method, + + # TODO: eventually we should be able to avoid + # having the root have more then permissions to + # spawn other specialized daemons I think? + enable_modules=enable_modules, + + **tractor_kwargs, + ) as _, + + open_registry(registry_addr, ensure_exists=False) as addr, + ): + yield ( + tractor.current_actor(), + addr, + ) + else: + async with open_registry(registry_addr) as addr: + yield ( + actor, + addr, + ) + + +@acm +async def open_pikerd( + + loglevel: str | None = None, + + # XXX: you should pretty much never want debug mode + # for data daemons when running in production. + debug_mode: bool = False, + registry_addr: None | tuple[str, int] = None, + + # db init flags + tsdb: bool = False, + es: bool = False, + +) -> Services: + ''' + Start a root piker daemon who's lifetime extends indefinitely until + cancelled. + + A root actor nursery is created which can be used to create and keep + alive underling services (see below). + + ''' + async with ( + open_piker_runtime( + + name=_root_dname, + # TODO: eventually we should be able to avoid + # having the root have more then permissions to + # spawn other specialized daemons I think? 
+ enable_modules=_root_modules, + + loglevel=loglevel, + debug_mode=debug_mode, + registry_addr=registry_addr, + + ) as (root_actor, reg_addr), + tractor.open_nursery() as actor_nursery, + trio.open_nursery() as service_nursery, + ): + if root_actor.accept_addr != reg_addr: + raise RuntimeError(f'Daemon failed to bind on {reg_addr}!?') + + # assign globally for future daemon/task creation + Services.actor_n = actor_nursery + Services.service_n = service_nursery + Services.debug_mode = debug_mode + + if tsdb: + from ._ahab import start_ahab + from .marketstore import start_marketstore + + log.info('Spawning `marketstore` supervisor') + ctn_ready, config, (cid, pid) = await service_nursery.start( + partial( + start_ahab, + 'marketstored', + start_marketstore, + loglevel=loglevel, + ) + + ) + log.info( + f'`marketstored` up!\n' + f'pid: {pid}\n' + f'container id: {cid[:12]}\n' + f'config: {pformat(config)}' + ) + + if es: + from piker.data._ahab import start_ahab + from piker.data.elastic import start_elasticsearch + + log.info('Spawning `elasticsearch` supervisor') + ctn_ready, config, (cid, pid) = await service_nursery.start( + partial( + start_ahab, + 'elasticsearch', + start_elasticsearch, + loglevel=loglevel, + ) + ) + + log.info( + f'`elasticsearch` up!\n' + f'pid: {pid}\n' + f'container id: {cid[:12]}\n' + f'config: {pformat(config)}' + ) + + try: + yield Services + + finally: + # TODO: is this more clever/efficient? + # if 'samplerd' in Services.service_tasks: + # await Services.cancel_service('samplerd') + service_nursery.cancel_scope.cancel() + + +@acm +async def maybe_open_runtime( + loglevel: Optional[str] = None, + **kwargs, + +) -> None: + ''' + Start the ``tractor`` runtime (a root actor) if none exists. + + ''' + name = kwargs.pop('name') + + if not tractor.current_actor(err_on_no_runtime=False): + async with open_piker_runtime( + name, + loglevel=loglevel, + **kwargs, + ) as (_, addr): + yield addr, + else: + async with open_registry() as addr: + yield addr + + +@acm +async def maybe_open_pikerd( + loglevel: Optional[str] = None, + registry_addr: None | tuple = None, + tsdb: bool = False, + es: bool = False, + + **kwargs, + +) -> tractor._portal.Portal | ClassVar[Services]: + ''' + If no ``pikerd`` daemon-root-actor can be found start it and + yield up (we should probably figure out returning a portal to self + though). + + ''' + if loglevel: + get_console_log(loglevel) + + # subtle, we must have the runtime up here or portal lookup will fail + query_name = kwargs.pop('name', f'piker_query_{os.getpid()}') + + # TODO: if we need to make the query part faster we could not init + # an actor runtime and instead just hit the socket? + # from tractor._ipc import _connect_chan, Channel + # async with _connect_chan(host, port) as chan: + # async with open_portal(chan) as arb_portal: + # yield arb_portal + + async with ( + open_piker_runtime( + name=query_name, + registry_addr=registry_addr, + loglevel=loglevel, + **kwargs, + ) as _, + tractor.find_actor( + _root_dname, + arbiter_sockaddr=registry_addr, + ) as portal + ): + # connect to any existing daemon presuming + # its registry socket was selected. 
+ if ( + portal is not None + ): + yield portal + return + + # presume pikerd role since no daemon could be found at + # configured address + async with open_pikerd( + loglevel=loglevel, + debug_mode=kwargs.get('debug_mode', False), + registry_addr=registry_addr, + tsdb=tsdb, + es=es, + + ) as service_manager: + # in the case where we're starting up the + # tractor-piker runtime stack in **this** process + # we return no portal to self. + assert service_manager + yield service_manager From cec29670714b7f0b477244dbf190a1ce9d142167 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Mar 2023 16:47:28 -0500 Subject: [PATCH 021/136] Import `maybe_open_pikerd` at module level --- piker/service/_daemon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/service/_daemon.py b/piker/service/_daemon.py index 8cb9054fa6..45d6cb81e6 100644 --- a/piker/service/_daemon.py +++ b/piker/service/_daemon.py @@ -38,6 +38,7 @@ from ._mngr import ( Services, ) +from ._actor_runtime import maybe_open_pikerd from ._registry import find_service log = get_logger(__name__) @@ -100,7 +101,6 @@ async def maybe_spawn_daemon( # ask root ``pikerd`` daemon to spawn the daemon we need if # pikerd is not live we now become the root of the # process tree - from . import maybe_open_pikerd async with maybe_open_pikerd( loglevel=loglevel, From 441243f83bccd2938cb3b705ae0484cf6dd2dbda Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Mar 2023 20:06:27 -0500 Subject: [PATCH 022/136] Attempt to report `piker storage -d ` errors Not really sure there's much we can do besides dump Grpc stuff when we detect an "error" `str` for the moment.. Either way leave a buncha complaints (como siempre) and do linting fixups.. --- piker/data/cli.py | 61 ++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/piker/data/cli.py b/piker/data/cli.py index 6f4e169de5..6984d9ff61 100644 --- a/piker/data/cli.py +++ b/piker/data/cli.py @@ -18,34 +18,22 @@ marketstore cli. """ -from functools import partial -from pprint import ( - pformat, - pprint, -) - -from anyio_marketstore import open_marketstore_client import trio import tractor import click -import numpy as np from ..service.marketstore import ( - get_client, + # get_client, # stream_quotes, ingest_quote_stream, # _url, - _tick_tbk_ids, - mk_tbk, + # _tick_tbk_ids, + # mk_tbk, ) from ..cli import cli from .. import watchlists as wl -from ..log import get_logger -from ._sharedmem import ( - maybe_open_shm_array, -) -from ._source import ( - base_iohlc_dtype, +from ..log import ( + get_logger, ) @@ -92,16 +80,16 @@ async def main(): # async def main(): # nonlocal names # async with get_client(url) as client: -# +# # if not names: # names = await client.list_symbols() -# +# # # default is to wipe db entirely. # answer = input( # "This will entirely wipe you local marketstore db @ " # f"{url} of the following symbols:\n {pformat(names)}" # "\n\nDelete [N/y]?\n") -# +# # if answer == 'y': # for sym in names: # # tbk = _tick_tbk.format(sym) @@ -110,7 +98,7 @@ async def main(): # await client.destroy(mk_tbk(tbk)) # else: # print("Nothing deleted.") -# +# # tractor.run(main) @@ -148,7 +136,7 @@ async def main(): ): symbol = symbols[0] - async with open_tsdb_client(symbol) as storage: + async with open_tsdb_client(symbol): # TODO: ask if user wants to write history for detected # available shm buffers? 
             from tractor.trionics import ipython_embed
@@ -186,7 +174,7 @@ def storage(
     Start an IPython shell ready to query the local marketstore db.
 
     '''
-    from piker.data.marketstore import open_tsdb_client
+    from piker.service.marketstore import open_tsdb_client
     from piker.service import open_piker_runtime
 
     async def main():
@@ -201,9 +189,28 @@ async def main():
             if delete:
                 for fqsn in symbols:
                     syms = await storage.client.list_symbols()
-                    breakpoint()
-                    await storage.delete_ts(fqsn, 60)
-                    await storage.delete_ts(fqsn, 1)
+
+                    resp60s = await storage.delete_ts(fqsn, 60)
+
+                    msgish = resp60s.ListFields()[0][1]
+                    if 'error' in str(msgish):
+
+                        # TODO: MEGA LOL, apparently the symbols don't
+                        # flush out until you refresh something or other
+                        # (maybe the WALFILE)... #lelandorlulzone, classic
+                        # alpaca(Rtm) design here ..
+                        # well, if we ever can make this work we
+                        # probably want to dogsplain the real reason
+                        # for the delete errurz..llululu
+                        if fqsn not in syms:
+                            log.error(f'Pair {fqsn} dne in DB')
+
+                        log.error(f'Deletion error: {fqsn}\n{msgish}')
+
+                    resp1s = await storage.delete_ts(fqsn, 1)
+                    msgish = resp1s.ListFields()[0][1]
+                    if 'error' in str(msgish):
+                        log.error(f'Deletion error: {fqsn}\n{msgish}')
 
     trio.run(main)
 
@@ -237,7 +244,7 @@ def ingest(config, name, test_file, tl):
 
     async def entry_point():
         async with tractor.open_nursery() as n:
-            for provider, symbols in grouped_syms.items(): 
+            for provider, symbols in grouped_syms.items():
                 await n.run_in_actor(
                     ingest_quote_stream,
                     name='ingest_marketstore',

From 7cc99115655de4bb6ae711ae55e455d9660b9917 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 20:08:27 -0500
Subject: [PATCH 023/136] Don't crash on a `xdotool` timeout..

---
 piker/brokers/ib/_util.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/piker/brokers/ib/_util.py b/piker/brokers/ib/_util.py
index c7a499091c..d6491ee734 100644
--- a/piker/brokers/ib/_util.py
+++ b/piker/brokers/ib/_util.py
@@ -177,8 +177,11 @@ def i3ipc_xdotool_manual_click_hack() -> None:
     )
 
     # re-activate and focus original window
-    subprocess.call([
-        'xdotool',
-        'windowactivate', '--sync', str(orig_win_id),
-        'click', '--window', str(orig_win_id), '1',
-    ])
+    try:
+        subprocess.call([
+            'xdotool',
+            'windowactivate', '--sync', str(orig_win_id),
+            'click', '--window', str(orig_win_id), '1',
+        ])
+    except subprocess.TimeoutExpired:
+        log.exception('xdotool timed out?')

From cda7a54718329771524662a9b64d3a3bdd6829f4 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 8 Mar 2023 20:20:11 -0500
Subject: [PATCH 024/136] Fix final missed `marketstore` mod import

Thanks @esme! XD Also, do a linter pass and remove a buncha unused
references.
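
For reference, the now-correct test-side usage looks roughly like the
following (a sketch only; the `chk_version()` wrapper is made up for
illustration and is not part of this diff):

    import trio
    from piker.service import marketstore

    async def chk_version() -> None:
        # presumes a locally running `marketstored` instance
        async with marketstore.get_client() as client:
            # server versions are git-SHA-style 40 char strings
            assert len(await client.server_version()) == 40

    trio.run(chk_version)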
--- tests/test_databases.py | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/tests/test_databases.py b/tests/test_databases.py index 7fcee34ade..a469abd5fe 100644 --- a/tests/test_databases.py +++ b/tests/test_databases.py @@ -1,25 +1,19 @@ -import pytest import trio from typing import AsyncContextManager -from piker.service import Services -from piker.log import get_logger - from elasticsearch import Elasticsearch -from piker.data import marketstore +from piker.service import marketstore + def test_marketstore_startup_and_version( open_test_pikerd: AsyncContextManager, loglevel, ): - ''' Verify marketstore starts correctly ''' - log = get_logger(__name__) - async def main(): # port = 5995 @@ -36,7 +30,6 @@ async def main(): len('3862e9973da36cfc6004b88172c08f09269aaf01') ) - trio.run(main) @@ -49,8 +42,6 @@ def test_elasticsearch_startup_and_version( ''' - log = get_logger(__name__) - async def main(): port = 19200 @@ -62,5 +53,4 @@ async def main(): es = Elasticsearch(hosts=[f'http://localhost:{port}']) assert es.info()['version']['number'] == '7.17.4' - trio.run(main) From fbc12b1b077d0e83685fd5249a929687d871ed94 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Mar 2023 21:20:41 -0500 Subject: [PATCH 025/136] Add 10min timeout on CI job.. --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f799bc2269..65b020f1dd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,6 +36,7 @@ jobs: testing: name: 'install + test-suite' + timeout-minutes: 10 runs-on: ubuntu-latest steps: From 6540c415c1b84619a43904ce1e93c2983f1bb6eb Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Mar 2023 21:40:28 -0500 Subject: [PATCH 026/136] Lul, fix imports in elasticsearch block.. 
--- piker/service/_actor_runtime.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py index 257babdf9d..e07f342e8f 100644 --- a/piker/service/_actor_runtime.py +++ b/piker/service/_actor_runtime.py @@ -209,8 +209,8 @@ async def open_pikerd( ) if es: - from piker.data._ahab import start_ahab - from piker.data.elastic import start_elasticsearch + from ._ahab import start_ahab + from .elastic import start_elasticsearch log.info('Spawning `elasticsearch` supervisor') ctn_ready, config, (cid, pid) = await service_nursery.start( From 31392af427204949437846b8289230cce5183afa Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 12:22:33 -0500 Subject: [PATCH 027/136] Move enabled module defs to just above where used --- piker/service/_actor_runtime.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py index e07f342e8f..134b085c49 100644 --- a/piker/service/_actor_runtime.py +++ b/piker/service/_actor_runtime.py @@ -49,8 +49,6 @@ log = get_logger(__name__) -_root_dname = 'pikerd' - def get_tractor_runtime_kwargs() -> dict[str, Any]: ''' @@ -60,15 +58,6 @@ def get_tractor_runtime_kwargs() -> dict[str, Any]: return _tractor_kwargs -_root_modules = [ - __name__, - 'piker.service._daemon', - 'piker.clearing._ems', - 'piker.clearing._client', - 'piker.data._sampling', -] - - @acm async def open_piker_runtime( name: str, @@ -139,6 +128,16 @@ async def open_piker_runtime( ) +_root_dname = 'pikerd' +_root_modules = [ + __name__, + 'piker.service._daemon', + 'piker.clearing._ems', + 'piker.clearing._client', + 'piker.data._sampling', +] + + @acm async def open_pikerd( @@ -155,8 +154,7 @@ async def open_pikerd( ) -> Services: ''' - Start a root piker daemon who's lifetime extends indefinitely until - cancelled. + Start a root piker daemon with an indefinite lifetime. A root actor nursery is created which can be used to create and keep alive underling services (see below). From 75b7a8b56ef15a66a6a9ca499c4e27fbbe09f4a1 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 12:23:02 -0500 Subject: [PATCH 028/136] `marketstore`: Pull default socket from server config --- piker/service/marketstore.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/service/marketstore.py b/piker/service/marketstore.py index e9de95580a..5c4f90db67 100644 --- a/piker/service/marketstore.py +++ b/piker/service/marketstore.py @@ -328,7 +328,7 @@ def quote_to_marketstore_structarray( @acm async def get_client( host: str = 'localhost', - port: int = 5995 + port: int = _config['grpc_listen_port'], ) -> MarketstoreClient: ''' From 2014019b0601398ba3410250f921d611d81c2c05 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 12:23:46 -0500 Subject: [PATCH 029/136] Add reconnect loop to `marketstore` startup test Due to making ahabd supervisor init more async we need to be more tolerant to mkts server startup: the grpc machinery needs to be up otherwise a client which connects to early may just hang on requests.. Add a reconnect loop (which might end up getting factored into client code too) so that we only block on requests once we know the client connection is actually responsive. 
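
A rough sketch of the pattern (the helper name here is hypothetical,
not actual client api):

    import trio
    from piker.service import marketstore

    async def wait_till_responsive(
        attempts: int = 3,
        timeout: float = 1,  # secs per query attempt

    ) -> list[str]:
        for _ in range(attempts):
            # reconnect the client entirely on each attempt since a
            # conn opened *too early* can hang on its first req-resp.
            async with marketstore.get_client() as client:
                with trio.move_on_after(timeout) as cs:
                    syms = await client.list_symbols()

                if cs.cancelled_caught:
                    continue

                return syms

        raise TimeoutError('mkts never came up responsive?')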
--- tests/test_databases.py | 49 ++++++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/tests/test_databases.py b/tests/test_databases.py index a469abd5fe..74f0f240b8 100644 --- a/tests/test_databases.py +++ b/tests/test_databases.py @@ -8,40 +8,67 @@ def test_marketstore_startup_and_version( open_test_pikerd: AsyncContextManager, - loglevel, + loglevel: str, ): ''' - Verify marketstore starts correctly + Verify marketstore tsdb starts up and we can + connect with a client to do basic API reqs. ''' async def main(): - # port = 5995 async with ( open_test_pikerd( loglevel=loglevel, tsdb=True - ) as (s, i, pikerd_portal, services), - marketstore.get_client() as client + ) as ( + _, # host + _, # port + pikerd_portal, + services, + ), ): + # XXX NOTE: we use a retry-connect loop because it seems + # that if we connect *too fast* to a booting container + # instance (i.e. if mkts's IPC machinery isn't up early + # enough) the client will hang on req-resp submissions. So, + # instead we actually reconnect the client entirely in + # a loop until we get a response. + for _ in range(3): - assert ( - len(await client.server_version()) == - len('3862e9973da36cfc6004b88172c08f09269aaf01') - ) + # NOTE: default sockaddr is embedded within + async with marketstore.get_client() as client: + + with trio.move_on_after(1) as cs: + syms = await client.list_symbols() + + if cs.cancelled_caught: + continue + + + # should be an empty db? + assert not syms + print(f'RX syms resp: {syms}') + + assert ( + len(await client.server_version()) == + len('3862e9973da36cfc6004b88172c08f09269aaf01') + ) + print('VERSION CHECKED') + + break # get out of retry-connect loop trio.run(main) def test_elasticsearch_startup_and_version( open_test_pikerd: AsyncContextManager, - loglevel, + loglevel: str, ): ''' Verify elasticsearch starts correctly ''' - async def main(): port = 19200 From aa36abf36e5e13228a90304a03a30379e3e74651 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 14:09:12 -0500 Subject: [PATCH 030/136] Support passing `tractor` "actor runtime vars" down the runtime --- piker/service/_actor_runtime.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py index 134b085c49..3e35864db8 100644 --- a/piker/service/_actor_runtime.py +++ b/piker/service/_actor_runtime.py @@ -74,6 +74,7 @@ async def open_piker_runtime( # and spawn the service tree distributed per that. start_method: str = 'trio', + tractor_runtime_overrides: dict | None = None, **tractor_kwargs, ) -> tuple[ @@ -93,6 +94,8 @@ async def open_piker_runtime( actor = tractor.current_actor().uid except tractor._exceptions.NoRuntime: + tractor._state._runtime_vars[ + 'piker_vars'] = tractor_runtime_overrides registry_addr = registry_addr or _default_reg_addr @@ -152,6 +155,8 @@ async def open_pikerd( tsdb: bool = False, es: bool = False, + **kwargs, + ) -> Services: ''' Start a root piker daemon with an indefinite lifetime. 
@@ -173,6 +178,8 @@ async def open_pikerd( debug_mode=debug_mode, registry_addr=registry_addr, + **kwargs, + ) as (root_actor, reg_addr), tractor.open_nursery() as actor_nursery, trio.open_nursery() as service_nursery, @@ -297,6 +304,7 @@ async def maybe_open_pikerd( loglevel=loglevel, **kwargs, ) as _, + tractor.find_actor( _root_dname, arbiter_sockaddr=registry_addr, @@ -319,6 +327,8 @@ async def maybe_open_pikerd( tsdb=tsdb, es=es, + **kwargs, + ) as service_manager: # in the case where we're starting up the # tractor-piker runtime stack in **this** process From 5aaa7f47dc894282c99008dd9e94588483cacdfc Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 14:09:35 -0500 Subject: [PATCH 031/136] Pull testing config dir from `tractor` runtime vars Provides a more correct solution (particularly for distributed testing) to override the `piker` configuration directory by reading the path from a specific `tractor._state._runtime_vars` entry that can be provided by the test harness. Also fix some typing and comments. --- piker/config.py | 44 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/piker/config.py b/piker/config.py index 3ae6a66520..397342e34e 100644 --- a/piker/config.py +++ b/piker/config.py @@ -15,7 +15,7 @@ # along with this program. If not, see . """ -Broker configuration mgmt. +Platform configuration (files) mgmt. """ import platform @@ -26,17 +26,25 @@ import shutil from typing import Optional from pathlib import Path + from bidict import bidict import toml -from piker.testing import TEST_CONFIG_DIR_PATH + from .log import get_logger log = get_logger('broker-config') -# taken from ``click`` since apparently they have some +# XXX NOTE: taken from ``click`` since apparently they have some # super weirdness with sigint and sudo..no clue -def get_app_dir(app_name, roaming=True, force_posix=False): +# we're probably going to slowly just modify it to our own version over +# time.. +def get_app_dir( + app_name: str, + roaming: bool = True, + force_posix: bool = False, + +) -> str: r"""Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. @@ -75,14 +83,30 @@ def get_app_dir(app_name, roaming=True, force_posix=False): def _posixify(name): return "-".join(name.split()).lower() - # TODO: This is a hacky way to a) determine we're testing - # and b) creating a test dir. We should aim to set a variable - # within the tractor runtimes and store testing config data - # outside of the users filesystem + # NOTE: for testing with `pytest` we leverage the `tmp_dir` + # fixture to generate (and clean up) a test-request-specific + # directory for isolated configuration files such that, + # - multiple tests can run (possibly in parallel) without data races + # on the config state, + # - we don't need to ever worry about leaking configs into the + # system thus avoiding needing to manage config cleaup fixtures or + # other bothers (since obviously `tmp_dir` cleans up after itself). + # + # In order to "pass down" the test dir path to all (sub-)actors in + # the actor tree we preload the root actor's runtime vars state (an + # internal mechanism for inheriting state down an actor tree in + # `tractor`) with the testing dir and check for it whenever we + # detect `pytest` is being used (which it isn't under normal + # operation). 
if "pytest" in sys.modules: - app_name = os.path.join(app_name, TEST_CONFIG_DIR_PATH) + import tractor + actor = tractor.current_actor(err_on_no_runtime=False) + if actor: # runtime is up + rvs = tractor._state._runtime_vars + testdirpath = Path(rvs['piker_vars']['piker_test_dir']) + assert testdirpath.exists(), 'piker test harness might be borked!?' + app_name = str(testdirpath) - # if WIN: if platform.system() == 'Windows': key = "APPDATA" if roaming else "LOCALAPPDATA" folder = os.environ.get(key) From 79b0db44496dfc441a1c59b491b66ed67ec2ed9a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 14:33:12 -0500 Subject: [PATCH 032/136] Pass a config `tmp_dir: Path` to the runtime when testing --- piker/testing/__init__.py | 1 - tests/conftest.py | 47 +++++++++++++++++++++++---------------- tests/test_paper.py | 20 ++++++++--------- 3 files changed, 37 insertions(+), 31 deletions(-) delete mode 100644 piker/testing/__init__.py diff --git a/piker/testing/__init__.py b/piker/testing/__init__.py deleted file mode 100644 index 5e3ac93aca..0000000000 --- a/piker/testing/__init__.py +++ /dev/null @@ -1 +0,0 @@ -TEST_CONFIG_DIR_PATH = '_testing' diff --git a/tests/conftest.py b/tests/conftest.py index 68d392aacc..a7244d82a2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,12 +2,10 @@ from functools import partial import os from pathlib import Path -from shutil import rmtree import pytest import tractor from piker import ( - # log, config, ) from piker.service import ( @@ -71,6 +69,7 @@ def ci_env() -> bool: @acm async def _open_test_pikerd( + tmpconfdir: str, reg_addr: tuple[str, int] | None = None, loglevel: str = 'warning', **kwargs, @@ -97,6 +96,10 @@ async def _open_test_pikerd( maybe_open_pikerd( registry_addr=reg_addr, loglevel=loglevel, + + tractor_runtime_overrides={ + 'piker_test_dir': tmpconfdir, + }, **kwargs, ) as service_manager, ): @@ -119,18 +122,40 @@ async def _open_test_pikerd( @pytest.fixture def open_test_pikerd( - request, + request: pytest.FixtureRequest, + tmp_path: Path, loglevel: str, ): + tmpconfdir: Path = tmp_path / '_testing' + tmpconfdir.mkdir() + tmpconfdir_str: str = str(tmpconfdir) + + # NOTE: on linux the tmp config dir is generally located at: + # /tmp/pytest-of-/pytest-/test_/ + # the default `pytest` config ensures that only the last 4 test + # suite run's dirs will be persisted, otherwise they are removed: + # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory + print(f'CURRENT TEST CONF DIR: {tmpconfdir}') yield partial( _open_test_pikerd, + # pass in a unique temp dir for this test request + # so that we can have multiple tests running (maybe in parallel) + # bwitout clobbering each other's config state. + tmpconfdir=tmpconfdir_str, + # bind in level from fixture, which is itself set by # `--ll ` cli flag. loglevel=loglevel, ) + # NOTE: the `tmp_dir` fixture will wipe any files older then 3 test + # sessions by default: + # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory + # BUT, if we wanted to always wipe conf dir and all contained files, + # rmtree(str(tmp_path)) + # TODO: teardown checks such as, # - no leaked subprocs or shm buffers # - all requested container service are torn down @@ -169,19 +194,3 @@ def open_test_pikerd_and_ems( loglevel, open_test_pikerd ) - - -@pytest.fixture(scope='module') -def delete_testing_dir(): - ''' - This fixture removes the temp directory - used for storing all config/ledger/pp data - created during testing sessions. 
During test runs - this file can be found in .config/piker/_testing - - ''' - yield - app_dir = Path(config.get_app_dir('piker')).resolve() - if app_dir.is_dir(): - rmtree(str(app_dir)) - assert not app_dir.is_dir() diff --git a/tests/test_paper.py b/tests/test_paper.py index 8da1cf122a..53e03f4727 100644 --- a/tests/test_paper.py +++ b/tests/test_paper.py @@ -17,7 +17,6 @@ from piker.log import get_logger from piker.clearing._messages import Order from piker.pp import ( - open_trade_ledger, open_pps, ) @@ -42,18 +41,19 @@ async def _async_main( price: int = 30000, executions: int = 1, size: float = 0.01, + # Assert options assert_entries: bool = False, assert_pps: bool = False, assert_zeroed_pps: bool = False, assert_msg: bool = False, + ) -> None: ''' Start piker, place a trade and assert data in pps stream, ledger and position table. ''' - oid: str = '' last_msg = {} @@ -136,7 +136,7 @@ def _assert( def _run_test_and_check(fn): - ''' + ''' Close position and assert empty position in pps ''' @@ -150,8 +150,7 @@ def _run_test_and_check(fn): def test_buy( - open_test_pikerd_and_ems: AsyncContextManager, - delete_testing_dir + open_test_pikerd_and_ems: AsyncContextManager, ): ''' Enter a trade and assert entries are made in pps and ledger files. @@ -177,8 +176,7 @@ def test_buy( def test_sell( - open_test_pikerd_and_ems: AsyncContextManager, - delete_testing_dir + open_test_pikerd_and_ems: AsyncContextManager, ): ''' Sell position and ensure pps are zeroed. @@ -201,13 +199,13 @@ def test_sell( ), ) + def test_multi_sell( - open_test_pikerd_and_ems: AsyncContextManager, - delete_testing_dir + open_test_pikerd_and_ems: AsyncContextManager, ): ''' - Make 5 market limit buy orders and - then sell 5 slots at the same price. + Make 5 market limit buy orders and + then sell 5 slots at the same price. Finally, assert cleared positions. ''' From 7cc99115655de4bb6ae711ae55e455d9660b9917 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 14:54:46 -0500 Subject: [PATCH 033/136] Add connection poll loop to es test as well --- tests/test_databases.py | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/tests/test_databases.py b/tests/test_databases.py index 74f0f240b8..a29d14f31b 100644 --- a/tests/test_databases.py +++ b/tests/test_databases.py @@ -2,7 +2,10 @@ from typing import AsyncContextManager -from elasticsearch import Elasticsearch +from elasticsearch import ( + Elasticsearch, + ConnectionError, +) from piker.service import marketstore @@ -45,8 +48,8 @@ async def main(): if cs.cancelled_caught: continue - - # should be an empty db? + # should be an empty db (for now) since we spawn + # marketstore in a ephemeral test-harness dir. 
assert not syms print(f'RX syms resp: {syms}') @@ -72,12 +75,25 @@ def test_elasticsearch_startup_and_version( async def main(): port = 19200 - async with open_test_pikerd( - loglevel=loglevel, - es=True - ) as (s, i, pikerd_portal, services): + async with ( + open_test_pikerd( + loglevel=loglevel, + es=True + ) as ( + _, # host + _, # port + pikerd_portal, + services, + ), + ): + + for _ in range(240): + try: + es = Elasticsearch(hosts=[f'http://localhost:{port}']) + except ConnectionError: + await trio.sleep(1) + continue - es = Elasticsearch(hosts=[f'http://localhost:{port}']) - assert es.info()['version']['number'] == '7.17.4' + assert es.info()['version']['number'] == '7.17.4' trio.run(main) From 9a00c459233b2366413b16301d08ef1ae7383732 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 17:57:42 -0500 Subject: [PATCH 034/136] Add `log` fixture for easy test plugin --- tests/conftest.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index a7244d82a2..327b71a911 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,6 @@ from contextlib import asynccontextmanager as acm from functools import partial +import logging import os from pathlib import Path @@ -11,6 +12,7 @@ from piker.service import ( Services, ) +from piker.log import get_console_log from piker.clearing._client import open_ems @@ -67,6 +69,21 @@ def ci_env() -> bool: return _ci_env +@pytest.fixture() +def log( + request: pytest.FixtureRequest, + loglevel: str, +) -> logging.Logger: + ''' + Deliver a per-test-named ``piker.log`` instance. + + ''' + return get_console_log( + level=loglevel, + name=request.node.name, + ) + + @acm async def _open_test_pikerd( tmpconfdir: str, From 15064d94cb82eef4488e63db736afde3c674070d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 17:58:13 -0500 Subject: [PATCH 035/136] `ahabd`: Harden cancellation teardown (again XD) Needed to move the startup sequence inside the `try:` block to guarantee we always do the (now shielded) `.cancel()` call if we get a cancel during startup. Also, support an optional `started_afunc` field in the config if backends want to just provide a one-off blocking async func to sync container startup. Add a `drop_root_perms: bool` to allow persisting sudo perms for testing or dyanmic container spawning purposes. --- piker/service/_ahab.py | 107 +++++++++++++++++++++++------------------ 1 file changed, 61 insertions(+), 46 deletions(-) diff --git a/piker/service/_ahab.py b/piker/service/_ahab.py index 38d4a9e7b2..7c3133e119 100644 --- a/piker/service/_ahab.py +++ b/piker/service/_ahab.py @@ -15,7 +15,8 @@ # along with this program. If not, see . ''' -Supervisor for docker with included specific-image service helpers. +Supervisor for ``docker`` with included async and SC wrapping +to ensure a cancellable container lifetime system. 
''' from collections import ChainMap @@ -349,8 +350,8 @@ async def open_ahabd( ( dcntr, cntr_config, - start_lambda, - stop_lambda, + start_pred, + stop_pred, ) = ep_func(client) cntr = Container(dcntr) @@ -375,48 +376,58 @@ async def open_ahabd( # when read using: # ``json.loads(entry for entry in DockerContainer.logs())`` 'log_msg_key': 'msg', + + + # startup sync func, like `Nursery.started()` + 'started_afunc': None, }, ) - with trio.move_on_after(conf['startup_timeout']) as cs: - async with trio.open_nursery() as tn: - tn.start_soon( - partial( - cntr.process_logs_until, - log_msg_key=conf['log_msg_key'], - patt_matcher=start_lambda, - checkpoint_period=conf['startup_query_period'], + try: + with trio.move_on_after(conf['startup_timeout']) as cs: + async with trio.open_nursery() as tn: + tn.start_soon( + partial( + cntr.process_logs_until, + log_msg_key=conf['log_msg_key'], + patt_matcher=start_pred, + checkpoint_period=conf['startup_query_period'], + ) ) - ) - - # poll for container startup or timeout - while not cs.cancel_called: - if dcntr in client.containers.list(): - break - - await trio.sleep(conf['startup_query_period']) - - # sync with remote caller actor-task but allow log - # processing to continue running in bg. - await ctx.started(( - cntr.cntr.id, - os.getpid(), - cntr_config, - )) - try: - # XXX: if we timeout on finding the "startup msg" we expect then - # we want to FOR SURE raise an error upwards! - if cs.cancelled_caught: - # if dcntr not in client.containers.list(): - for entry in cntr.seen_so_far: - log.info(entry) - - raise DockerNotStarted( - f'Failed to start container: {cntr.cuid}\n' - f'due to startup_timeout={conf["startup_timeout"]}s\n\n' - "prolly you should check your container's logs for deats.." - ) + # optional blocking routine + started = conf['started_afunc'] + if started: + await started() + + # poll for container startup or timeout + while not cs.cancel_called: + if dcntr in client.containers.list(): + break + + await trio.sleep(conf['startup_query_period']) + + # sync with remote caller actor-task but allow log + # processing to continue running in bg. + await ctx.started(( + cntr.cntr.id, + os.getpid(), + cntr_config, + )) + + # XXX: if we timeout on finding the "startup msg" we + # expect then we want to FOR SURE raise an error + # upwards! + if cs.cancelled_caught: + # if dcntr not in client.containers.list(): + for entry in cntr.seen_so_far: + log.info(entry) + + raise DockerNotStarted( + f'Failed to start container: {cntr.cuid}\n' + f'due to timeout={conf["startup_timeout"]}s\n\n' + "check ur container's logs!" + ) # TODO: we might eventually want a proxy-style msg-prot here # to allow remote control of containers without needing @@ -430,17 +441,18 @@ async def open_ahabd( # on ctl-c from user.. ideally we can avoid a cancel getting # consumed and not propagating whilst still doing teardown # logging.. - # with trio.CancelScope(shield=True): - await cntr.cancel( - log_msg_key=conf['log_msg_key'], - stop_predicate=stop_lambda, - ) + with trio.CancelScope(shield=True): + await cntr.cancel( + log_msg_key=conf['log_msg_key'], + stop_predicate=stop_pred, + ) async def start_ahab( service_name: str, endpoint: Callable[docker.DockerClient, DockerContainer], loglevel: str | None = 'cancel', + drop_root_perms: bool = True, task_status: TaskStatus[ tuple[ @@ -477,7 +489,10 @@ async def start_ahab( # de-escalate root perms to the original user # after the docker supervisor actor is spawned. 
- if config._parent_user:
+ if (
+ drop_root_perms
+ and config._parent_user
+ ):
 import pwd
 os.setuid(
 pwd.getpwnam(
From 0772b4a0faac7aef77eebd4259e76b8f43136357 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Thu, 9 Mar 2023 18:33:13 -0500
Subject: [PATCH 036/136] Hard code version from our container, predicate
 renaming

---
 piker/service/elastic.py | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/piker/service/elastic.py b/piker/service/elastic.py
index fadcaa5e8e..31221d5700 100644
--- a/piker/service/elastic.py
+++ b/piker/service/elastic.py
@@ -40,6 +40,9 @@
 _config = {
 'port': 19200,
 'log_level': 'debug',
+
+ # hardcoded to our image version
+ 'version': '7.17.4',
 }


@@ -77,21 +80,31 @@ def start_elasticsearch(
 remove=True
 )

- async def start_matcher(msg: str):
+ async def health_query(msg: str | None = None):
+ if (
+ msg
+ and _config['version'] in msg
+ ):
+ return True
+
 try:
 health = (await asks.get(
 'http://localhost:19200/_cat/health',
 params={'format': 'json'}
 )).json()
+ log.info(
+ 'ElasticSearch cntr health:\n'
+ f'{health}'
+ )
 except OSError:
- log.error('couldnt reach elastic container')
+ log.exception('couldnt reach elastic container')
 return False

 log.info(health)
 return health[0]['status'] == 'green'

- async def stop_matcher(msg: str):
+ async def chk_for_closed_msg(msg: str):
 return msg == 'closed'

 return (
@@ -106,8 +119,10 @@ async def stop_matcher(msg: str):
 'startup_query_period': 0.1,
 'log_msg_key': 'message',
+
+ # 'started_afunc': health_query,
 },
 # expected startup and stop msgs
- start_matcher,
- stop_matcher,
+ health_query,
+ chk_for_closed_msg,
 )
From 44a31155393ed8bd9be4e89b4555aeb22c161526 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Thu, 9 Mar 2023 18:34:21 -0500
Subject: [PATCH 037/136] Expose `drop_root_perms_for_ahab` from `pikerd`
 factories to `ahabd`

---
 piker/service/_actor_runtime.py | 52 ++++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 20 deletions(-)

diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py
index 3e35864db8..42829990f0 100644
--- a/piker/service/_actor_runtime.py
+++ b/piker/service/_actor_runtime.py
@@ -154,6 +154,7 @@ async def open_pikerd(
 # db init flags
 tsdb: bool = False,
 es: bool = False,
+ drop_root_perms_for_ahab: bool = True,

 **kwargs,

@@ -203,6 +204,7 @@ async def open_pikerd(
 'marketstored',
 start_marketstore,
 loglevel=loglevel,
+ drop_root_perms=drop_root_perms_for_ahab,
 )
 )

@@ -224,6 +226,7 @@ async def open_pikerd(
 'elasticsearch',
 start_elasticsearch,
 loglevel=loglevel,
+ drop_root_perms=drop_root_perms_for_ahab,
 )
 )

@@ -244,28 +247,29 @@ async def open_pikerd(
 service_nursery.cancel_scope.cancel()


-@acm
-async def maybe_open_runtime(
- loglevel: Optional[str] = None,
- **kwargs,
+# TODO: do we even need this?
+# @acm
+# async def maybe_open_runtime(
+# loglevel: Optional[str] = None,
+# **kwargs,

-) -> None:
- '''
- Start the ``tractor`` runtime (a root actor) if none exists.
+# ) -> None:
+# '''
+# Start the ``tractor`` runtime (a root actor) if none exists.
- ''' - name = kwargs.pop('name') +# ''' +# name = kwargs.pop('name') - if not tractor.current_actor(err_on_no_runtime=False): - async with open_piker_runtime( - name, - loglevel=loglevel, - **kwargs, - ) as (_, addr): - yield addr, - else: - async with open_registry() as addr: - yield addr +# if not tractor.current_actor(err_on_no_runtime=False): +# async with open_piker_runtime( +# name, +# loglevel=loglevel, +# **kwargs, +# ) as (_, addr): +# yield addr, +# else: +# async with open_registry() as addr: +# yield addr @acm @@ -274,6 +278,7 @@ async def maybe_open_pikerd( registry_addr: None | tuple = None, tsdb: bool = False, es: bool = False, + drop_root_perms_for_ahab: bool = True, **kwargs, @@ -288,7 +293,10 @@ async def maybe_open_pikerd( get_console_log(loglevel) # subtle, we must have the runtime up here or portal lookup will fail - query_name = kwargs.pop('name', f'piker_query_{os.getpid()}') + query_name = kwargs.pop( + 'name', + f'piker_query_{os.getpid()}', + ) # TODO: if we need to make the query part faster we could not init # an actor runtime and instead just hit the socket? @@ -324,9 +332,13 @@ async def maybe_open_pikerd( loglevel=loglevel, debug_mode=kwargs.get('debug_mode', False), registry_addr=registry_addr, + + # ahabd (docker super) specific controls tsdb=tsdb, es=es, + drop_root_perms_for_ahab=drop_root_perms_for_ahab, + # passthrough to ``tractor`` init **kwargs, ) as service_manager: From 97290fcb05e84f5c8bbcdf2e43692ed6c10f518f Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 18:34:47 -0500 Subject: [PATCH 038/136] Never drop root perms in test harness --- tests/conftest.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 327b71a911..3a0afba246 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -117,7 +117,13 @@ async def _open_test_pikerd( tractor_runtime_overrides={ 'piker_test_dir': tmpconfdir, }, + + # tests may need to spawn containers dynamically + # or just in sequence per test, so we keep root. + drop_root_perms_for_ahab=False, + **kwargs, + ) as service_manager, ): # this proc/actor is the pikerd From 8ceaa278725bd2bee258d3dff3784f121d96a265 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Mar 2023 18:36:45 -0500 Subject: [PATCH 039/136] Add ES client polling to ensure eventual connectivity.. --- tests/test_databases.py | 49 +++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/tests/test_databases.py b/tests/test_databases.py index a29d14f31b..554b099020 100644 --- a/tests/test_databases.py +++ b/tests/test_databases.py @@ -1,12 +1,14 @@ -import trio - from typing import AsyncContextManager +import logging +import trio from elasticsearch import ( Elasticsearch, ConnectionError, ) + from piker.service import marketstore +from piker.service import elastic def test_marketstore_startup_and_version( @@ -31,6 +33,9 @@ async def main(): services, ), ): + # TODO: we should probably make this connection poll + # loop part of the `get_client()` implementation no? + # XXX NOTE: we use a retry-connect loop because it seems # that if we connect *too fast* to a booting container # instance (i.e. if mkts's IPC machinery isn't up early @@ -67,9 +72,11 @@ async def main(): def test_elasticsearch_startup_and_version( open_test_pikerd: AsyncContextManager, loglevel: str, + log: logging.Logger, ): ''' - Verify elasticsearch starts correctly + Verify elasticsearch starts correctly (like at some point before + infinity time).. 
''' async def main(): @@ -86,14 +93,32 @@ async def main(): services, ), ): - - for _ in range(240): - try: - es = Elasticsearch(hosts=[f'http://localhost:{port}']) - except ConnectionError: - await trio.sleep(1) - continue - - assert es.info()['version']['number'] == '7.17.4' + # TODO: much like the above connect loop for mkts, we should + # probably make this sync start part of the + # ``open_client()`` implementation? + for i in range(240): + with Elasticsearch( + hosts=[f'http://localhost:{port}'] + ) as es: + try: + + resp = es.info() + assert ( + resp['version']['number'] + == + elastic._config['version'] + ) + print( + "OMG ELASTIX FINALLY CUKCING CONNECTED!>!>!\n" + f'resp: {resp}' + ) + break + + except ConnectionError: + log.exception( + f'RETRYING client connection for {i} time!' + ) + await trio.sleep(1) + continue trio.run(main) From 12883c3c9065e8d7c0a32136e74f7d1ea94e81b3 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 10 Mar 2023 09:56:55 -0500 Subject: [PATCH 040/136] Don't double send `enable_modules` and `debug_mode` in kwargs.. This broke non-disti-mode actor tree spawn / runtime, seemingly because the cli entrypoint for a `piker chart` also sends these values down through the call stack independently? Pretty sure we don't need to send the `enable_modules` from the chart actor anyway. --- piker/service/_actor_runtime.py | 2 -- piker/ui/cli.py | 3 --- 2 files changed, 5 deletions(-) diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py index 42829990f0..b92ad221d7 100644 --- a/piker/service/_actor_runtime.py +++ b/piker/service/_actor_runtime.py @@ -174,7 +174,6 @@ async def open_pikerd( # having the root have more then permissions to # spawn other specialized daemons I think? enable_modules=_root_modules, - loglevel=loglevel, debug_mode=debug_mode, registry_addr=registry_addr, @@ -330,7 +329,6 @@ async def maybe_open_pikerd( # configured address async with open_pikerd( loglevel=loglevel, - debug_mode=kwargs.get('debug_mode', False), registry_addr=registry_addr, # ahabd (docker super) specific controls diff --git a/piker/ui/cli.py b/piker/ui/cli.py index 9b8385f21f..15b3e9f616 100644 --- a/piker/ui/cli.py +++ b/piker/ui/cli.py @@ -181,9 +181,6 @@ def chart( 'debug_mode': pdb, 'loglevel': tractorloglevel, 'name': 'chart', - 'enable_modules': [ - 'piker.clearing._client' - ], 'registry_addr': config.get('registry_addr'), }, ) From 223e9d999c3175b42f8a5cd30f15a82954ca2935 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 15 Jan 2023 13:32:22 -0500 Subject: [PATCH 041/136] Add first-draft `PlotItemOverlay.group_maxmin()` Computes the maxmin values for each underlying plot's in-view range as well as the max up/down swing (in percentage terms) from the plot with most dispersion and returns a all these values plus a `dict` of plots to their ranges as part of output. --- piker/ui/_overlay.py | 97 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 93 insertions(+), 4 deletions(-) diff --git a/piker/ui/_overlay.py b/piker/ui/_overlay.py index 7a5f047d1f..a17c1173d9 100644 --- a/piker/ui/_overlay.py +++ b/piker/ui/_overlay.py @@ -22,7 +22,6 @@ from functools import partial from typing import ( Callable, - Optional, ) from pyqtgraph.graphicsItems.AxisItem import AxisItem @@ -246,7 +245,7 @@ def get_axis( plot: PlotItem, name: str, - ) -> Optional[AxisItem]: + ) -> AxisItem | None: ''' Retrieve the named axis for overlayed ``plot`` or ``None`` if axis for that name is not shown. 
@@ -321,7 +320,7 @@ def overlays(self) -> list[PlotItem]: def add_plotitem( self, plotitem: PlotItem, - index: Optional[int] = None, + index: int | None = None, # event/signal names which will be broadcasted to all added # (relayee) ``PlotItem``s (eg. ``ViewBox.mouseDragEvent``). @@ -376,7 +375,7 @@ def broadcast( # TODO: drop this viewbox specific input and # allow a predicate to be passed in by user. - axis: 'Optional[int]' = None, + axis: int | None = None, *, @@ -578,3 +577,93 @@ def _disconnect_all( # ''' # ... + + def group_maxmin( + self, + focus_around: str | None = None, + force_min: float | None = None, + + ) -> tuple[ + float, # mn + float, # mx + float, # max range in % terms of highest sigma plot's y-range + PlotItem, # front/selected plot + ]: + ''' + Overlay "group" maxmin sorting. + + Assumes all named flows are in the same co-domain and thus can + be sorted as one set. + + Iterates all the named flows and calls the chart api to find + their range values and return. + + TODO: really we should probably have a more built-in API for + this? + + ''' + # TODO: + # - use this in the ``.ui._fsp`` mutli-maxmin stuff + # - + + # force 0 to always be in view + group_mx: float = 0 + group_mn: float = 0 + mx_up_rng: float = 0 + mn_down_rng: float = 0 + pis2ranges: dict[ + PlotItem, + tuple[float, float], + ] = {} + + for pi in self.overlays: + + # TODO: can we remove this from the widget + # and place somewhere more related to UX/Viz? + # name = pi.name + # chartw = pi.chart_widget + viz = pi.viz + # viz = chartw._vizs[name] + + out = viz.maxmin() + if out is None: + return None + + ( + (x_start, x_stop), + read_slc, + (ymn, ymx), + ) = out + + arr = viz.shm.array + + y_start = arr[read_slc.start - 1] + y_stop = arr[read_slc.stop - 1] + if viz.is_ohlc: + y_start = y_start['open'] + y_stop = y_stop['close'] + else: + y_start = y_start[viz.name] + y_stop = y_stop[viz.name] + + # update max for group + up_rng = (ymx - y_start) / y_start + down_rng = (y_stop - ymn) / y_stop + + # compute directional (up/down) y-range % swing/dispersion + mx_up_rng = max(mx_up_rng, up_rng) + mn_down_rng = min(mn_down_rng, down_rng) + + pis2ranges[pi] = (ymn, ymx) + + group_mx = max(group_mx, ymx) + if force_min is None: + group_mn = min(group_mn, ymn) + + return ( + group_mn if force_min is None else force_min, + group_mx, + mn_down_rng, + mx_up_rng, + pis2ranges, + ) From fc73becd5fa2b42d1f528ae8fc0ffa84e926660e Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 18 Jan 2023 15:07:15 -0500 Subject: [PATCH 042/136] Drop Qt interaction signal usage It's kind of hard to understand with the C++ fan-out to multiple views (imo a cluster-f#$*&) and seems honestly just plain faster to loop (in python) through all the linked view handlers XD Core adjustments: - make the panning and wheel-scroll handlers just call `.maybe_downsample_graphics()` directly; drop all signal emissions. - make `.maybe_downsample_graphics()` loop through all vizs per subchart and use the new pipeline-style call sequence of: - `Viz.update_graphics() -> : tuple` - `Viz.maxmin(i_read_range=) -> yrange: tuple` - `Viz.plot.vb._set_yrange(yrange=yrange)` which inlines all the necessary calls in the most efficient way whilst leveraging `.maxmin()` caching and ymxmn-from-m4-during-render to boot. - drop registering `._set_yrange()` for handling `.sigRangeChangedManually`. 
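
In rough sketch form the new inlined per-`Viz` call sequence reads as
(assembled from the hunks below, not a verbatim excerpt):

    i_read_range, _ = viz.update_graphics()
    _, _, yrange = viz.maxmin(i_read_range=i_read_range)
    viz.plot.vb._set_yrange(yrange=yrange)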
--- piker/ui/_interaction.py | 98 ++++++++++++++++++++++++---------------- 1 file changed, 59 insertions(+), 39 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index c0e22d5001..18b485f0ae 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -395,6 +395,10 @@ def __init__( self._ic = None self._yranger: Callable | None = None + # TODO: probably just assign this whenever a new `PlotItem` is + # allocated since they're 1to1 with views.. + self._viz: Viz | None = None + def start_ic( self, ) -> None: @@ -533,6 +537,7 @@ def wheelEvent( # scale_y = 1.3 ** (center.y() * -1 / 20) self.scaleBy(s, center) + # zoom in view-box area else: # use right-most point of current curve graphic xl = viz.graphics.x_last() @@ -552,7 +557,7 @@ def wheelEvent( # update, but i gotta feelin that because this one is signal # based (and thus not necessarily sync invoked right away) # that calling the resize method manually might work better. - self.sigRangeChangedManually.emit(mask) + # self.sigRangeChangedManually.emit(mask) # XXX: without this is seems as though sometimes # when zooming in from far out (and maybe vice versa?) @@ -581,7 +586,10 @@ def mouseDragEvent( button = ev.button() # Ignore axes if mouse is disabled - mouseEnabled = np.array(self.state['mouseEnabled'], dtype=np.float) + mouseEnabled = np.array( + self.state['mouseEnabled'], + dtype=np.float, + ) mask = mouseEnabled.copy() if axis is not None: mask[1-axis] = 0.0 @@ -664,7 +672,10 @@ def mouseDragEvent( if x is not None or y is not None: self.translateBy(x=x, y=y) - self.sigRangeChangedManually.emit(self.state['mouseEnabled']) + # self.sigRangeChangedManually.emit(mask) + # self.state['mouseEnabled'] + # ) + self.maybe_downsample_graphics() if ev.isFinish(): self.signal_ic() @@ -672,8 +683,8 @@ def mouseDragEvent( # self._ic = None # self.chart.resume_all_feeds() - # XXX: WHY - ev.accept() + # # XXX: WHY + # ev.accept() # WEIRD "RIGHT-CLICK CENTER ZOOM" MODE elif button & QtCore.Qt.RightButton: @@ -695,10 +706,12 @@ def mouseDragEvent( center = Point(tr.map(ev.buttonDownPos(QtCore.Qt.RightButton))) self._resetTarget() self.scaleBy(x=x, y=y, center=center) - self.sigRangeChangedManually.emit(self.state['mouseEnabled']) - # XXX: WHY - ev.accept() + # self.sigRangeChangedManually.emit(self.state['mouseEnabled']) + self.maybe_downsample_graphics() + + # XXX: WHY + ev.accept() # def mouseClickEvent(self, event: QtCore.QEvent) -> None: # '''This routine is rerouted to an async handler. @@ -837,12 +850,6 @@ def enable_auto_yrange( viz=viz, ) - # widget-UIs/splitter(s) resizing - src_vb.sigResized.connect(self._yranger) - - # mouse wheel doesn't emit XRangeChanged - src_vb.sigRangeChangedManually.connect(self._yranger) - # re-sampling trigger: # TODO: a smarter way to avoid calling this needlessly? # 2 things i can think of: @@ -850,24 +857,33 @@ def enable_auto_yrange( # iterate those. # - only register this when certain downsample-able graphics are # "added to scene". - src_vb.sigRangeChangedManually.connect( + # src_vb.sigRangeChangedManually.connect( + # self.maybe_downsample_graphics + # ) + + # widget-UIs/splitter(s) resizing + src_vb.sigResized.connect( self.maybe_downsample_graphics ) + # mouse wheel doesn't emit XRangeChanged + # src_vb.sigRangeChangedManually.connect(self._yranger) + def disable_auto_yrange(self) -> None: # XXX: not entirely sure why we can't de-reg this.. 
self.sigResized.disconnect( - self._yranger, + # self._yranger, + self.maybe_downsample_graphics ) - self.sigRangeChangedManually.disconnect( - self._yranger, - ) + # self.sigRangeChangedManually.disconnect( + # self._yranger, + # ) - self.sigRangeChangedManually.disconnect( - self.maybe_downsample_graphics - ) + # self.sigRangeChangedManually.disconnect( + # self.maybe_downsample_graphics + # ) def x_uppx(self) -> float: ''' @@ -889,7 +905,6 @@ def x_uppx(self) -> float: def maybe_downsample_graphics( self, - autoscale_overlays: bool = False, ): profiler = Profiler( msg=f'ChartView.maybe_downsample_graphics() for {self.name}', @@ -912,10 +927,10 @@ def maybe_downsample_graphics( plots |= linked.subplots for chart_name, chart in plots.items(): - for name, flow in chart._vizs.items(): + for name, viz in chart._vizs.items(): if ( - not flow.render + not viz.render # XXX: super important to be aware of this. # or not flow.graphics.isVisible() @@ -925,19 +940,24 @@ def maybe_downsample_graphics( # pass in no array which will read and render from the last # passed array (normally provided by the display loop.) - chart.update_graphics_from_flow(name) - - # for each overlay on this chart auto-scale the - # y-range to max-min values. - # if autoscale_overlays: - # overlay = chart.pi_overlay - # if overlay: - # for pi in overlay.overlays: - # pi.vb._set_yrange( - # # TODO: get the range once up front... - # # bars_range=br, - # viz=pi.viz, - # ) - # profiler('autoscaled linked plots') + i_read_range, _ = viz.update_graphics() + out = viz.maxmin(i_read_range=i_read_range) + if out is None: + log.warning(f'No yrange provided for {name}!?') + return + ( + ixrng, + _, + yrange + ) = out + + # print( + # f'i_read_range: {i_read_range}\n' + # f'ixrng: {ixrng}\n' + # f'yrange: {yrange}\n' + # ) + viz.plot.vb._set_yrange(yrange=yrange) + + profiler(f'autoscaled overlays {chart_name}') profiler(f'<{chart_name}>.update_graphics_from_flow({name})') From f89e11fc7da5768c9a1178a73fe54a7a78857a6f Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 19 Jan 2023 10:20:29 -0500 Subject: [PATCH 043/136] Right, handle y-ranging multiple paths per plot We were hacking this before using the whole `ChartView._maxmin()` setting stuff since in some cases you might want similarly ranged paths on the same view, but of course you need to max/min them together.. This adds that group sorting by using a table of `dict[PlotItem, tuple[float, float]` and taking the abs highest/lowest value for each plot in the viz interaction update loop. Also removes the now commented signal registry calls and thus `._yranger`, drops the `set_range: bool` from `._set_yrange` and adds and extra `.maybe_downsample_graphics()` to the mouse wheel handler to avoid a weird slow debounce where ds-ing is delayed until a further interaction. --- piker/ui/_interaction.py | 170 +++++++++++++++++++++++---------------- 1 file changed, 99 insertions(+), 71 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 18b485f0ae..6ef290b73e 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -20,7 +20,6 @@ """ from __future__ import annotations from contextlib import asynccontextmanager -from functools import partial import time from typing import ( Optional, @@ -393,7 +392,6 @@ def __init__( self.setFocusPolicy(QtCore.Qt.StrongFocus) self._ic = None - self._yranger: Callable | None = None # TODO: probably just assign this whenever a new `PlotItem` is # allocated since they're 1to1 with views.. 
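
The "table" based group-sort described above boils down to the
following per-plot max/min merge (a sketch lifted from this patch's
main hunk, shown in full further below):

    mxmns: dict[pg.PlotItem, tuple[float, float]] = {}
    pi = viz.plot
    mxmn = mxmns.get(pi)
    if mxmn:
        yrange = mxmns[pi] = (
            min(yrange[0], mxmn[0]),
            max(yrange[1], mxmn[1]),
        )
    else:
        mxmns[viz.plot] = yrange
    pi.vb._set_yrange(yrange=yrange)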
@@ -568,6 +566,7 @@ def wheelEvent( # "double work" is causing latency when these missing event # fires don't happen? self.maybe_downsample_graphics() + self.maybe_downsample_graphics() ev.accept() @@ -763,7 +762,6 @@ def _set_yrange( ms_threshold=ms_slower_then, delayed=True, ) - set_range = True chart = self._chart # view has been set in 'axis' mode @@ -772,8 +770,8 @@ def _set_yrange( # - disable autoranging # - remove any y range limits if chart._static_yrange == 'axis': - set_range = False self.setLimits(yMin=None, yMax=None) + return # static y-range has been set likely by # a specialized FSP configuration. @@ -786,48 +784,63 @@ def _set_yrange( elif yrange is not None: ylow, yhigh = yrange - if set_range: - - # XXX: only compute the mxmn range - # if none is provided as input! - if not yrange: + # XXX: only compute the mxmn range + # if none is provided as input! + if not yrange: - if not viz: - breakpoint() + if not viz: + breakpoint() - out = viz.maxmin() - if out is None: - log.warning(f'No yrange provided for {name}!?') - return - ( - ixrng, - _, - yrange - ) = out + out = viz.maxmin() + if out is None: + log.warning(f'No yrange provided for {name}!?') + return + ( + ixrng, + _, + yrange + ) = out - profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}') + profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}') - if yrange is None: - log.warning(f'No yrange provided for {name}!?') - return + if yrange is None: + log.warning(f'No yrange provided for {name}!?') + return ylow, yhigh = yrange - # view margins: stay within a % of the "true range" - diff = yhigh - ylow - ylow = ylow - (diff * range_margin) - yhigh = yhigh + (diff * range_margin) - - # XXX: this often needs to be unset - # to get different view modes to operate - # correctly! - self.setLimits( - yMin=ylow, - yMax=yhigh, - ) - self.setYRange(ylow, yhigh) - profiler(f'set limits: {(ylow, yhigh)}') + # view margins: stay within a % of the "true range" + diff = yhigh - ylow + ylow = max( + ylow - (diff * range_margin), + 0, + ) + yhigh = min( + yhigh + (diff * range_margin), + yhigh * (1 + range_margin), + ) + + # XXX: this often needs to be unset + # to get different view modes to operate + # correctly! + + profiler( + f'set limits {self.name}:\n' + f'ylow: {ylow}\n' + f'yhigh: {yhigh}\n' + ) + self.setYRange( + ylow, + yhigh, + padding=0, + ) + self.setLimits( + yMin=ylow, + yMax=yhigh, + ) + # LOL: yet anothercucking pg buggg.. + # can't use `msg=f'setYRange({ylow}, {yhigh}')` profiler.finish() def enable_auto_yrange( @@ -844,12 +857,6 @@ def enable_auto_yrange( if src_vb is None: src_vb = self - if self._yranger is None: - self._yranger = partial( - self._set_yrange, - viz=viz, - ) - # re-sampling trigger: # TODO: a smarter way to avoid calling this needlessly? # 2 things i can think of: @@ -866,25 +873,13 @@ def enable_auto_yrange( self.maybe_downsample_graphics ) - # mouse wheel doesn't emit XRangeChanged - # src_vb.sigRangeChangedManually.connect(self._yranger) - def disable_auto_yrange(self) -> None: # XXX: not entirely sure why we can't de-reg this.. 
self.sigResized.disconnect( - # self._yranger, self.maybe_downsample_graphics ) - # self.sigRangeChangedManually.disconnect( - # self._yranger, - # ) - - # self.sigRangeChangedManually.disconnect( - # self.maybe_downsample_graphics - # ) - def x_uppx(self) -> float: ''' Return the "number of x units" within a single @@ -908,14 +903,18 @@ def maybe_downsample_graphics( ): profiler = Profiler( msg=f'ChartView.maybe_downsample_graphics() for {self.name}', - disabled=not pg_profile_enabled(), + # disabled=not pg_profile_enabled(), + + # ms_threshold=ms_slower_then, + + disabled=True, + ms_threshold=4, # XXX: important to avoid not seeing underlying # ``.update_graphics_from_flow()`` nested profiling likely # due to the way delaying works and garbage collection of # the profiler in the delegated method calls. - ms_threshold=6, - # ms_threshold=ms_slower_then, + delayed=True, ) # TODO: a faster single-loop-iterator way of doing this XD @@ -927,20 +926,22 @@ def maybe_downsample_graphics( plots |= linked.subplots for chart_name, chart in plots.items(): - for name, viz in chart._vizs.items(): - if ( - not viz.render + mxmns: dict[ + pg.PlotItem, + tuple[float, float], + ] = {} - # XXX: super important to be aware of this. - # or not flow.graphics.isVisible() - ): + for name, viz in chart._vizs.items(): + if not viz.render: # print(f'skipping {flow.name}') continue # pass in no array which will read and render from the last # passed array (normally provided by the display loop.) i_read_range, _ = viz.update_graphics() + profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') + out = viz.maxmin(i_read_range=i_read_range) if out is None: log.warning(f'No yrange provided for {name}!?') @@ -951,13 +952,40 @@ def maybe_downsample_graphics( yrange ) = out - # print( - # f'i_read_range: {i_read_range}\n' - # f'ixrng: {ixrng}\n' - # f'yrange: {yrange}\n' - # ) - viz.plot.vb._set_yrange(yrange=yrange) + pi = viz.plot + mxmn = mxmns.get(pi) + if mxmn: + yrange = mxmns[pi] = ( + min(yrange[0], mxmn[0]), + max(yrange[1], mxmn[1]), + ) + + else: + mxmns[viz.plot] = yrange + + pi.vb._set_yrange(yrange=yrange) + profiler( + f'{viz.name}@{chart_name} `Viz.plot.vb._set_yrange()`' + ) + + # if 'dolla_vlm' in viz.name: + # print( + # f'AUTO-Y-RANGING: {viz.name}\n' + # f'i_read_range: {i_read_range}\n' + # f'ixrng: {ixrng}\n' + # f'yrange: {yrange}\n' + # ) + # ( + # view_xrange, + # view_yrange, + # ) = viz.plot.vb.viewRange() + # print( + # f'{viz.name}@{chart_name}\n' + # f' xRange -> {view_xrange}\n' + # f' yRange -> {view_yrange}\n' + # ) profiler(f'autoscaled overlays {chart_name}') profiler(f'<{chart_name}>.update_graphics_from_flow({name})') + profiler.finish() From 8a5b9f4e8cc10fe225d03ae2e6d8bb24d119106c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 19 Jan 2023 12:00:28 -0500 Subject: [PATCH 044/136] Rename `.maybe_downsample_graphics()` -> `.interact_graphics_cycle()` --- piker/ui/_dataviz.py | 5 ++--- piker/ui/_interaction.py | 24 ++++++++++++++---------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 1e798f68f1..89e4633766 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -1079,9 +1079,8 @@ def default_view( ) if do_ds: - # view.interaction_graphics_cycle() - view.maybe_downsample_graphics() - view._set_yrange(viz=self) + view.interact_graphics_cycle() + # view._set_yrange(viz=self) def incr_info( self, diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 6ef290b73e..868466c261 100644 --- 
a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -509,7 +509,7 @@ def wheelEvent( # return # actual scaling factor - s = 1.015 ** (ev.delta() * -1 / 20) # self.state['wheelScaleFactor']) + s = 1.016 ** (ev.delta() * -1 / 20) # self.state['wheelScaleFactor']) s = [(None if m is False else s) for m in mask] if ( @@ -565,8 +565,8 @@ def wheelEvent( # that never seems to happen? Only question is how much this # "double work" is causing latency when these missing event # fires don't happen? - self.maybe_downsample_graphics() - self.maybe_downsample_graphics() + self.interact_graphics_cycle() + self.interact_graphics_cycle() ev.accept() @@ -674,7 +674,7 @@ def mouseDragEvent( # self.sigRangeChangedManually.emit(mask) # self.state['mouseEnabled'] # ) - self.maybe_downsample_graphics() + self.interact_graphics_cycle() if ev.isFinish(): self.signal_ic() @@ -707,7 +707,7 @@ def mouseDragEvent( self.scaleBy(x=x, y=y, center=center) # self.sigRangeChangedManually.emit(self.state['mouseEnabled']) - self.maybe_downsample_graphics() + self.interact_graphics_cycle() # XXX: WHY ev.accept() @@ -865,19 +865,19 @@ def enable_auto_yrange( # - only register this when certain downsample-able graphics are # "added to scene". # src_vb.sigRangeChangedManually.connect( - # self.maybe_downsample_graphics + # self.interact_graphics_cycle # ) # widget-UIs/splitter(s) resizing src_vb.sigResized.connect( - self.maybe_downsample_graphics + self.interact_graphics_cycle ) def disable_auto_yrange(self) -> None: # XXX: not entirely sure why we can't de-reg this.. self.sigResized.disconnect( - self.maybe_downsample_graphics + self.interact_graphics_cycle ) def x_uppx(self) -> float: @@ -898,11 +898,11 @@ def x_uppx(self) -> float: else: return 0 - def maybe_downsample_graphics( + def interact_graphics_cycle( self, ): profiler = Profiler( - msg=f'ChartView.maybe_downsample_graphics() for {self.name}', + msg=f'ChartView.interact_graphics_cycle() for {self.name}', # disabled=not pg_profile_enabled(), # ms_threshold=ms_slower_then, @@ -927,6 +927,10 @@ def maybe_downsample_graphics( for chart_name, chart in plots.items(): + # Viz "group" maxmins table; presumes that some path + # graphics (and thus their backing data sets) + # are in the same co-domain and thus can be sorted + # as one set per plot. mxmns: dict[ pg.PlotItem, tuple[float, float], From dfc35253ea63bfb9a8aab7db27e31b86d00057ac Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 19 Jan 2023 14:41:17 -0500 Subject: [PATCH 045/136] First draft, group y-minmax transform algo On overlaid ohlc vizs we compute the largest max/min spread and apply that maxmimum "up and down swing" proportion to each `Viz`'s viewbox in the group. We obviously still need to clip to the shortest x-range so that it doesn't look exactly the same as before XD --- piker/ui/_interaction.py | 85 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 80 insertions(+), 5 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 868466c261..170897e1e7 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -46,6 +46,7 @@ if TYPE_CHECKING: from ._chart import ChartPlotWidget from ._dataviz import Viz + # from ._overlay import PlotItemOverlay log = get_logger(__name__) @@ -931,11 +932,18 @@ def interact_graphics_cycle( # graphics (and thus their backing data sets) # are in the same co-domain and thus can be sorted # as one set per plot. 
- mxmns: dict[ + mxmns_by_pi: dict[ pg.PlotItem, tuple[float, float], ] = {} + # collect certain flows into groups and do a common calc to + # determine auto-ranging input for `._set_yrange()`. + mxmn_groups: dict[ + set[Viz], + set[Viz, tuple[float, float]], + ] = {} + for name, viz in chart._vizs.items(): if not viz.render: # print(f'skipping {flow.name}') @@ -957,15 +965,19 @@ def interact_graphics_cycle( ) = out pi = viz.plot - mxmn = mxmns.get(pi) + mxmn = mxmns_by_pi.get(pi) if mxmn: - yrange = mxmns[pi] = ( + yrange = mxmns_by_pi[pi] = ( min(yrange[0], mxmn[0]), max(yrange[1], mxmn[1]), ) else: - mxmns[viz.plot] = yrange + mxmns_by_pi[pi] = yrange + + if viz.is_ohlc: + # print(f'adding {viz.name} to overlay') + mxmn_groups[viz.name] = out pi.vb._set_yrange(yrange=yrange) profiler( @@ -991,5 +1003,68 @@ def interact_graphics_cycle( profiler(f'autoscaled overlays {chart_name}') - profiler(f'<{chart_name}>.update_graphics_from_flow({name})') + profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + + # proportional group auto-scaling per overlay set. + # -> loop through overlays on each multi-chart widget + # and scale all y-ranges based on autoscale config. + group_mx: float = 0 + group_mn: float = 0 + mx_up_rng: float = 0 + mn_down_rng: float = 0 + start_datums: dict[ViewBox, float] = {} + + for viz_name, out in mxmn_groups.items(): + ( + ixrng, + read_slc, + (ymn, ymx), + ) = out + + # determine start datum in view + viz = chart._vizs[viz_name] + arr = viz.shm.array + row_start = arr[read_slc.start - 1] + # row_stop = arr[read_slc.stop - 1] + if viz.is_ohlc: + y_start = row_start['open'] + # y_stop = row_stop['close'] + else: + y_start = row_start[viz.name] + # y_stop = row_stop[viz.name] + + start_datums[viz.plot.vb] = (viz, y_start) + + # update max for group + up_rng = (ymx - y_start) / y_start + down_rng = (ymn - y_start) / y_start + + # compute directional (up/down) y-range % swing/dispersion + mx_up_rng = max(mx_up_rng, up_rng) + mn_down_rng = min(mn_down_rng, down_rng) + + # pis2ranges[pi] = (ymn, ymx) + + group_mx = max(group_mx, ymx) + group_mn = min(group_mn, ymn) + + print( + f'{viz.name}@{chart_name} group mxmn calc\n' + f'ymn: {ymn}\n' + f'ymx: {ymx}\n' + f'down %: {mx_up_rng * 100}\n' + f'up %: {mn_down_rng * 100}\n' + ) + + for view, (viz, ystart) in start_datums.items(): + ymn = ystart * (1 + mn_down_rng) + ymx = ystart * (1 + mx_up_rng) + print( + f'{view.name} APPLY group mxmn\n' + f'ystart: {ystart}\n' + f'ymn: {ymn}\n' + f'ymx: {ymx}\n' + ) + view._set_yrange(yrange=(ymn, ymx)) + profiler.finish() From 52ac1053aaeb523ffe944b1703abfe68d16098f7 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 20 Jan 2023 14:06:36 -0500 Subject: [PATCH 046/136] 2nd try: dispersion normalize y-ranges around median In the dispersion swing calcs, use the series median from the in-view data to determine swing proportions to apply on each "minor curve" (series with lesser dispersion the one with the greatest). Track the major `Viz` as before by max dispersion. Apply the dispersion swing proportions to each minor curve-series in a third loop/pass of all overlay groups: this ensures all overlays are dispersion normalized in their ranges but, minor curves are currently (vertically) centered (vs. the major) via their medians. There is a ton of commented code from attempts to try and vertically align minor curves to the major via the "first datum" in-view/available. This still needs work and we may want to offer it as optional. 
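In sketch form the scaling this ends up applying per minor curve
reads (names taken from the hunks below):

    y_ref = y_median  # in-view median of each series
    up_rng = (ymx - y_ref) / y_ref
    down_rng = (ymn - y_ref) / y_ref
    # then per view, the group-wide max swings are applied
    # around each curve's own median:
    symn = y_median * (1 + mn_down_rng)
    symx = y_median * (1 + mx_up_rng)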
Also adds logic to allow skipping margin adjustments in `._set_yrange()` if you pass `range_margin=None`. --- piker/ui/_interaction.py | 238 +++++++++++++++++++++++++++++---------- 1 file changed, 180 insertions(+), 58 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 170897e1e7..5b15b9791d 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -737,7 +737,7 @@ def _set_yrange( # NOTE: this value pairs (more or less) with L1 label text # height offset from from the bid/ask lines. - range_margin: float = 0.09, + range_margin: float | None = 0.09, bars_range: Optional[tuple[int, int, int, int]] = None, @@ -811,15 +811,16 @@ def _set_yrange( ylow, yhigh = yrange # view margins: stay within a % of the "true range" - diff = yhigh - ylow - ylow = max( - ylow - (diff * range_margin), - 0, - ) - yhigh = min( - yhigh + (diff * range_margin), - yhigh * (1 + range_margin), - ) + if range_margin is not None: + diff = yhigh - ylow + ylow = max( + ylow - (diff * range_margin), + 0, + ) + yhigh = min( + yhigh + (diff * range_margin), + yhigh * (1 + range_margin), + ) # XXX: this often needs to be unset # to get different view modes to operate @@ -979,40 +980,38 @@ def interact_graphics_cycle( # print(f'adding {viz.name} to overlay') mxmn_groups[viz.name] = out - pi.vb._set_yrange(yrange=yrange) - profiler( - f'{viz.name}@{chart_name} `Viz.plot.vb._set_yrange()`' - ) - - # if 'dolla_vlm' in viz.name: - # print( - # f'AUTO-Y-RANGING: {viz.name}\n' - # f'i_read_range: {i_read_range}\n' - # f'ixrng: {ixrng}\n' - # f'yrange: {yrange}\n' - # ) - # ( - # view_xrange, - # view_yrange, - # ) = viz.plot.vb.viewRange() - # print( - # f'{viz.name}@{chart_name}\n' - # f' xRange -> {view_xrange}\n' - # f' yRange -> {view_yrange}\n' - # ) - - profiler(f'autoscaled overlays {chart_name}') + else: + pi.vb._set_yrange(yrange=yrange) + profiler( + f'{viz.name}@{chart_name} `Viz.plot.vb._set_yrange()`' + ) profiler(f'<{chart_name}>.interact_graphics_cycle({name})') # proportional group auto-scaling per overlay set. # -> loop through overlays on each multi-chart widget # and scale all y-ranges based on autoscale config. - group_mx: float = 0 - group_mn: float = 0 + # -> for any "group" overlay we want to dispersion normalize + # and scale minor charts onto the major chart: the chart + # with the most dispersion in the set. 
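+ # (where each curve's dispersion is measured in the loop
+ # below as its in-view y-range normed by the median, ie.
+ # disp = abs(ymx - ymn) / y_ref)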
+ major_mx: float = 0 + major_mn: float = float('inf') mx_up_rng: float = 0 mn_down_rng: float = 0 - start_datums: dict[ViewBox, float] = {} + mx_disp: float = 0 + start_datums: dict[ + ViewBox, + tuple[ + Viz, + float, # y start + float, # y min + float, # y max + float, # y median + slice, # in-view array slice + ], + ] = {} + max_start: float = 0 + major_viz: Viz = None for viz_name, out in mxmn_groups.items(): ( @@ -1021,50 +1020,173 @@ def interact_graphics_cycle( (ymn, ymx), ) = out + + x_start = ixrng[0] + max_start = max(x_start, max_start) + # determine start datum in view viz = chart._vizs[viz_name] arr = viz.shm.array + in_view = arr[read_slc] row_start = arr[read_slc.start - 1] # row_stop = arr[read_slc.stop - 1] + if viz.is_ohlc: + y_median = np.median(in_view['close']) y_start = row_start['open'] - # y_stop = row_stop['close'] else: + y_median = np.median(in_view[viz.name]) y_start = row_start[viz.name] # y_stop = row_stop[viz.name] - start_datums[viz.plot.vb] = (viz, y_start) - - # update max for group - up_rng = (ymx - y_start) / y_start - down_rng = (ymn - y_start) / y_start + start_datums[viz.plot.vb] = ( + viz, + y_start, + ymn, + ymx, + y_median, + read_slc, + ) # compute directional (up/down) y-range % swing/dispersion + y_ref = y_median + up_rng = (ymx - y_ref) / y_ref + down_rng = (ymn - y_ref) / y_ref + disp = abs(ymx - ymn) / y_ref + + # track the "major" curve as the curve with most + # dispersion. + if disp > mx_disp: + major_viz = viz + mx_disp = disp + major_mn = ymn + major_mx = ymx + mx_up_rng = max(mx_up_rng, up_rng) mn_down_rng = min(mn_down_rng, down_rng) - # pis2ranges[pi] = (ymn, ymx) - - group_mx = max(group_mx, ymx) - group_mn = min(group_mn, ymn) - print( f'{viz.name}@{chart_name} group mxmn calc\n' + f'y_start: {y_start}\n' f'ymn: {ymn}\n' f'ymx: {ymx}\n' - f'down %: {mx_up_rng * 100}\n' - f'up %: {mn_down_rng * 100}\n' + f'mx_disp: {mx_disp}\n' + f'up %: {up_rng * 100}\n' + f'down %: {down_rng * 100}\n' + f'mx up %: {mx_up_rng * 100}\n' + f'mn down %: {mn_down_rng * 100}\n' ) - for view, (viz, ystart) in start_datums.items(): - ymn = ystart * (1 + mn_down_rng) - ymx = ystart * (1 + mx_up_rng) - print( - f'{view.name} APPLY group mxmn\n' - f'ystart: {ystart}\n' - f'ymn: {ymn}\n' - f'ymx: {ymx}\n' + for ( + view, + ( + viz, + y_start, + y_min, + y_max, + y_median, + read_slc, + ) + ) in start_datums.items(): + + # TODO: just use y_min / y_max directly for the major + # `Viz` instead of the below calc since it should be the + # same output.. + symn = y_median * (1 + mn_down_rng) + symx = y_median * (1 + mx_up_rng) + + if not (viz is major_viz): + + # compute dispersion normed offsets at the start + # index of the smaller dispersion curve. + maj_viz_arr = major_viz.shm.array + + key = 'open' if viz.is_ohlc else viz.name + + # handle case where major (dispersion) curve has + # a smaller domain then minor one(s). + istart = read_slc.start + if read_slc.start > maj_viz_arr.size: + istart = 0 + + maj_start_y = maj_viz_arr[istart][key] + + maj_start_offset = maj_start_y / major_mn + maj_max_offset = major_mx / major_mn + + # XXX: or this? + # maj_start_offset = (maj_start_y - major_mn) / major_mn + # maj_max_offset = (major_mx - maj_start_y) / major_mn + + # XXX: or this? 
+ # major_disp_offset = ( + # (maj_viz_arr[istart][key] - major_mn) + # / + # major_mn + # ) + # minor_disp_offset_mn = ( + # (y_start - y_min) + # / + # y_min + # ) + # minor_disp_offset_mx = ( + # (ymx - y_start) + # / + # y_min + + # normed_disp_ratio = minor_disp_offset - major_disp_offset + + + # adjust mxmn range to align curve start point in + # the minor overlay with the major one. + + # symn = symn * (1 + normed_disp_ratio) + # symx = symx * (1 + normed_disp_ratio) + + # symn = symn - (symn * normed_disp_ratio) + # symx = symx - (symn * normed_disp_ratio) + + # symn = y_min * maj_start_offset + # symx = y_min * maj_max_offset + + print( + f'{view.name} APPLY group mxmn\n' + # f'disp offset ratio diff %: {normed_disp_ratio}\n' + # f'major disp offset %: {major_disp_offset}\n' + # f'minor disp offset %: {minor_disp_offset}\n' + f'y_start: {y_start}\n' + f'mn_down_rng: {mn_down_rng * 100}\n' + f'mx_up_rng: {mx_up_rng * 100}\n' + f'scaled ymn: {symn}\n' + f'scaled ymx: {symx}\n' + f'scaled mx_disp: {mx_disp}\n' + ) + + view._set_yrange( + yrange=(symn, symx), + # range_margin=None, ) - view._set_yrange(yrange=(ymn, ymx)) + + # if 'mnq' in viz.name: + # print( + # f'AUTO-Y-RANGING: {viz.name}\n' + # f'i_read_range: {i_read_range}\n' + # f'ixrng: {ixrng}\n' + # f'yrange: {yrange}\n' + # ) + # ( + # view_xrange, + # view_yrange, + # ) = viz.plot.vb.viewRange() + # view_ymx = view_yrange[1] + # print( + # f'{viz.name}@{chart_name}\n' + # f' xRange -> {view_xrange}\n' + # f' yRange -> {view_yrange}\n' + # f' view y-max -> {view_ymx}\n' + # ) + + # if view_ymx != symx: + # breakpoint() profiler.finish() From 052ce65682092e1e44b93c6ff652463c37c4cebb Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 20 Jan 2023 18:46:44 -0500 Subject: [PATCH 047/136] 3rdz the charm: log-linearize minor y-ranges to a major In very close manner to the original (gut instinct) attempt, this properly (y-axis-vertically) aligns and scales overlaid curves according to what we are calling a "log-linearized y-range multi-plot" B) The basic idea is that a simple returns measure (eg. `R = (p1 - p0) / p0`) applied to all curves gives a constant output `R` no matter the price co-domain in use and thus gives a constant returns over all assets in view styled scaling; a intuitive visual of returns correlation. The reference point is for now the left-most point in view (or highest common index available to all curves), though we can make this a parameter based on user needs. A slew of debug `print()`s are left in for now until we iron out the remaining edge cases to do with re-scaling a major (dispersion) curve based on a minor now requiring a larger log-linear y-range from that previous major' range. --- piker/ui/_interaction.py | 213 ++++++++++++++++++++++----------------- 1 file changed, 118 insertions(+), 95 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 5b15b9791d..08530bb0b2 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -20,6 +20,7 @@ """ from __future__ import annotations from contextlib import asynccontextmanager +import math import time from typing import ( Optional, @@ -988,6 +989,13 @@ def interact_graphics_cycle( profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + # if no overlays, set lone chart's yrange and short circuit + if len(mxmn_groups) < 2: + viz.plot.vb._set_yrange( + yrange=yrange, + ) + return + # proportional group auto-scaling per overlay set. 
# -> loop through overlays on each multi-chart widget # and scale all y-ranges based on autoscale config. @@ -1008,6 +1016,7 @@ def interact_graphics_cycle( float, # y max float, # y median slice, # in-view array slice + np.ndarray, # in-view array ], ] = {} max_start: float = 0 @@ -1020,7 +1029,6 @@ def interact_graphics_cycle( (ymn, ymx), ) = out - x_start = ixrng[0] max_start = max(x_start, max_start) @@ -1032,27 +1040,28 @@ def interact_graphics_cycle( # row_stop = arr[read_slc.stop - 1] if viz.is_ohlc: - y_median = np.median(in_view['close']) + y_med = np.median(in_view['close']) y_start = row_start['open'] else: - y_median = np.median(in_view[viz.name]) + y_med = np.median(in_view[viz.name]) y_start = row_start[viz.name] # y_stop = row_stop[viz.name] + print( + f'{viz.name} -> (x_start: {x_start}, y_start: {y_start}\n' + ) start_datums[viz.plot.vb] = ( viz, y_start, ymn, ymx, - y_median, + y_med, read_slc, + in_view, ) - # compute directional (up/down) y-range % swing/dispersion - y_ref = y_median - up_rng = (ymx - y_ref) / y_ref - down_rng = (ymn - y_ref) / y_ref - disp = abs(ymx - ymn) / y_ref + # find curve with max dispersion + disp = abs(ymx - ymn) / y_med # track the "major" curve as the curve with most # dispersion. @@ -1062,17 +1071,26 @@ def interact_graphics_cycle( major_mn = ymn major_mx = ymx + # compute directional (up/down) y-range % swing/dispersion + y_ref = y_med + up_rng = (ymx - y_ref) / y_ref + down_rng = (ymn - y_ref) / y_ref + mx_up_rng = max(mx_up_rng, up_rng) mn_down_rng = min(mn_down_rng, down_rng) print( + '####################\n' f'{viz.name}@{chart_name} group mxmn calc\n' + '--------------------\nn' f'y_start: {y_start}\n' f'ymn: {ymn}\n' f'ymx: {ymx}\n' f'mx_disp: {mx_disp}\n' + '####################\n' f'up %: {up_rng * 100}\n' f'down %: {down_rng * 100}\n' + '####################\n' f'mx up %: {mx_up_rng * 100}\n' f'mn down %: {mn_down_rng * 100}\n' ) @@ -1084,109 +1102,114 @@ def interact_graphics_cycle( y_start, y_min, y_max, - y_median, + y_med, read_slc, + minor_in_view, ) ) in start_datums.items(): - # TODO: just use y_min / y_max directly for the major - # `Viz` instead of the below calc since it should be the - # same output.. - symn = y_median * (1 + mn_down_rng) - symx = y_median * (1 + mx_up_rng) - - if not (viz is major_viz): - - # compute dispersion normed offsets at the start - # index of the smaller dispersion curve. - maj_viz_arr = major_viz.shm.array + # we use the ymn/mx verbatim from the major curve + # (i.e. the curve measured to have the highest + # dispersion in view). + if viz is major_viz: + ymn = y_min + ymx = y_max + else: key = 'open' if viz.is_ohlc else viz.name - # handle case where major (dispersion) curve has - # a smaller domain then minor one(s). - istart = read_slc.start - if read_slc.start > maj_viz_arr.size: - istart = 0 - - maj_start_y = maj_viz_arr[istart][key] - - maj_start_offset = maj_start_y / major_mn - maj_max_offset = major_mx / major_mn - - # XXX: or this? - # maj_start_offset = (maj_start_y - major_mn) / major_mn - # maj_max_offset = (major_mx - maj_start_y) / major_mn - - # XXX: or this? - # major_disp_offset = ( - # (maj_viz_arr[istart][key] - major_mn) - # / - # major_mn - # ) - # minor_disp_offset_mn = ( - # (y_start - y_min) - # / - # y_min - # ) - # minor_disp_offset_mx = ( - # (ymx - y_start) - # / - # y_min - - # normed_disp_ratio = minor_disp_offset - major_disp_offset - - - # adjust mxmn range to align curve start point in - # the minor overlay with the major one. 
- - # symn = symn * (1 + normed_disp_ratio) - # symx = symx * (1 + normed_disp_ratio) - - # symn = symn - (symn * normed_disp_ratio) - # symx = symx - (symn * normed_disp_ratio) - - # symn = y_min * maj_start_offset - # symx = y_min * maj_max_offset + # handle case where major and minor curve(s) have + # a disjoint x-domain (one curve is smaller in + # length then the other): + # - find the highest (time) index common to both + # curves. + # - slice out the first "intersecting" y-value from + # both curves for use in log-linear scaling such + # that the intersecting y-value is used as the + # reference point for scaling minor curve's + # y-range based on the major curves y-range. + abs_ifirst = minor_in_view[0]['index'] + mshm = major_viz.shm + abs_i_start = max( + abs_ifirst, + mshm.array['index'][0], + ) + # get intersection point y-values for both curves + y_maj_intersect = mshm._array[abs_i_start][key] + y_min_intersect = minor_in_view[abs_i_start - abs_ifirst] + + # TODO: probably write this as a compile cpython or + # numba func. + + # compute directional (up/down) y-range + # % swing/dispersion starting at the reference index + # determined by the above indexing arithmetic. + y_ref = y_maj_intersect + assert y_ref + r_up = (major_mx - y_ref) / y_ref + r_down = (major_mn - y_ref) / y_ref + ymn = y_start * (1 + r_down) + ymx = y_start * (1 + r_up) + + # XXX: handle out of view cases where minor curve + # now is outside the range of the major curve. in + # this case we then re-scale the major curve to + # include the range missing now enforced by the + # minor (now new major for this *side*). Note this + # is side (up/down) specific. + new_maj_mxmn: None | tuple[float, float] = None + if y_max > ymx: + y_ref = y_min_intersect[key] + r_up_minor = (y_max - y_ref) / y_ref + new_maj_ymx = y_maj_intersect * (1 + r_up_minor) + new_maj_mxmn = (major_mn, new_maj_ymx) + ymx = y_max + + print( + f'{view.name} OUT OF RANGE:\n' + f'MAJOR is {major_viz.name}\n' + f'y_max:{y_max} > ymx:{ymx}\n' + ) + + if y_min < ymn: + y_ref = y_min_intersect[key] + r_down_minor = (y_min - y_ref) / y_ref + new_maj_ymn = y_maj_intersect * (1 + r_down_minor) + new_maj_mxmn = ( + new_maj_ymn, + new_maj_ymx[1] if new_maj_mxmn else major_mx + ) + ymn = y_min + + print( + f'{view.name} OUT OF RANGE:\n' + f'MAJOR is {major_viz.name}\n' + f'y_min:{y_min} < ymn:{ymn}\n' + ) + + if new_maj_mxmn: + major_viz.plot.vb._set_yrange( + yrange=new_maj_mxmn, + ) print( f'{view.name} APPLY group mxmn\n' - # f'disp offset ratio diff %: {normed_disp_ratio}\n' - # f'major disp offset %: {major_disp_offset}\n' - # f'minor disp offset %: {minor_disp_offset}\n' f'y_start: {y_start}\n' f'mn_down_rng: {mn_down_rng * 100}\n' f'mx_up_rng: {mx_up_rng * 100}\n' - f'scaled ymn: {symn}\n' - f'scaled ymx: {symx}\n' + f'scaled ymn: {ymn}\n' + f'scaled ymx: {ymx}\n' f'scaled mx_disp: {mx_disp}\n' ) + if ( + math.isinf(ymx) + or math.isinf(ymn) + ): + breakpoint() + view._set_yrange( - yrange=(symn, symx), - # range_margin=None, + yrange=(ymn, ymx), ) - # if 'mnq' in viz.name: - # print( - # f'AUTO-Y-RANGING: {viz.name}\n' - # f'i_read_range: {i_read_range}\n' - # f'ixrng: {ixrng}\n' - # f'yrange: {yrange}\n' - # ) - # ( - # view_xrange, - # view_yrange, - # ) = viz.plot.vb.viewRange() - # view_ymx = view_yrange[1] - # print( - # f'{viz.name}@{chart_name}\n' - # f' xRange -> {view_xrange}\n' - # f' yRange -> {view_yrange}\n' - # f' view y-max -> {view_ymx}\n' - # ) - - # if view_ymx != symx: - # breakpoint() - profiler.finish() From 
0591cb09f6b9364e20dcf79b2b944a34c96c7788 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 21 Jan 2023 17:31:31 -0500 Subject: [PATCH 048/136] Clean up cross-curve intersect point indexing When there are `N`-curves we need to consider the smallest x-data-support subset when figuring out for each major-minor pair such that the "shorter" series is always returns aligned to the longer one. This makes the var naming more explicit with `major/minor_i_start` as well as clarifies more stringently a bunch of other variables and explicitly uses the `minor_y_intersect` y value in the scaling transform calcs. Also fixes some debug prints. --- piker/ui/_interaction.py | 95 +++++++++++++++++++++++----------------- 1 file changed, 55 insertions(+), 40 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 08530bb0b2..ab5542c365 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -930,17 +930,20 @@ def interact_graphics_cycle( for chart_name, chart in plots.items(): - # Viz "group" maxmins table; presumes that some path - # graphics (and thus their backing data sets) - # are in the same co-domain and thus can be sorted - # as one set per plot. - mxmns_by_pi: dict[ + # Common `PlotItem` maxmin table; presumes that some path + # graphics (and thus their backing data sets) are in the + # same co-domain and view box (since the were added + # a separate graphics objects to a common plot) and thus can + # be sorted as one set per plot. + mxmns_by_common_pi: dict[ pg.PlotItem, tuple[float, float], ] = {} - # collect certain flows into groups and do a common calc to + # collect certain flows have grapics objects **in seperate + # plots/viewboxes** into groups and do a common calc to # determine auto-ranging input for `._set_yrange()`. 
+ # this is primarly used for our so called "log-linearized mxmn_groups: dict[ set[Viz], set[Viz, tuple[float, float]], @@ -967,15 +970,15 @@ def interact_graphics_cycle( ) = out pi = viz.plot - mxmn = mxmns_by_pi.get(pi) + mxmn = mxmns_by_common_pi.get(pi) if mxmn: - yrange = mxmns_by_pi[pi] = ( + yrange = mxmns_by_common_pi[pi] = ( min(yrange[0], mxmn[0]), max(yrange[1], mxmn[1]), ) else: - mxmns_by_pi[pi] = yrange + mxmns_by_common_pi[pi] = yrange if viz.is_ohlc: # print(f'adding {viz.name} to overlay') @@ -1019,7 +1022,7 @@ def interact_graphics_cycle( np.ndarray, # in-view array ], ] = {} - max_start: float = 0 + max_istart: float = 0 major_viz: Viz = None for viz_name, out in mxmn_groups.items(): @@ -1029,15 +1032,13 @@ def interact_graphics_cycle( (ymn, ymx), ) = out - x_start = ixrng[0] - max_start = max(x_start, max_start) - # determine start datum in view viz = chart._vizs[viz_name] arr = viz.shm.array in_view = arr[read_slc] row_start = arr[read_slc.start - 1] - # row_stop = arr[read_slc.stop - 1] + + max_istart = max(in_view[0]['index'], max_istart) if viz.is_ohlc: y_med = np.median(in_view['close']) @@ -1045,11 +1046,12 @@ def interact_graphics_cycle( else: y_med = np.median(in_view[viz.name]) y_start = row_start[viz.name] - # y_stop = row_stop[viz.name] - print( - f'{viz.name} -> (x_start: {x_start}, y_start: {y_start}\n' - ) + # x_start = ixrng[0] + # print( + # f'{viz.name} ->\n' + # f'(x_start: {x_start}, y_start: {y_start}\n' + # ) start_datums[viz.plot.vb] = ( viz, y_start, @@ -1080,17 +1082,14 @@ def interact_graphics_cycle( mn_down_rng = min(mn_down_rng, down_rng) print( - '####################\n' f'{viz.name}@{chart_name} group mxmn calc\n' - '--------------------\nn' + '--------------------\n' f'y_start: {y_start}\n' f'ymn: {ymn}\n' f'ymx: {ymx}\n' f'mx_disp: {mx_disp}\n' - '####################\n' f'up %: {up_rng * 100}\n' f'down %: {down_rng * 100}\n' - '####################\n' f'mx up %: {mx_up_rng * 100}\n' f'mn down %: {mn_down_rng * 100}\n' ) @@ -1128,28 +1127,39 @@ def interact_graphics_cycle( # that the intersecting y-value is used as the # reference point for scaling minor curve's # y-range based on the major curves y-range. - abs_ifirst = minor_in_view[0]['index'] + + # get intersection point y-values for both curves + # abs_i_start = max_istart + mshm = major_viz.shm + minor_i_start = minor_in_view[0]['index'] + major_i_start = mshm.array['index'][0], abs_i_start = max( - abs_ifirst, - mshm.array['index'][0], + minor_i_start, + major_i_start, ) - # get intersection point y-values for both curves + y_maj_intersect = mshm._array[abs_i_start][key] - y_min_intersect = minor_in_view[abs_i_start - abs_ifirst] + y_minor_intersect = viz.shm._array[abs_i_start][key] # TODO: probably write this as a compile cpython or # numba func. + # if abs_i_start > major_i_start: + # compute directional (up/down) y-range # % swing/dispersion starting at the reference index # determined by the above indexing arithmetic. y_ref = y_maj_intersect - assert y_ref + if not y_ref: + breakpoint() + r_up = (major_mx - y_ref) / y_ref r_down = (major_mn - y_ref) / y_ref - ymn = y_start * (1 + r_down) - ymx = y_start * (1 + r_up) + + minor_y_start = y_minor_intersect + ymn = minor_y_start * (1 + r_down) + ymx = minor_y_start * (1 + r_up) # XXX: handle out of view cases where minor curve # now is outside the range of the major curve. in @@ -1159,42 +1169,47 @@ def interact_graphics_cycle( # is side (up/down) specific. 
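+ # eg. if the minor's y_max pokes above the scaled ymx we
+ # re-derive the major's ymx from the minor's return-relative
+ # swing: new_maj_ymx = y_maj_intersect * (1 + r_up_minor)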
new_maj_mxmn: None | tuple[float, float] = None if y_max > ymx: - y_ref = y_min_intersect[key] + y_ref = y_minor_intersect r_up_minor = (y_max - y_ref) / y_ref new_maj_ymx = y_maj_intersect * (1 + r_up_minor) new_maj_mxmn = (major_mn, new_maj_ymx) - ymx = y_max - print( f'{view.name} OUT OF RANGE:\n' - f'MAJOR is {major_viz.name}\n' + '--------------------\n' f'y_max:{y_max} > ymx:{ymx}\n' + f'RESCALE MAJOR {major_viz.name}:\n' + f'{new_maj_mxmn}\n' ) + ymx = y_max if y_min < ymn: - y_ref = y_min_intersect[key] + y_ref = y_minor_intersect r_down_minor = (y_min - y_ref) / y_ref new_maj_ymn = y_maj_intersect * (1 + r_down_minor) new_maj_mxmn = ( new_maj_ymn, - new_maj_ymx[1] if new_maj_mxmn else major_mx + new_maj_mxmn[1] if new_maj_mxmn else major_mx ) - ymn = y_min - print( f'{view.name} OUT OF RANGE:\n' - f'MAJOR is {major_viz.name}\n' + '--------------------\n' f'y_min:{y_min} < ymn:{ymn}\n' + f'RESCALE MAJOR {major_viz.name}:\n' + f'{new_maj_mxmn}\n' ) + ymn = y_min if new_maj_mxmn: + major_mx, major_mn = new_maj_mxmn major_viz.plot.vb._set_yrange( yrange=new_maj_mxmn, + # range_margin=None, ) print( f'{view.name} APPLY group mxmn\n' - f'y_start: {y_start}\n' + '--------------------\n' + f'minor_y_start: {minor_y_start}\n' f'mn_down_rng: {mn_down_rng * 100}\n' f'mx_up_rng: {mx_up_rng * 100}\n' f'scaled ymn: {ymn}\n' From 7e421ba57b4bd92098dfdf1ccfebec4475ed9e51 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 21 Jan 2023 17:58:10 -0500 Subject: [PATCH 049/136] Drop `.group_maxmin()` We ended up doing groups maxmin sorting at the interaction layer (new the view box) and thus this method is no longer needed, though it was the reference for the code now in `ChartView.interact_graphics_cycle()`. Further this adds a `remove_axes: bool` arg to `.insert_plotitem()` which can be used to drop axis entries from the inserted pi (though it doesn't seem like we really ever need that?) and does the removal in a separate loop to avoid removing axes before they are registered in `ComposedGridLayout._pi2axes`. --- piker/ui/_overlay.py | 118 ++++++++----------------------------------- 1 file changed, 20 insertions(+), 98 deletions(-) diff --git a/piker/ui/_overlay.py b/piker/ui/_overlay.py index a17c1173d9..ad11c5e490 100644 --- a/piker/ui/_overlay.py +++ b/piker/ui/_overlay.py @@ -115,6 +115,7 @@ def __init__( layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(0) + layout.setMinimumWidth(0) if name in ('top', 'bottom'): orient = Qt.Vertical @@ -124,7 +125,11 @@ def __init__( layout.setOrientation(orient) - self.insert_plotitem(0, pi) + self.insert_plotitem( + 0, + pi, + remove_axes=False, + ) # insert surrounding linear layouts into the parent pi's layout # such that additional axes can be appended arbitrarily without @@ -139,7 +144,9 @@ def __init__( assert linlayout.itemAt(0) is axis # XXX: see comment in ``.insert_plotitem()``... + # our `PlotItem.removeAxis()` does this internally. # pi.layout.removeItem(axis) + pi.layout.addItem(linlayout, *index) layout = pi.layout.itemAt(*index) assert layout is linlayout @@ -164,6 +171,8 @@ def insert_plotitem( index: int, plotitem: PlotItem, + remove_axes: bool = False, + ) -> tuple[int, list[AxisItem]]: ''' Place item at index by inserting all axes into the grid @@ -205,13 +214,6 @@ def insert_plotitem( ): continue - # XXX: Remove old axis? - # No, turns out we don't need this? 
- # DON'T UNLINK IT since we need the original ``ViewBox`` to - # still drive it with events/handlers B) - # popped = plotitem.removeAxis(name, unlink=False) - # assert axis is popped - # invert insert index for layouts which are # not-left-to-right, top-to-bottom insert oriented insert_index = index @@ -224,6 +226,16 @@ def insert_plotitem( self._register_item(index, plotitem) + if remove_axes: + for name, axis_info in plotitem.axes.copy().items(): + axis = axis_info['item'] + # XXX: Remove old axis? + # No, turns out we don't need this? + # DON'T UNLINK IT since we need the original ``ViewBox`` to + # still drive it with events/handlers B) + popped = plotitem.removeAxis(name, unlink=False) + assert axis is popped + return (index, inserted_axes) def append_plotitem( @@ -577,93 +589,3 @@ def _disconnect_all( # ''' # ... - - def group_maxmin( - self, - focus_around: str | None = None, - force_min: float | None = None, - - ) -> tuple[ - float, # mn - float, # mx - float, # max range in % terms of highest sigma plot's y-range - PlotItem, # front/selected plot - ]: - ''' - Overlay "group" maxmin sorting. - - Assumes all named flows are in the same co-domain and thus can - be sorted as one set. - - Iterates all the named flows and calls the chart api to find - their range values and return. - - TODO: really we should probably have a more built-in API for - this? - - ''' - # TODO: - # - use this in the ``.ui._fsp`` mutli-maxmin stuff - # - - - # force 0 to always be in view - group_mx: float = 0 - group_mn: float = 0 - mx_up_rng: float = 0 - mn_down_rng: float = 0 - pis2ranges: dict[ - PlotItem, - tuple[float, float], - ] = {} - - for pi in self.overlays: - - # TODO: can we remove this from the widget - # and place somewhere more related to UX/Viz? - # name = pi.name - # chartw = pi.chart_widget - viz = pi.viz - # viz = chartw._vizs[name] - - out = viz.maxmin() - if out is None: - return None - - ( - (x_start, x_stop), - read_slc, - (ymn, ymx), - ) = out - - arr = viz.shm.array - - y_start = arr[read_slc.start - 1] - y_stop = arr[read_slc.stop - 1] - if viz.is_ohlc: - y_start = y_start['open'] - y_stop = y_stop['close'] - else: - y_start = y_start[viz.name] - y_stop = y_stop[viz.name] - - # update max for group - up_rng = (ymx - y_start) / y_start - down_rng = (y_stop - ymn) / y_stop - - # compute directional (up/down) y-range % swing/dispersion - mx_up_rng = max(mx_up_rng, up_rng) - mn_down_rng = min(mn_down_rng, down_rng) - - pis2ranges[pi] = (ymn, ymx) - - group_mx = max(group_mx, ymx) - if force_min is None: - group_mn = min(group_mn, ymn) - - return ( - group_mn if force_min is None else force_min, - group_mx, - mn_down_rng, - mx_up_rng, - pis2ranges, - ) From c2dd255e8a7d2fbdd4e50908cd7adffbae7c89a9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 21 Jan 2023 18:17:04 -0500 Subject: [PATCH 050/136] Only remove axis from scene when in one --- piker/ui/_pg_overrides.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/piker/ui/_pg_overrides.py b/piker/ui/_pg_overrides.py index b7c0b9aa95..53ed5405c6 100644 --- a/piker/ui/_pg_overrides.py +++ b/piker/ui/_pg_overrides.py @@ -91,7 +91,7 @@ def __init__( title=None, viewBox=None, axisItems=None, - default_axes=['left', 'bottom'], + default_axes=['right', 'bottom'], enableMenu=True, **kargs ): @@ -130,7 +130,7 @@ def removeAxis( If the ``unlink: bool`` is set to ``False`` then the axis will stay linked to its view and will only be removed from the - layoutonly be removed from the layout. + layout. 
If no axis with ``name: str`` is found then this is a noop. @@ -144,7 +144,10 @@ def removeAxis( axis = entry['item'] self.layout.removeItem(axis) - axis.scene().removeItem(axis) + scn = axis.scene() + if scn: + scn.removeItem(axis) + if unlink: axis.unlinkFromView() From fc6ccc306cba1f4965b02b6c9f3a1cc222847698 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 21 Jan 2023 22:19:33 -0500 Subject: [PATCH 051/136] Only set the major curve's range once (per render cycle) Turns out this is a limitation of the `ViewBox.setYRange()` api: you can't call it more then once and expect anything but the first call to be applied without letting a render cycle run. As such, we wait until the end of the log-linear scaling loop to finally apply the major curves y-mx/mn after all minor curves have been evaluated. This also drops all the debug prints (for now) to get a feel for latency in production mode. --- piker/ui/_interaction.py | 159 ++++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 53 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index ab5542c365..cbec575945 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -827,11 +827,11 @@ def _set_yrange( # to get different view modes to operate # correctly! - profiler( - f'set limits {self.name}:\n' - f'ylow: {ylow}\n' - f'yhigh: {yhigh}\n' - ) + # print( + # f'set limits {self.name}:\n' + # f'ylow: {ylow}\n' + # f'yhigh: {yhigh}\n' + # ) self.setYRange( ylow, yhigh, @@ -841,6 +841,7 @@ def _set_yrange( yMin=ylow, yMax=yhigh, ) + self.update() # LOL: yet anothercucking pg buggg.. # can't use `msg=f'setYRange({ylow}, {yhigh}')` @@ -993,7 +994,9 @@ def interact_graphics_cycle( profiler(f'<{chart_name}>.interact_graphics_cycle({name})') # if no overlays, set lone chart's yrange and short circuit - if len(mxmn_groups) < 2: + if ( + len(mxmn_groups) < 2 + ): viz.plot.vb._set_yrange( yrange=yrange, ) @@ -1024,6 +1027,7 @@ def interact_graphics_cycle( ] = {} max_istart: float = 0 major_viz: Viz = None + # major_in_view: np.ndarray = None for viz_name, out in mxmn_groups.items(): ( @@ -1072,6 +1076,7 @@ def interact_graphics_cycle( mx_disp = disp major_mn = ymn major_mx = ymx + # major_in_view = in_view # compute directional (up/down) y-range % swing/dispersion y_ref = y_med @@ -1081,18 +1086,18 @@ def interact_graphics_cycle( mx_up_rng = max(mx_up_rng, up_rng) mn_down_rng = min(mn_down_rng, down_rng) - print( - f'{viz.name}@{chart_name} group mxmn calc\n' - '--------------------\n' - f'y_start: {y_start}\n' - f'ymn: {ymn}\n' - f'ymx: {ymx}\n' - f'mx_disp: {mx_disp}\n' - f'up %: {up_rng * 100}\n' - f'down %: {down_rng * 100}\n' - f'mx up %: {mx_up_rng * 100}\n' - f'mn down %: {mn_down_rng * 100}\n' - ) + # print( + # f'{viz.name}@{chart_name} group mxmn calc\n' + # '--------------------\n' + # f'y_start: {y_start}\n' + # f'ymn: {ymn}\n' + # f'ymx: {ymx}\n' + # f'mx_disp: {mx_disp}\n' + # f'up %: {up_rng * 100}\n' + # f'down %: {down_rng * 100}\n' + # f'mx up %: {mx_up_rng * 100}\n' + # f'mn down %: {mn_down_rng * 100}\n' + # ) for ( view, @@ -1113,6 +1118,14 @@ def interact_graphics_cycle( if viz is major_viz: ymn = y_min ymx = y_max + # print( + # f'{view.name} MAJOR mxmn\n' + # '--------------------\n' + # f'scaled ymn: {ymn}\n' + # f'scaled ymx: {ymx}\n' + # f'scaled mx_disp: {mx_disp}\n' + # ) + continue else: key = 'open' if viz.is_ohlc else viz.name @@ -1129,11 +1142,11 @@ def interact_graphics_cycle( # y-range based on the major curves y-range. 
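            # (anchoring both curves to the same shared datum means
            # their % returns are measured from a common origin and
            # so are directly comparable)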
# get intersection point y-values for both curves - # abs_i_start = max_istart - mshm = major_viz.shm + minor_i_start = minor_in_view[0]['index'] - major_i_start = mshm.array['index'][0], + major_i_start = mshm.array['index'][0] + abs_i_start = max( minor_i_start, major_i_start, @@ -1169,53 +1182,75 @@ def interact_graphics_cycle( # is side (up/down) specific. new_maj_mxmn: None | tuple[float, float] = None if y_max > ymx: + y_ref = y_minor_intersect r_up_minor = (y_max - y_ref) / y_ref - new_maj_ymx = y_maj_intersect * (1 + r_up_minor) + + # y_maj_ref = max( + # major_in_view[0][key], + # y_maj_intersect, + # ) + y_maj_ref = y_maj_intersect + new_maj_ymx = y_maj_ref * (1 + r_up_minor) new_maj_mxmn = (major_mn, new_maj_ymx) - print( - f'{view.name} OUT OF RANGE:\n' - '--------------------\n' - f'y_max:{y_max} > ymx:{ymx}\n' - f'RESCALE MAJOR {major_viz.name}:\n' - f'{new_maj_mxmn}\n' - ) + # print( + # f'{view.name} OUT OF RANGE:\n' + # '--------------------\n' + # f'y_max:{y_max} > ymx:{ymx}\n' + # ) ymx = y_max if y_min < ymn: + y_ref = y_minor_intersect r_down_minor = (y_min - y_ref) / y_ref - new_maj_ymn = y_maj_intersect * (1 + r_down_minor) + + # y_maj_ref = min( + # major_in_view[0][key], + # y_maj_intersect, + # ) + y_maj_ref = y_maj_intersect + new_maj_ymn = y_maj_ref * (1 + r_down_minor) new_maj_mxmn = ( new_maj_ymn, new_maj_mxmn[1] if new_maj_mxmn else major_mx ) - print( - f'{view.name} OUT OF RANGE:\n' - '--------------------\n' - f'y_min:{y_min} < ymn:{ymn}\n' - f'RESCALE MAJOR {major_viz.name}:\n' - f'{new_maj_mxmn}\n' - ) + # print( + # f'{view.name} OUT OF RANGE:\n' + # '--------------------\n' + # f'y_min:{y_min} < ymn:{ymn}\n' + # ) ymn = y_min - if new_maj_mxmn: - major_mx, major_mn = new_maj_mxmn - major_viz.plot.vb._set_yrange( - yrange=new_maj_mxmn, - # range_margin=None, - ) + # now scale opposite side to compensate + # y_ref = y_major_intersect + # r_down_minor = (major_ - y_ref) / y_ref - print( - f'{view.name} APPLY group mxmn\n' - '--------------------\n' - f'minor_y_start: {minor_y_start}\n' - f'mn_down_rng: {mn_down_rng * 100}\n' - f'mx_up_rng: {mx_up_rng * 100}\n' - f'scaled ymn: {ymn}\n' - f'scaled ymx: {ymx}\n' - f'scaled mx_disp: {mx_disp}\n' - ) + if new_maj_mxmn: + # print( + # f'RESCALE MAJOR {major_viz.name}:\n' + # f'previous: {(major_mn, major_mx)}\n' + # f'new: {new_maj_mxmn}\n' + # ) + # major_viz.plot.vb._set_yrange( + # yrange=new_maj_mxmn, + # # range_margin=None, + # ) + major_mn, major_mx = new_maj_mxmn + # vrs = major_viz.plot.vb.viewRange() + # if vrs[1][0] > new_maj_mxmn[0]: + # breakpoint() + + # print( + # f'{view.name} APPLY group mxmn\n' + # '--------------------\n' + # f'minor_y_start: {minor_y_start}\n' + # f'mn_down_rng: {mn_down_rng * 100}\n' + # f'mx_up_rng: {mx_up_rng * 100}\n' + # f'scaled ymn: {ymn}\n' + # f'scaled ymx: {ymx}\n' + # f'scaled mx_disp: {mx_disp}\n' + # ) if ( math.isinf(ymx) @@ -1227,4 +1262,22 @@ def interact_graphics_cycle( yrange=(ymn, ymx), ) + # NOTE XXX: we have to set the major curve's range once (and + # only once) here since we're doing this entire routine + # inside of a single render cycle (and apparently calling + # `ViewBox.setYRange()` multiple times within one only takes + # the first call as serious...) 
XD + # print( + # f'Scale MAJOR {major_viz.name}:\n' + # f'previous: {(major_mn, major_mx)}\n' + # f'new: {new_maj_mxmn}\n' + # ) + major_viz.plot.vb._set_yrange( + yrange=(major_mn, major_mx), + ) + # major_mx, major_mn = new_maj_mxmn + # vrs = major_viz.plot.vb.viewRange() + # if vrs[1][0] > major_mn: + # breakpoint() + profiler.finish() From a0fb84f55bba2f48a53ab695ca5669c49e526953 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 21 Jan 2023 22:28:18 -0500 Subject: [PATCH 052/136] Just warn log on bad intersect indexing errors (for now) --- piker/ui/_interaction.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index cbec575945..3679938310 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -1165,7 +1165,10 @@ def interact_graphics_cycle( # determined by the above indexing arithmetic. y_ref = y_maj_intersect if not y_ref: - breakpoint() + log.warning( + f'BAD y_maj_intersect?!: {y_maj_intersect}' + ) + # breakpoint() r_up = (major_mx - y_ref) / y_ref r_down = (major_mn - y_ref) / y_ref @@ -1256,7 +1259,11 @@ def interact_graphics_cycle( math.isinf(ymx) or math.isinf(ymn) ): - breakpoint() + # breakpoint() + log.warning( + f'BAD ymx/ymn: {(ymn, ymx)}' + ) + continue view._set_yrange( yrange=(ymn, ymx), From 32f21dc06b161fe6e76b8ff880ca01f52d934b82 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 22 Jan 2023 15:31:26 -0500 Subject: [PATCH 053/136] Drop `update_graphics_from_flow()` --- piker/ui/_chart.py | 18 ------------------ piker/ui/_fsp.py | 4 +--- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 78a20d9dde..88ac871bb9 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1280,24 +1280,6 @@ def draw_ohlc( **draw_curve_kwargs, ) - def update_graphics_from_flow( - self, - graphics_name: str, - array_key: Optional[str] = None, - - **kwargs, - - ) -> pg.GraphicsObject: - ''' - Update the named internal graphics from ``array``. - - ''' - viz = self._vizs[array_key or graphics_name] - return viz.update_graphics( - array_key=array_key, - **kwargs, - ) - # TODO: pretty sure we can just call the cursor # directly not? i don't wee why we need special "signal proxies" # for this lul.. 
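
The dropped wrapper was a thin indirection; per its body in the hunk
above, call-sites can reduce to the direct `Viz` form (a sketch, names
per the removed method):

    # lookup the viz by name then drive it directly:
    viz = chart._vizs[array_key or graphics_name]
    viz.update_graphics(array_key=array_key)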
diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 2e2e76c1c0..8fa7fd8266 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -707,9 +707,7 @@ async def open_vlm_displays( last_val_sticky.update_from_data(-1, value) - _, _, vlm_curve = vlm_chart.update_graphics_from_flow( - 'volume', - ) + _, vlm_curve = vlm_viz.update_graphics() # size view to data once at outset vlm_chart.view._set_yrange( From 89e2e7fc54fff14fd56e998d3ebb626792145336 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 22 Jan 2023 15:45:56 -0500 Subject: [PATCH 054/136] Adjust `.update_graphics()` to expect `in_view: bool` in `_fsp.py` --- piker/ui/_dataviz.py | 1 - piker/ui/_fsp.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 89e4633766..494ef985d6 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -1062,7 +1062,6 @@ def default_view( else: log.warning(f'Unknown view state {vl} -> {vr}') return - # raise RuntimeError(f'Unknown view state {vl} -> {vr}') else: # maintain the l->r view distance diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 8fa7fd8266..c546ec96be 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -707,7 +707,7 @@ async def open_vlm_displays( last_val_sticky.update_from_data(-1, value) - _, vlm_curve = vlm_viz.update_graphics() + _, _, vlm_curve = vlm_viz.update_graphics() # size view to data once at outset vlm_chart.view._set_yrange( From 896259d9e4ce3d6f25d0c3ba8d9ff7e8d4a2fc47 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 22 Jan 2023 15:48:56 -0500 Subject: [PATCH 055/136] When only one curve is in view, skip group ranging --- piker/ui/_interaction.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 3679938310..7281305066 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -915,7 +915,7 @@ def interact_graphics_cycle( ms_threshold=4, # XXX: important to avoid not seeing underlying - # ``.update_graphics_from_flow()`` nested profiling likely + # ``Viz.update_graphics()`` nested profiling likely # due to the way delaying works and garbage collection of # the profiler in the delegated method calls. delayed=True, @@ -957,7 +957,9 @@ def interact_graphics_cycle( # pass in no array which will read and render from the last # passed array (normally provided by the display loop.) - i_read_range, _ = viz.update_graphics() + in_view, i_read_range, _ = viz.update_graphics() + if not in_view: + continue profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') out = viz.maxmin(i_read_range=i_read_range) @@ -981,7 +983,11 @@ def interact_graphics_cycle( else: mxmns_by_common_pi[pi] = yrange - if viz.is_ohlc: + # TODO: a better predicate here, likely something + # to do with overlays and their settings.. + if ( + viz.is_ohlc + ): # print(f'adding {viz.name} to overlay') mxmn_groups[viz.name] = out @@ -997,9 +1003,19 @@ def interact_graphics_cycle( if ( len(mxmn_groups) < 2 ): - viz.plot.vb._set_yrange( - yrange=yrange, - ) + print(f'ONLY ranging major: {viz.name}') + for viz_name, out in mxmn_groups.items(): + ( + ixrng, + read_slc, + yrange, + ) = out + + # determine start datum in view + viz = chart._vizs[viz_name] + viz.plot.vb._set_yrange( + yrange=yrange, + ) return # proportional group auto-scaling per overlay set. 
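        # (reached only when 2 or more overlayed curves are in view;
        # the single-curve case short circuits above)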
From 776ffd2b1ca1eb7f745083f2bf46490ae9e74083 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 22 Jan 2023 16:07:52 -0500 Subject: [PATCH 056/136] Factor curve-dispersion sorting into primary loop We can determine the major curve (in view) in the first pass of all `Viz`s so drop the 2nd loop and thus the `mxmn_groups: dict`. Also simplifies logic for the case of only one (the major) curve in view. --- piker/ui/_interaction.py | 225 +++++++++++++++++++-------------------- 1 file changed, 108 insertions(+), 117 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 7281305066..18f4070e59 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -908,7 +908,6 @@ def interact_graphics_cycle( profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', # disabled=not pg_profile_enabled(), - # ms_threshold=ms_slower_then, disabled=True, @@ -941,16 +940,41 @@ def interact_graphics_cycle( tuple[float, float], ] = {} + # proportional group auto-scaling per overlay set. + # -> loop through overlays on each multi-chart widget + # and scale all y-ranges based on autoscale config. + # -> for any "group" overlay we want to dispersion normalize + # and scale minor charts onto the major chart: the chart + # with the most dispersion in the set. + major_viz: Viz = None + major_mx: float = 0 + major_mn: float = float('inf') + mx_up_rng: float = 0 + mn_down_rng: float = 0 + mx_disp: float = 0 + # collect certain flows have grapics objects **in seperate # plots/viewboxes** into groups and do a common calc to # determine auto-ranging input for `._set_yrange()`. # this is primarly used for our so called "log-linearized - mxmn_groups: dict[ - set[Viz], - set[Viz, tuple[float, float]], + # multi-plot" overlay technique. + start_datums: dict[ + ViewBox, + tuple[ + Viz, + float, # y start + float, # y min + float, # y max + float, # y median + slice, # in-view array slice + np.ndarray, # in-view array + ], ] = {} + max_istart: float = 0 + # major_in_view: np.ndarray = None for name, viz in chart._vizs.items(): + if not viz.render: # print(f'skipping {flow.name}') continue @@ -958,8 +982,10 @@ def interact_graphics_cycle( # pass in no array which will read and render from the last # passed array (normally provided by the display loop.) in_view, i_read_range, _ = viz.update_graphics() + if not in_view: continue + profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') out = viz.maxmin(i_read_range=i_read_range) @@ -968,11 +994,13 @@ def interact_graphics_cycle( return ( ixrng, - _, + read_slc, yrange ) = out pi = viz.plot + + # handle multiple graphics-objs per viewbox cases mxmn = mxmns_by_common_pi.get(pi) if mxmn: yrange = mxmns_by_common_pi[pi] = ( @@ -983,138 +1011,101 @@ def interact_graphics_cycle( else: mxmns_by_common_pi[pi] = yrange + # handle overlay log-linearized group scaling cases # TODO: a better predicate here, likely something # to do with overlays and their settings.. 
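+            # (for now "ohlc-ness" is used as a stand-in predicate
+            # for "is an overlayed price curve")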
if ( viz.is_ohlc ): + ymn, ymx = yrange # print(f'adding {viz.name} to overlay') - mxmn_groups[viz.name] = out + # mxmn_groups[viz.name] = out + # viz = chart._vizs[viz_name] - else: - pi.vb._set_yrange(yrange=yrange) - profiler( - f'{viz.name}@{chart_name} `Viz.plot.vb._set_yrange()`' - ) + # determine start datum in view + arr = viz.shm.array + in_view = arr[read_slc] + row_start = arr[read_slc.start - 1] - profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + max_istart = max(in_view[0]['index'], max_istart) - # if no overlays, set lone chart's yrange and short circuit - if ( - len(mxmn_groups) < 2 - ): - print(f'ONLY ranging major: {viz.name}') - for viz_name, out in mxmn_groups.items(): - ( - ixrng, - read_slc, - yrange, - ) = out + if viz.is_ohlc: + y_med = np.median(in_view['close']) + y_start = row_start['open'] + else: + y_med = np.median(in_view[viz.name]) + y_start = row_start[viz.name] - # determine start datum in view - viz = chart._vizs[viz_name] - viz.plot.vb._set_yrange( - yrange=yrange, + # x_start = ixrng[0] + # print( + # f'{viz.name} ->\n' + # f'(x_start: {x_start}, y_start: {y_start}\n' + # ) + start_datums[viz.plot.vb] = ( + viz, + y_start, + ymn, + ymx, + y_med, + read_slc, + in_view, ) - return - # proportional group auto-scaling per overlay set. - # -> loop through overlays on each multi-chart widget - # and scale all y-ranges based on autoscale config. - # -> for any "group" overlay we want to dispersion normalize - # and scale minor charts onto the major chart: the chart - # with the most dispersion in the set. - major_mx: float = 0 - major_mn: float = float('inf') - mx_up_rng: float = 0 - mn_down_rng: float = 0 - mx_disp: float = 0 - start_datums: dict[ - ViewBox, - tuple[ - Viz, - float, # y start - float, # y min - float, # y max - float, # y median - slice, # in-view array slice - np.ndarray, # in-view array - ], - ] = {} - max_istart: float = 0 - major_viz: Viz = None - # major_in_view: np.ndarray = None + # find curve with max dispersion + disp = abs(ymx - ymn) / y_med - for viz_name, out in mxmn_groups.items(): - ( - ixrng, - read_slc, - (ymn, ymx), - ) = out + # track the "major" curve as the curve with most + # dispersion. 
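+                # (i.e. the curve with the largest relative in-view
+                # y-range wins and becomes the scaling reference for
+                # all minors)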
+ if disp > mx_disp: + major_viz = viz + mx_disp = disp + major_mn = ymn + major_mx = ymx + # major_in_view = in_view + + # compute directional (up/down) y-range % swing/dispersion + y_ref = y_med + up_rng = (ymx - y_ref) / y_ref + down_rng = (ymn - y_ref) / y_ref - # determine start datum in view - viz = chart._vizs[viz_name] - arr = viz.shm.array - in_view = arr[read_slc] - row_start = arr[read_slc.start - 1] + mx_up_rng = max(mx_up_rng, up_rng) + mn_down_rng = min(mn_down_rng, down_rng) - max_istart = max(in_view[0]['index'], max_istart) + # print( + # f'{viz.name}@{chart_name} group mxmn calc\n' + # '--------------------\n' + # f'y_start: {y_start}\n' + # f'ymn: {ymn}\n' + # f'ymx: {ymx}\n' + # f'mx_disp: {mx_disp}\n' + # f'up %: {up_rng * 100}\n' + # f'down %: {down_rng * 100}\n' + # f'mx up %: {mx_up_rng * 100}\n' + # f'mn down %: {mn_down_rng * 100}\n' + # ) - if viz.is_ohlc: - y_med = np.median(in_view['close']) - y_start = row_start['open'] + # non-overlay group case else: - y_med = np.median(in_view[viz.name]) - y_start = row_start[viz.name] + pi.vb._set_yrange(yrange=yrange) + profiler( + f'{viz.name}@{chart_name} `Viz.plot.vb._set_yrange()`' + ) - # x_start = ixrng[0] - # print( - # f'{viz.name} ->\n' - # f'(x_start: {x_start}, y_start: {y_start}\n' - # ) - start_datums[viz.plot.vb] = ( - viz, - y_start, - ymn, - ymx, - y_med, - read_slc, - in_view, - ) + profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + if not start_datums: + return - # find curve with max dispersion - disp = abs(ymx - ymn) / y_med - - # track the "major" curve as the curve with most - # dispersion. - if disp > mx_disp: - major_viz = viz - mx_disp = disp - major_mn = ymn - major_mx = ymx - # major_in_view = in_view - - # compute directional (up/down) y-range % swing/dispersion - y_ref = y_med - up_rng = (ymx - y_ref) / y_ref - down_rng = (ymn - y_ref) / y_ref - - mx_up_rng = max(mx_up_rng, up_rng) - mn_down_rng = min(mn_down_rng, down_rng) - - # print( - # f'{viz.name}@{chart_name} group mxmn calc\n' - # '--------------------\n' - # f'y_start: {y_start}\n' - # f'ymn: {ymn}\n' - # f'ymx: {ymx}\n' - # f'mx_disp: {mx_disp}\n' - # f'up %: {up_rng * 100}\n' - # f'down %: {down_rng * 100}\n' - # f'mx up %: {mx_up_rng * 100}\n' - # f'mn down %: {mn_down_rng * 100}\n' - # ) + # if no overlays, set lone chart's yrange and short circuit + if ( + len(start_datums) < 2 + ): + # print(f'ONLY ranging major: {viz.name}') + major_viz.plot.vb._set_yrange( + yrange=yrange, + ) + return + # conduct "log-linearized multi-plot" scalings for all groups for ( view, ( From 481f1b3d7eecf33b335c2110c3a0da8cc13ff954 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 23 Jan 2023 13:23:46 -0500 Subject: [PATCH 057/136] Fix intersect detection using time indexing Facepalm, obviously absolute array indexes are not going to necessarily align vs. time over multiple feeds/history. Instead use `np.searchsorted()` on whatever curve has the smallest support and find the appropriate index of intersection in time so that alignment always starts at a sensible reference. Also adds a `debug_print: bool` input arg which can enable all the prints when working on this. 
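
For reference, a minimal runnable sketch of the time-aligned intersect
lookup (hypothetical data; field names as in the diff below):

    import numpy as np

    # structured array mimicking an in-view shm slice:
    minor_in_view = np.array(
        [(1674000000, 100.0), (1674000060, 101.5)],
        dtype=[('time', 'i8'), ('close', 'f8')],
    )
    major_i_start_t = 1674000060  # the major feed starts later

    # index of the minor datum at (or after) the major's start time:
    i = np.searchsorted(minor_in_view['time'], major_i_start_t)
    y_minor_intersect = minor_in_view[i]['close']  # -> 101.5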
--- piker/ui/_interaction.py | 173 +++++++++++++++++++++------------------ 1 file changed, 95 insertions(+), 78 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 18f4070e59..fb7c66a468 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -20,7 +20,9 @@ """ from __future__ import annotations from contextlib import asynccontextmanager -import math +from math import ( + isinf, +) import time from typing import ( Optional, @@ -904,6 +906,8 @@ def x_uppx(self) -> float: def interact_graphics_cycle( self, + *args, + debug_print: bool = False, ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', @@ -971,7 +975,7 @@ def interact_graphics_cycle( ], ] = {} max_istart: float = 0 - # major_in_view: np.ndarray = None + major_in_view: np.ndarray = None for name, viz in chart._vizs.items(): @@ -1061,7 +1065,7 @@ def interact_graphics_cycle( mx_disp = disp major_mn = ymn major_mx = ymx - # major_in_view = in_view + major_in_view = in_view # compute directional (up/down) y-range % swing/dispersion y_ref = y_med @@ -1099,6 +1103,9 @@ def interact_graphics_cycle( if ( len(start_datums) < 2 ): + if not major_viz: + major_viz = viz + # print(f'ONLY ranging major: {viz.name}') major_viz.plot.vb._set_yrange( yrange=yrange, @@ -1125,13 +1132,6 @@ def interact_graphics_cycle( if viz is major_viz: ymn = y_min ymx = y_max - # print( - # f'{view.name} MAJOR mxmn\n' - # '--------------------\n' - # f'scaled ymn: {ymn}\n' - # f'scaled ymx: {ymx}\n' - # f'scaled mx_disp: {mx_disp}\n' - # ) continue else: @@ -1149,31 +1149,61 @@ def interact_graphics_cycle( # y-range based on the major curves y-range. # get intersection point y-values for both curves - mshm = major_viz.shm - - minor_i_start = minor_in_view[0]['index'] - major_i_start = mshm.array['index'][0] + minor_in_view_start = minor_in_view[0] + minor_i_start = minor_in_view_start['index'] + minor_i_start_t = minor_in_view_start['time'] + + major_in_view_start = major_in_view[0] + major_i_start = major_in_view_start['index'] + major_i_start_t = major_in_view_start['time'] + + y_major_intersect = major_in_view_start[key] + y_minor_intersect = minor_in_view_start[key] + + tdiff = (major_i_start_t - minor_i_start_t) + if debug_print: + print( + f'{major_viz.name} time diff with minor:\n' + f'maj:{major_i_start_t}\n' + '-\n' + f'min:{minor_i_start_t}\n' + f'=> {tdiff}\n' + ) - abs_i_start = max( - minor_i_start, - major_i_start, - ) + # major has later timestamp adjust minor + if tdiff > 0: + y_minor_i = np.searchsorted( + minor_in_view['time'], + major_i_start_t, + ) + y_minor_intersect = minor_in_view[y_minor_i][key] - y_maj_intersect = mshm._array[abs_i_start][key] - y_minor_intersect = viz.shm._array[abs_i_start][key] + # minor has later timestamp adjust major + elif tdiff < 0: + y_major_i = np.searchsorted( + major_in_view['time'], + minor_i_start_t, + ) + y_major_intersect = major_in_view[y_major_i][key] + + if debug_print: + print( + f'major_i_start: {major_i_start}\n' + f'major_i_start_t: {major_i_start_t}\n' + f'minor_i_start: {minor_i_start}\n' + f'minor_i_start_t: {minor_i_start_t}\n' + ) # TODO: probably write this as a compile cpython or # numba func. - # if abs_i_start > major_i_start: - # compute directional (up/down) y-range # % swing/dispersion starting at the reference index # determined by the above indexing arithmetic. 
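            # e.g. (hypothetical numbers) if the major reads 4000 at
            # the shared start datum and its in-view max is 4200,
            # then r_up = 0.05 and a minor starting at 100 scales to
            # ymx = 100 * (1 + 0.05) = 105.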
- y_ref = y_maj_intersect + y_ref = y_major_intersect if not y_ref: log.warning( - f'BAD y_maj_intersect?!: {y_maj_intersect}' + f'BAD y_major_intersect?!: {y_major_intersect}' ) # breakpoint() @@ -1196,18 +1226,15 @@ def interact_graphics_cycle( y_ref = y_minor_intersect r_up_minor = (y_max - y_ref) / y_ref - # y_maj_ref = max( - # major_in_view[0][key], - # y_maj_intersect, - # ) - y_maj_ref = y_maj_intersect + y_maj_ref = y_major_intersect new_maj_ymx = y_maj_ref * (1 + r_up_minor) new_maj_mxmn = (major_mn, new_maj_ymx) - # print( - # f'{view.name} OUT OF RANGE:\n' - # '--------------------\n' - # f'y_max:{y_max} > ymx:{ymx}\n' - # ) + if debug_print: + print( + f'{view.name} OUT OF RANGE:\n' + '--------------------\n' + f'y_max:{y_max} > ymx:{ymx}\n' + ) ymx = y_max if y_min < ymn: @@ -1215,58 +1242,46 @@ def interact_graphics_cycle( y_ref = y_minor_intersect r_down_minor = (y_min - y_ref) / y_ref - # y_maj_ref = min( - # major_in_view[0][key], - # y_maj_intersect, - # ) - y_maj_ref = y_maj_intersect + y_maj_ref = y_major_intersect new_maj_ymn = y_maj_ref * (1 + r_down_minor) new_maj_mxmn = ( new_maj_ymn, new_maj_mxmn[1] if new_maj_mxmn else major_mx ) - # print( - # f'{view.name} OUT OF RANGE:\n' - # '--------------------\n' - # f'y_min:{y_min} < ymn:{ymn}\n' - # ) + if debug_print: + print( + f'{view.name} OUT OF RANGE:\n' + '--------------------\n' + f'y_min:{y_min} < ymn:{ymn}\n' + ) ymn = y_min - # now scale opposite side to compensate - # y_ref = y_major_intersect - # r_down_minor = (major_ - y_ref) / y_ref - if new_maj_mxmn: - # print( - # f'RESCALE MAJOR {major_viz.name}:\n' - # f'previous: {(major_mn, major_mx)}\n' - # f'new: {new_maj_mxmn}\n' - # ) - # major_viz.plot.vb._set_yrange( - # yrange=new_maj_mxmn, - # # range_margin=None, - # ) + if debug_print: + print( + f'RESCALE MAJOR {major_viz.name}:\n' + f'previous: {(major_mn, major_mx)}\n' + f'new: {new_maj_mxmn}\n' + ) major_mn, major_mx = new_maj_mxmn - # vrs = major_viz.plot.vb.viewRange() - # if vrs[1][0] > new_maj_mxmn[0]: - # breakpoint() - # print( - # f'{view.name} APPLY group mxmn\n' - # '--------------------\n' - # f'minor_y_start: {minor_y_start}\n' - # f'mn_down_rng: {mn_down_rng * 100}\n' - # f'mx_up_rng: {mx_up_rng * 100}\n' - # f'scaled ymn: {ymn}\n' - # f'scaled ymx: {ymx}\n' - # f'scaled mx_disp: {mx_disp}\n' - # ) + if debug_print: + print( + f'{view.name} APPLY group mxmn\n' + '--------------------\n' + f'y_minor_intersect: {y_minor_intersect}\n' + f'y_major_intersect: {y_major_intersect}\n' + f'mn_down_rng: {mn_down_rng * 100}\n' + f'mx_up_rng: {mx_up_rng * 100}\n' + f'scaled ymn: {ymn}\n' + f'scaled ymx: {ymx}\n' + f'scaled mx_disp: {mx_disp}\n' + ) if ( - math.isinf(ymx) - or math.isinf(ymn) + isinf(ymx) + or isinf(ymn) ): - # breakpoint() log.warning( f'BAD ymx/ymn: {(ymn, ymx)}' ) @@ -1281,11 +1296,13 @@ def interact_graphics_cycle( # inside of a single render cycle (and apparently calling # `ViewBox.setYRange()` multiple times within one only takes # the first call as serious...) 
XD - # print( - # f'Scale MAJOR {major_viz.name}:\n' - # f'previous: {(major_mn, major_mx)}\n' - # f'new: {new_maj_mxmn}\n' - # ) + if debug_print: + print( + f'Scale MAJOR {major_viz.name}:\n' + f'scaled mx_disp: {mx_disp}\n' + f'previous: {(major_mn, major_mx)}\n' + f'new: {new_maj_mxmn}\n' + ) major_viz.plot.vb._set_yrange( yrange=(major_mn, major_mx), ) From 497174c687b2b09dfdf4f86c2e00a4acfab5e62d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 23 Jan 2023 19:27:14 -0500 Subject: [PATCH 058/136] Add full profiling to `.interact_graphics_cycle()` --- piker/ui/_interaction.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index fb7c66a468..2e437af1ef 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -908,13 +908,14 @@ def interact_graphics_cycle( self, *args, debug_print: bool = False, + do_overlay_scaling: bool = True, ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', # disabled=not pg_profile_enabled(), # ms_threshold=ms_slower_then, - disabled=True, + disabled=False, ms_threshold=4, # XXX: important to avoid not seeing underlying @@ -974,7 +975,6 @@ def interact_graphics_cycle( np.ndarray, # in-view array ], ] = {} - max_istart: float = 0 major_in_view: np.ndarray = None for name, viz in chart._vizs.items(): @@ -1015,6 +1015,8 @@ def interact_graphics_cycle( else: mxmns_by_common_pi[pi] = yrange + profiler(f'{viz.name}@{chart_name} common pi sort') + # handle overlay log-linearized group scaling cases # TODO: a better predicate here, likely something # to do with overlays and their settings.. @@ -1031,8 +1033,6 @@ def interact_graphics_cycle( in_view = arr[read_slc] row_start = arr[read_slc.start - 1] - max_istart = max(in_view[0]['index'], max_istart) - if viz.is_ohlc: y_med = np.median(in_view['close']) y_start = row_start['open'] @@ -1040,6 +1040,8 @@ def interact_graphics_cycle( y_med = np.median(in_view[viz.name]) y_start = row_start[viz.name] + profiler(f'{viz.name}@{chart_name} MINOR curve median') + # x_start = ixrng[0] # print( # f'{viz.name} ->\n' @@ -1066,6 +1068,7 @@ def interact_graphics_cycle( major_mn = ymn major_mx = ymx major_in_view = in_view + profiler(f'{viz.name}@{chart_name} set new major') # compute directional (up/down) y-range % swing/dispersion y_ref = y_med @@ -1087,12 +1090,13 @@ def interact_graphics_cycle( # f'mx up %: {mx_up_rng * 100}\n' # f'mn down %: {mn_down_rng * 100}\n' # ) + profiler(f'{viz.name}@{chart_name} MINOR curve scale') # non-overlay group case else: pi.vb._set_yrange(yrange=yrange) profiler( - f'{viz.name}@{chart_name} `Viz.plot.vb._set_yrange()`' + f'{viz.name}@{chart_name} simple std `._set_yrange()`' ) profiler(f'<{chart_name}>.interact_graphics_cycle({name})') @@ -1102,6 +1106,7 @@ def interact_graphics_cycle( # if no overlays, set lone chart's yrange and short circuit if ( len(start_datums) < 2 + or not do_overlay_scaling ): if not major_viz: major_viz = viz @@ -1110,6 +1115,7 @@ def interact_graphics_cycle( major_viz.plot.vb._set_yrange( yrange=yrange, ) + profiler(f'{viz.name}@{chart_name} single curve yrange') return # conduct "log-linearized multi-plot" scalings for all groups @@ -1160,6 +1166,8 @@ def interact_graphics_cycle( y_major_intersect = major_in_view_start[key] y_minor_intersect = minor_in_view_start[key] + profiler(f'{viz.name}@{chart_name} intersect detection') + tdiff = (major_i_start_t - minor_i_start_t) if debug_print: print( @@ -1177,6 +1185,7 @@ 
def interact_graphics_cycle( major_i_start_t, ) y_minor_intersect = minor_in_view[y_minor_i][key] + profiler(f'{viz.name}@{chart_name} intersect by t') # minor has later timestamp adjust major elif tdiff < 0: @@ -1186,6 +1195,8 @@ def interact_graphics_cycle( ) y_major_intersect = major_in_view[y_major_i][key] + profiler(f'{viz.name}@{chart_name} intersect by t') + if debug_print: print( f'major_i_start: {major_i_start}\n' @@ -1214,6 +1225,8 @@ def interact_graphics_cycle( ymn = minor_y_start * (1 + r_down) ymx = minor_y_start * (1 + r_up) + profiler(f'{viz.name}@{chart_name} SCALE minor') + # XXX: handle out of view cases where minor curve # now is outside the range of the major curve. in # this case we then re-scale the major curve to @@ -1236,6 +1249,7 @@ def interact_graphics_cycle( f'y_max:{y_max} > ymx:{ymx}\n' ) ymx = y_max + profiler(f'{viz.name}@{chart_name} re-SCALE major UP') if y_min < ymn: @@ -1256,6 +1270,10 @@ def interact_graphics_cycle( ) ymn = y_min + profiler( + f'{viz.name}@{chart_name} re-SCALE major DOWN' + ) + if new_maj_mxmn: if debug_print: print( @@ -1290,6 +1308,7 @@ def interact_graphics_cycle( view._set_yrange( yrange=(ymn, ymx), ) + profiler(f'{viz.name}@{chart_name} log-SCALE minor') # NOTE XXX: we have to set the major curve's range once (and # only once) here since we're doing this entire routine @@ -1306,6 +1325,7 @@ def interact_graphics_cycle( major_viz.plot.vb._set_yrange( yrange=(major_mn, major_mx), ) + profiler(f'{viz.name}@{chart_name} log-SCALE major') # major_mx, major_mn = new_maj_mxmn # vrs = major_viz.plot.vb.viewRange() # if vrs[1][0] > major_mn: From 9418f53244f2b0f50401d5a953f44a5f853d7153 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 23 Jan 2023 20:03:00 -0500 Subject: [PATCH 059/136] Speed up ranging in display loop use the new `do_overlay_scaling: bool` since we know each feed will have its own updates (cuz multiplexed by feed..) and we can avoid ranging/scaling overlays that will make their own calls. Also, pass in the last datum "brighter" color for ohlc curves as it was originally (and now that we can pass that styling bit through). --- piker/ui/_display.py | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index c934f089d1..55f18f80b4 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -261,7 +261,10 @@ async def increment_history_view( profiler('`hist Viz.update_graphics()` call') if liv: - hist_viz.plot.vb._set_yrange(viz=hist_viz) + # hist_viz.plot.vb._set_yrange(viz=hist_viz) + hist_viz.plot.vb.interact_graphics_cycle( + do_overlay_scaling=False, + ) profiler('hist chart yrange view') # check if tread-in-place view x-shift is needed @@ -715,20 +718,18 @@ def graphics_update_cycle( or not main_vb._ic.is_set() ): yr = (mn, mx) - # print( - # f'MAIN VIZ yrange update\n' - # f'{fqsn}: {yr}' - # ) - - main_vb._set_yrange( - # TODO: we should probably scale - # the view margin based on the size - # of the true range? This way you can - # slap in orders outside the current - # L1 (only) book range. - # range_margin=0.1, - yrange=yr + main_vb.interact_graphics_cycle( + do_overlay_scaling=False, ) + # TODO: we should probably scale + # the view margin based on the size + # of the true range? This way you can + # slap in orders outside the current + # L1 (only) book range. 
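+            # NOTE: the previous direct-ranging call is kept below,
+            # commented, for reference: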
+ # main_vb._set_yrange( + # yrange=yr + # # range_margin=0.1, + # ) profiler('main vb y-autorange') # SLOW CHART resize case @@ -1224,6 +1225,9 @@ async def display_symbol_data( # to avoid internal pane creation. # sidepane=False, sidepane=godwidget.search, + draw_kwargs={ + 'last_step_color': 'original', + }, ) # ensure the last datum graphic is generated @@ -1242,6 +1246,9 @@ async def display_symbol_data( # in the case of history chart we explicitly set `False` # to avoid internal pane creation. sidepane=pp_pane, + draw_kwargs={ + 'last_step_color': 'original', + }, ) rt_viz = rt_chart.get_viz(fqsn) pis.setdefault(fqsn, [None, None])[0] = rt_chart.plotItem From ec8679ad74a9316bbf544c86b5be1aceddaa60c4 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 23 Jan 2023 20:22:45 -0500 Subject: [PATCH 060/136] Add `Viz.median_from_range()` A super snappy `numpy.median()` calculator (per input range) which we slap an `lru_cache` on thanks to handy dunder method hacks for such things on mutable types XD --- piker/ui/_dataviz.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 494ef985d6..32f400a1fa 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -19,6 +19,7 @@ ''' from __future__ import annotations +from functools import lru_cache from math import ( ceil, floor, @@ -282,6 +283,21 @@ class Viz(msgspec.Struct): # , frozen=True): tuple[float, float], ] = {} + # cache of median calcs from input read slice hashes + # see `.median()` + _meds: dict[ + int, + float, + ] = {} + + # to make lru_cache-ing work, see + # https://docs.python.org/3/faq/programming.html#how-do-i-cache-method-calls + def __eq__(self, other): + return self._shm._token == other._shm._token + + def __hash__(self): + return hash(self._shm._token) + @property def shm(self) -> ShmArray: return self._shm @@ -462,6 +478,19 @@ def maxmin( mxmn, ) + @lru_cache(maxsize=6116) + def median_from_range( + self, + start: int, + stop: int, + + ) -> float: + in_view = self.shm.array[start:stop] + if self.is_ohlc: + return np.median(in_view['close']) + else: + return np.median(in_view[self.name]) + def view_range(self) -> tuple[int, int]: ''' Return the start and stop x-indexes for the managed ``ViewBox``. From cf67c790e5352095ef3966323317ffa418e5d582 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 23 Jan 2023 20:25:16 -0500 Subject: [PATCH 061/136] Use new cached median method in overlay scaling Massively speeds up scaling transform cycles (duh). Also includes a draft for an "overlay transform" type/api; obviously still a WIP :surfer:.. --- piker/ui/_interaction.py | 59 +++++++++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 2e437af1ef..bf119be20d 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -42,6 +42,7 @@ from ..log import get_logger from .._profile import Profiler from .._profile import pg_profile_enabled, ms_slower_then +from ..data.types import Struct # from ._style import _min_points_to_show from ._editors import SelectRect from . import _event @@ -343,6 +344,49 @@ async def handle_viewmode_mouse( view.order_mode.submit_order() +class OverlayT(Struct): + ''' + An overlay co-domain range transformer. 
+ + Used to translate and apply a range from one y-range + to another based on a returns logarithm: + + R(ymn, ymx, yref) = (ymx - yref)/yref + + which gives the log-scale multiplier, and + + ymx_t = yref * (1 + R) + + which gives the inverse to translate to the same value + in the target co-domain. + + ''' + viz: Viz # viz with largest measured dispersion + + mx: float = 0 + mn: float = float('inf') + + up_swing: float = 0 + down_swing: float = 0 + disp: float = 0 + + def loglin_from_range( + self, + + y_ref: float, # reference value for dispersion metric + mn: float, # min y in target log-lin range + mx: float, # max y in target log-lin range + offset: float, # y-offset to start log-scaling from + + ) -> tuple[float, float]: + r_up = (mx - y_ref) / y_ref + r_down = (mn - y_ref) / y_ref + ymn = offset * (1 + r_down) + ymx = offset * (1 + r_up) + + return ymn, ymx + + class ChartView(ViewBox): ''' Price chart view box with interaction behaviors you'd expect from @@ -1034,19 +1078,20 @@ def interact_graphics_cycle( row_start = arr[read_slc.start - 1] if viz.is_ohlc: - y_med = np.median(in_view['close']) + y_med = viz.median_from_range( + read_slc.start, + read_slc.stop, + ) y_start = row_start['open'] else: - y_med = np.median(in_view[viz.name]) + y_med = viz.median_from_range( + read_slc.start, + read_slc.stop, + ) y_start = row_start[viz.name] profiler(f'{viz.name}@{chart_name} MINOR curve median') - # x_start = ixrng[0] - # print( - # f'{viz.name} ->\n' - # f'(x_start: {x_start}, y_start: {y_start}\n' - # ) start_datums[viz.plot.vb] = ( viz, y_start, From e06d4b405d8b8e6a1fe856bc3d3c72ed14973938 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 12:32:42 -0500 Subject: [PATCH 062/136] Add linked charts guard-flag for use in display loop --- piker/ui/_interaction.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index bf119be20d..a0b67aeb61 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -803,7 +803,7 @@ def _set_yrange( ''' name = self.name - # print(f'YRANGE ON {name}') + # print(f'YRANGE ON {name} -> yrange{yrange}') profiler = Profiler( msg=f'`ChartView._set_yrange()`: `{name}`', disabled=not pg_profile_enabled(), @@ -950,9 +950,11 @@ def x_uppx(self) -> float: def interact_graphics_cycle( self, - *args, + *args, # capture signal-handler related shit + debug_print: bool = False, do_overlay_scaling: bool = True, + do_linked_charts: bool = True, ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', @@ -974,7 +976,10 @@ def interact_graphics_cycle( plots = {chart.name: chart} linked = self.linked - if linked: + if ( + do_linked_charts + and linked + ): plots |= linked.subplots for chart_name, chart in plots.items(): From 5eaca18ee0f708cb96c4914cf41f3b5bea007075 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 12:33:11 -0500 Subject: [PATCH 063/136] Don't skip overlay scaling in disp-loop for now --- piker/ui/_display.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 55f18f80b4..b1af26f511 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -263,7 +263,7 @@ async def increment_history_view( if liv: # hist_viz.plot.vb._set_yrange(viz=hist_viz) hist_viz.plot.vb.interact_graphics_cycle( - do_overlay_scaling=False, + # do_overlay_scaling=False, ) profiler('hist chart yrange view') @@ -717,9 +717,11 @@ def graphics_update_cycle( 
main_vb._ic is None or not main_vb._ic.is_set() ): - yr = (mn, mx) + # TODO: incremenal update of the median + # and maxmin driving the y-autoranging. + # yr = (mn, mx) main_vb.interact_graphics_cycle( - do_overlay_scaling=False, + # do_overlay_scaling=False, ) # TODO: we should probably scale # the view margin based on the size From ea8450568207f41baf4219eb6c3a3e659e5e39ce Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 12:46:09 -0500 Subject: [PATCH 064/136] Don't scale overlays on linked from display loop In the (incrementally updated) display loop we have range logic that is incrementally updated in real-time by streams, as such we don't really need to update all linked chart's (for any given, currently updated chart) y-ranges on calls of each separate (sub-)chart's `ChartView.interact_graphics_cycle()`. In practise there are plenty of cases where resizing in one chart (say the vlm fsps sub-plot) requires a y-range re-calc but not in the OHLC price chart. Therefore we always avoid doing more resizing then necessary despite it resulting in potentially more method call overhead (which will later be justified by better leveraging incrementally updated `Viz.maxmin()` and `media_from_range()` calcs). --- piker/ui/_display.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index b1af26f511..72b0d76d04 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -261,8 +261,8 @@ async def increment_history_view( profiler('`hist Viz.update_graphics()` call') if liv: - # hist_viz.plot.vb._set_yrange(viz=hist_viz) hist_viz.plot.vb.interact_graphics_cycle( + do_linked_charts=False, # do_overlay_scaling=False, ) profiler('hist chart yrange view') @@ -586,7 +586,6 @@ def graphics_update_cycle( or trigger_all ): chart.increment_view(datums=append_diff) - # main_viz.plot.vb._set_yrange(viz=main_viz) # NOTE: since vlm and ohlc charts are axis linked now we don't # need the double increment request? @@ -722,16 +721,14 @@ def graphics_update_cycle( # yr = (mn, mx) main_vb.interact_graphics_cycle( # do_overlay_scaling=False, + do_linked_charts=False, ) # TODO: we should probably scale # the view margin based on the size # of the true range? This way you can # slap in orders outside the current # L1 (only) book range. - # main_vb._set_yrange( - # yrange=yr - # # range_margin=0.1, - # ) + profiler('main vb y-autorange') # SLOW CHART resize case @@ -854,9 +851,15 @@ def graphics_update_cycle( mx_vlm_in_view != varz['last_mx_vlm'] ): varz['last_mx_vlm'] = mx_vlm_in_view + + # TODO: incr maxmin update as pass into below.. # vlm_yr = (0, mx_vlm_in_view * 1.375) - # vlm_chart.view._set_yrange(yrange=vlm_yr) - # profiler('`vlm_chart.view._set_yrange()`') + + main_vlm_viz.plot.vb.interact_graphics_cycle( + # do_overlay_scaling=False, + do_linked_charts=False, + ) + profiler('`vlm_chart.view.interact_graphics_cycle()`') # update all downstream FSPs for curve_name, viz in vlm_vizs.items(): @@ -884,10 +887,10 @@ def graphics_update_cycle( # resizing from last quote?) # XXX: without this we get completely # mangled/empty vlm display subchart.. 
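            # (presumably since nothing else triggers a y-range
            # re-calc on these linked fsp sub-plots)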
- # fvb = viz.plot.vb - # fvb._set_yrange( - # viz=viz, - # ) + fvb = viz.plot.vb + fvb.interact_graphics_cycle( + do_linked_charts=False, + ) profiler(f'vlm `Viz[{viz.name}].plot.vb._set_yrange()`') # even if we're downsampled bigly From 517c68f3ad0ced22ce06d6c4485055ae66138120 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 13:04:10 -0500 Subject: [PATCH 065/136] Use `._pathops.slice_from_time()` for overlay intersects It's way faster since it uses a uniform time arithmetic to narrow the `numpy.searchsorted()` range before actually doing the index search B) --- piker/ui/_interaction.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index a0b67aeb61..229d26841f 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -43,6 +43,7 @@ from .._profile import Profiler from .._profile import pg_profile_enabled, ms_slower_then from ..data.types import Struct +from ..data._pathops import slice_from_time # from ._style import _min_points_to_show from ._editors import SelectRect from . import _event @@ -1230,20 +1231,22 @@ def interact_graphics_cycle( # major has later timestamp adjust minor if tdiff > 0: - y_minor_i = np.searchsorted( - minor_in_view['time'], - major_i_start_t, + slc = slice_from_time( + arr=minor_in_view, + start_t=major_i_start_t, + stop_t=major_i_start_t, ) - y_minor_intersect = minor_in_view[y_minor_i][key] + y_minor_intersect = minor_in_view[slc.start][key] profiler(f'{viz.name}@{chart_name} intersect by t') # minor has later timestamp adjust major elif tdiff < 0: - y_major_i = np.searchsorted( - major_in_view['time'], - minor_i_start_t, + slc = slice_from_time( + arr=major_in_view, + start_t=minor_i_start_t, + stop_t=minor_i_start_t, ) - y_major_intersect = major_in_view[y_major_i][key] + y_major_intersect = major_in_view[slc.start][key] profiler(f'{viz.name}@{chart_name} intersect by t') From 5a8fd42c0cef5ead21cc7aa933b6d82ed1334556 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 17:16:23 -0500 Subject: [PATCH 066/136] Lul, actually scaled main chart from linked set This was a subtle logic error when building the `plots: dict` we weren't adding the "main (ohlc or other source) chart" from the `LinkedSplits` set when interacting with some sub-chart from `.subplots`.. Further this tries out bypassing `numpy.median()` altogether by just using `median = (ymx - ymn) / 2` which should be nearly the same? --- piker/ui/_interaction.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 229d26841f..72ba01ed3c 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -959,29 +959,31 @@ def interact_graphics_cycle( ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', - # disabled=not pg_profile_enabled(), - # ms_threshold=ms_slower_then, - - disabled=False, - ms_threshold=4, + disabled=not pg_profile_enabled(), + ms_threshold=ms_slower_then, # XXX: important to avoid not seeing underlying # ``Viz.update_graphics()`` nested profiling likely # due to the way delaying works and garbage collection of # the profiler in the delegated method calls. delayed=True, + + # for hardcore latency checking, comment these flags above. 
+ # disabled=False, + # ms_threshold=4, ) # TODO: a faster single-loop-iterator way of doing this XD chart = self._chart - plots = {chart.name: chart} - linked = self.linked if ( do_linked_charts and linked ): + plots = {linked.chart.name: linked.chart} plots |= linked.subplots + else: + plots = {chart.name: chart} for chart_name, chart in plots.items(): @@ -1083,17 +1085,19 @@ def interact_graphics_cycle( in_view = arr[read_slc] row_start = arr[read_slc.start - 1] + y_med = (ymx - ymn) / 2 if viz.is_ohlc: - y_med = viz.median_from_range( - read_slc.start, - read_slc.stop, - ) + # y_med = (ymx - ymin) / 2 + # y_med = viz.median_from_range( + # read_slc.start, + # read_slc.stop, + # ) y_start = row_start['open'] else: - y_med = viz.median_from_range( - read_slc.start, - read_slc.stop, - ) + # y_med = viz.median_from_range( + # read_slc.start, + # read_slc.stop, + # ) y_start = row_start[viz.name] profiler(f'{viz.name}@{chart_name} MINOR curve median') From 7ebcd6d734299e322e8e4ec0f27a07bc07027c68 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 18:21:11 -0500 Subject: [PATCH 067/136] Comment out all median usage, turns out it's uneeded.. --- piker/ui/_interaction.py | 41 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 72ba01ed3c..74f3afc48a 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -973,7 +973,6 @@ def interact_graphics_cycle( # ms_threshold=4, ) - # TODO: a faster single-loop-iterator way of doing this XD chart = self._chart linked = self.linked if ( @@ -985,6 +984,7 @@ def interact_graphics_cycle( else: plots = {chart.name: chart} + # TODO: a faster single-loop-iterator way of doing this? for chart_name, chart in plots.items(): # Common `PlotItem` maxmin table; presumes that some path @@ -1006,8 +1006,8 @@ def interact_graphics_cycle( major_viz: Viz = None major_mx: float = 0 major_mn: float = float('inf') - mx_up_rng: float = 0 - mn_down_rng: float = 0 + # mx_up_rng: float = 0 + # mn_down_rng: float = 0 mx_disp: float = 0 # collect certain flows have grapics objects **in seperate @@ -1085,19 +1085,14 @@ def interact_graphics_cycle( in_view = arr[read_slc] row_start = arr[read_slc.start - 1] - y_med = (ymx - ymn) / 2 + # y_med = (ymx - ymn) / 2 + # y_med = viz.median_from_range( + # read_slc.start, + # read_slc.stop, + # ) if viz.is_ohlc: - # y_med = (ymx - ymin) / 2 - # y_med = viz.median_from_range( - # read_slc.start, - # read_slc.stop, - # ) y_start = row_start['open'] else: - # y_med = viz.median_from_range( - # read_slc.start, - # read_slc.stop, - # ) y_start = row_start[viz.name] profiler(f'{viz.name}@{chart_name} MINOR curve median') @@ -1107,13 +1102,13 @@ def interact_graphics_cycle( y_start, ymn, ymx, - y_med, + # y_med, read_slc, in_view, ) # find curve with max dispersion - disp = abs(ymx - ymn) / y_med + disp = abs(ymx - ymn) / y_start # track the "major" curve as the curve with most # dispersion. 
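            # (dispersion is now normalized by the start-of-view
            # value since the median calcs are commented out above)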
@@ -1126,12 +1121,12 @@ def interact_graphics_cycle( profiler(f'{viz.name}@{chart_name} set new major') # compute directional (up/down) y-range % swing/dispersion - y_ref = y_med - up_rng = (ymx - y_ref) / y_ref - down_rng = (ymn - y_ref) / y_ref + # y_ref = y_med + # up_rng = (ymx - y_ref) / y_ref + # down_rng = (ymn - y_ref) / y_ref - mx_up_rng = max(mx_up_rng, up_rng) - mn_down_rng = min(mn_down_rng, down_rng) + # mx_up_rng = max(mx_up_rng, up_rng) + # mn_down_rng = min(mn_down_rng, down_rng) # print( # f'{viz.name}@{chart_name} group mxmn calc\n' @@ -1181,7 +1176,7 @@ def interact_graphics_cycle( y_start, y_min, y_max, - y_med, + # y_med, read_slc, minor_in_view, ) @@ -1346,8 +1341,8 @@ def interact_graphics_cycle( '--------------------\n' f'y_minor_intersect: {y_minor_intersect}\n' f'y_major_intersect: {y_major_intersect}\n' - f'mn_down_rng: {mn_down_rng * 100}\n' - f'mx_up_rng: {mx_up_rng * 100}\n' + # f'mn_down_rng: {mn_down_rng * 100}\n' + # f'mx_up_rng: {mx_up_rng * 100}\n' f'scaled ymn: {ymn}\n' f'scaled ymx: {ymx}\n' f'scaled mx_disp: {mx_disp}\n' From 246d07021ec8ceb7aa4cde98f5462d5726648f35 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 24 Jan 2023 18:45:35 -0500 Subject: [PATCH 068/136] Drop old loop and wait on fsp engine tasks startups --- piker/ui/_fsp.py | 35 +++++++++++------------------------ 1 file changed, 11 insertions(+), 24 deletions(-) diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index c546ec96be..9f1eec6559 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -608,6 +608,7 @@ async def open_vlm_displays( linked: LinkedSplits, flume: Flume, dvlm: bool = True, + loglevel: str = 'info', task_status: TaskStatus[ChartPlotWidget] = trio.TASK_STATUS_IGNORED, @@ -710,9 +711,9 @@ async def open_vlm_displays( _, _, vlm_curve = vlm_viz.update_graphics() # size view to data once at outset - vlm_chart.view._set_yrange( - viz=vlm_viz - ) + # vlm_chart.view._set_yrange( + # viz=vlm_viz + # ) # add axis title axis = vlm_chart.getAxis('right') @@ -734,22 +735,8 @@ async def open_vlm_displays( }, }, }, - # loglevel, + loglevel, ) - tasks_ready.append(started) - - # FIXME: we should error on starting the same fsp right - # since it might collide with existing shm.. or wait we - # had this before?? - # dolla_vlm - - tasks_ready.append(started) - # profiler(f'created shm for fsp actor: {display_name}') - - # wait for all engine tasks to startup - async with trio.open_nursery() as n: - for event in tasks_ready: - n.start_soon(event.wait) # dolla vlm overlay # XXX: the main chart already contains a vlm "units" axis @@ -825,6 +812,7 @@ def chart_curves( ) assert viz.plot is pi + await started.wait() chart_curves( dvlm_fields, dvlm_pi, @@ -833,19 +821,17 @@ def chart_curves( step_mode=True, ) - # spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is - # up since this one depends on it. 
-
+ # NOTE: spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is
+ # up since calculating vlm "rates" obvs first requires the
+ # underlying vlm event feed ;)
 fr_flume, started = await admin.start_engine_task(
 flow_rates,
 { # fsp engine conf
 'func_name': 'flow_rates',
 'zero_on_step': True,
 },
- # loglevel,
+ loglevel,
 )
- await started.wait()
-
 # chart_curves(
 # dvlm_rate_fields,
 # dvlm_pi,
@@ -888,6 +874,7 @@ def chart_curves(
 )
 tr_pi.hideAxis('bottom')

+ await started.wait()
 chart_curves(
 trade_rate_fields,
 tr_pi,

From 5dd69b22957c547c9286272e6e0e6107308a32b5 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 25 Jan 2023 09:11:34 -0500
Subject: [PATCH 069/136] Better handle dynamic registry sampler broadcasts

In situations where clients are (dynamically) subscribing *while*
broadcasts are starting to take place we need to handle the
`set`-modified-during-iteration case. This scenario seems to be more
common during races on concurrent startup of multiple symbols. The
solution here is to use another set to take note of subscribers which
are successfully sent-to and then skip them on re-try.

This also contains an attempt to exception-handle throttled stream
overruns caused by higher frequency feeds (like binance) pushing more
quotes than can be handled during (UI) client startup.
---
 piker/data/_sampling.py | 46 +++++++++++++++++++++++++++++------------
 piker/data/feed.py | 3 +++
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/piker/data/_sampling.py b/piker/data/_sampling.py
index f44304bf93..ec29c6ae2e 100644
--- a/piker/data/_sampling.py
+++ b/piker/data/_sampling.py
@@ -253,20 +253,30 @@ async def broadcast(
 # f'consumers: {subs}'
 )
 borked: set[tractor.MsgStream] = set()
- for stream in subs:
+ sent: set[tractor.MsgStream] = set()
+ while True:
 try:
- await stream.send({
- 'index': time_stamp or last_ts,
- 'period': period_s,
- })
+ for stream in (subs - sent):
+ try:
+ await stream.send({
+ 'index': time_stamp or last_ts,
+ 'period': period_s,
+ })
+ sent.add(stream)
+
+ except (
+ trio.BrokenResourceError,
+ trio.ClosedResourceError
+ ):
+ log.error(
+ f'{stream._ctx.chan.uid} dropped connection'
+ )
+ borked.add(stream)
+ else:
+ break
+ except RuntimeError:
+ log.warning(f'Client subs {subs} changed while broadcasting')
+ continue

 for stream in borked:
 try:
@@ -848,6 +858,16 @@ async def uniform_rate_send(
 # rate timing exactly lul
 try:
 await stream.send({sym: first_quote})
+ except tractor.RemoteActorError as rme:
+ if rme.type is not tractor._exceptions.StreamOverrun:
+ raise
+ ctx = stream._ctx
+ chan = ctx.chan
+ log.warning(
+ 'Throttled quote-stream overrun!\n'
+ f'{sym}:{ctx.cid}@{chan.uid}'
+ )
+
 except (
 # NOTE: any of these can be raised by ``tractor``'s IPC
 # transport-layer and we want to be highly resilient
diff --git a/piker/data/feed.py b/piker/data/feed.py
index 69d5be7dc9..7efd5eb328 100644
--- a/piker/data/feed.py
+++ b/piker/data/feed.py
@@ -1589,6 +1589,9 @@ async def open_feed(
 (brokermod, bfqsns),
 ) in zip(ctxs, providers.items()):

+ # NOTE: do it asap to avoid overruns during multi-feed setup?
+ ctx._backpressure = backpressure + for fqsn, flume_msg in flumes_msg_dict.items(): flume = Flume.from_msg(flume_msg) assert flume.symbol.fqsn == fqsn From 9930f25ad30e4f03bd6a0f9d750132cfd7448db2 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 21 Jan 2023 18:39:41 -0500 Subject: [PATCH 070/136] Move axis hiding into `.overlay_plotitem()` Since we pretty much always want the 'bottom' and any side that is not declared by the caller move the axis hides into this method. Lets us drop the same calls in `.ui._fsp` and `._display`. This also disables the auto-ranging back-linking for now since it doesn't seem to be working quite yet? --- piker/ui/_chart.py | 43 +++++++++++++++++++++++++++---------------- piker/ui/_display.py | 10 ---------- piker/ui/_fsp.py | 9 +++------ 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 88ac871bb9..96187bf2b7 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -634,6 +634,7 @@ def add_plot( axis.pi = cpw.plotItem cpw.hideAxis('left') + # cpw.removeAxis('left') cpw.hideAxis('bottom') if ( @@ -750,12 +751,12 @@ def add_plot( # NOTE: back-link the new sub-chart to trigger y-autoranging in # the (ohlc parent) main chart for this linked set. - if self.chart: - main_viz = self.chart.get_viz(self.chart.name) - self.chart.view.enable_auto_yrange( - src_vb=cpw.view, - viz=main_viz, - ) + # if self.chart: + # main_viz = self.chart.get_viz(self.chart.name) + # self.chart.view.enable_auto_yrange( + # src_vb=cpw.view, + # viz=main_viz, + # ) graphics = viz.graphics data_key = viz.name @@ -1106,6 +1107,12 @@ def overlay_plotitem( pi.chart_widget = self pi.hideButtons() + # hide all axes not named by ``axis_side`` + for axname in ( + ({'bottom'} | allowed_sides) - {axis_side} + ): + pi.hideAxis(axname) + # compose this new plot's graphics with the current chart's # existing one but with separate axes as neede and specified. self.pi_overlay.add_plotitem( @@ -1209,17 +1216,21 @@ def draw_curve( pi = overlay if add_sticky: - axis = pi.getAxis(add_sticky) - if pi.name not in axis._stickies: - if pi is not self.plotItem: - overlay = self.pi_overlay - assert pi in overlay.overlays - overlay_axis = overlay.get_axis( - pi, - add_sticky, - ) - assert overlay_axis is axis + if pi is not self.plotItem: + # overlay = self.pi_overlay + # assert pi in overlay.overlays + overlay = self.pi_overlay + assert pi in overlay.overlays + axis = overlay.get_axis( + pi, + add_sticky, + ) + + else: + axis = pi.getAxis(add_sticky) + + if pi.name not in axis._stickies: # TODO: UGH! just make this not here! we should # be making the sticky from code which has access diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 72b0d76d04..fde3124e26 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -1320,13 +1320,6 @@ async def display_symbol_data( name=fqsn, axis_title=fqsn, ) - # only show a singleton bottom-bottom axis by default. - hist_pi.hideAxis('bottom') - - # XXX: TODO: THIS WILL CAUSE A GAP ON OVERLAYS, - # i think it needs to be "removed" instead when there - # are none? 
- hist_pi.hideAxis('left') hist_viz = hist_chart.draw_curve( fqsn, @@ -1362,9 +1355,6 @@ async def display_symbol_data( axis_title=fqsn, ) - rt_pi.hideAxis('left') - rt_pi.hideAxis('bottom') - rt_viz = rt_chart.draw_curve( fqsn, ohlcv, diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 9f1eec6559..6da93b718c 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -691,7 +691,7 @@ async def open_vlm_displays( # the axis on the left it's totally not lined up... # show volume units value on LHS (for dinkus) # vlm_chart.hideAxis('right') - # vlm_chart.showAxis('left') + vlm_chart.hideAxis('left') # send back new chart to caller task_status.started(vlm_chart) @@ -759,10 +759,6 @@ async def open_vlm_displays( }, ) - # TODO: should this maybe be implicit based on input args to - # `.overlay_plotitem()` above? - dvlm_pi.hideAxis('bottom') - # all to be overlayed curve names dvlm_fields = [ 'dolla_vlm', @@ -845,6 +841,8 @@ def chart_curves( # liquidity events (well at least on low OHLC periods - 1s). vlm_curve.hide() vlm_chart.removeItem(vlm_curve) + # vlm_chart.plotItem.layout.setMinimumWidth(0) + # vlm_chart.removeAxis('left') vlm_viz = vlm_chart._vizs['volume'] vlm_viz.render = False @@ -872,7 +870,6 @@ def chart_curves( }, ) - tr_pi.hideAxis('bottom') await started.wait() chart_curves( From 7a83a7288c887a09be28eaf5e7ebe95b63f70c0f Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 30 Jan 2023 11:33:30 -0500 Subject: [PATCH 071/136] Update profile msgs to new apis --- piker/ui/_display.py | 2 +- piker/ui/_interaction.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index fde3124e26..acef98e51f 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -891,7 +891,7 @@ def graphics_update_cycle( fvb.interact_graphics_cycle( do_linked_charts=False, ) - profiler(f'vlm `Viz[{viz.name}].plot.vb._set_yrange()`') + profiler(f'vlm `Viz[{viz.name}].plot.vb.interact_graphics_cycle()`') # even if we're downsampled bigly # draw the last datum in the final diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 74f3afc48a..dc0e47ed3a 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -1053,6 +1053,7 @@ def interact_graphics_cycle( read_slc, yrange ) = out + profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') pi = viz.plot From 2ed43c07581a25de5fb448d58eaddd92cb301ab1 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 2 Feb 2023 12:00:19 -0500 Subject: [PATCH 072/136] Fix profiler f-string --- piker/ui/_display.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index acef98e51f..cd0b3ac10f 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -577,7 +577,7 @@ def graphics_update_cycle( mx = mx_in_view + tick_margin mn = mn_in_view - tick_margin - profiler('{fqsdn} `multi_maxmin()` call') + profiler(f'{fqsn} `multi_maxmin()` call') # don't real-time "shift" the curve to the # left unless we get one of the following: @@ -891,7 +891,9 @@ def graphics_update_cycle( fvb.interact_graphics_cycle( do_linked_charts=False, ) - profiler(f'vlm `Viz[{viz.name}].plot.vb.interact_graphics_cycle()`') + profiler( + f'Viz[{viz.name}].plot.vb.interact_graphics_cycle()`' + ) # even if we're downsampled bigly # draw the last datum in the final From c690e141e1d029f60d1950188ddb6f75e00fe39c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 3 Feb 2023 10:49:46 -0500 Subject: [PATCH 073/136] Don't unset `Viz.render` for unit vlm Such 
that we still y-range auto-sort inside `ChartView.interact_graphics_cycle()` still runs on the unit vlm axis and we always size such that the y-label stays in view. --- piker/ui/_fsp.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 6da93b718c..d1c53db5af 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -844,9 +844,12 @@ def chart_curves( # vlm_chart.plotItem.layout.setMinimumWidth(0) # vlm_chart.removeAxis('left') vlm_viz = vlm_chart._vizs['volume'] - vlm_viz.render = False - # avoid range sorting on volume once disabled + # NOTE: DON'T DO THIS. + # WHY: we want range sorting on volume for the RHS label! + # -> if you don't want that then use this but likely you + # only will if we decide to drop unit vlm.. + # vlm_viz.render = False vlm_chart.view.disable_auto_yrange() # Trade rate overlay From 91d41ebf7680f04f1a517a3dadd6a9573542a113 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 3 Feb 2023 14:00:52 -0500 Subject: [PATCH 074/136] Allow y-range input via a `yranges: dict[Viz, tuple[float, float]]` --- piker/ui/_interaction.py | 44 +++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index dc0e47ed3a..e5dfb85180 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -956,6 +956,8 @@ def interact_graphics_cycle( debug_print: bool = False, do_overlay_scaling: bool = True, do_linked_charts: bool = True, + + yranges: tuple[float, float] | None = None, ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', @@ -1044,16 +1046,22 @@ def interact_graphics_cycle( profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') - out = viz.maxmin(i_read_range=i_read_range) - if out is None: - log.warning(f'No yrange provided for {name}!?') - return - ( - ixrng, - read_slc, - yrange - ) = out - profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') + yrange = yranges.get(viz) if yranges else None + if yrange is not None: + # print(f'INPUT {viz.name} yrange: {yrange}') + read_slc = slice(*i_read_range) + + else: + out = viz.maxmin(i_read_range=i_read_range) + if out is None: + log.warning(f'No yrange provided for {name}!?') + return + ( + _, # ixrng, + read_slc, + yrange + ) = out + profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') pi = viz.plot @@ -1078,19 +1086,16 @@ def interact_graphics_cycle( ): ymn, ymx = yrange # print(f'adding {viz.name} to overlay') - # mxmn_groups[viz.name] = out - # viz = chart._vizs[viz_name] # determine start datum in view arr = viz.shm.array in_view = arr[read_slc] + if not in_view.size: + log.warning(f'{viz.name} not in view?') + return + row_start = arr[read_slc.start - 1] - # y_med = (ymx - ymn) / 2 - # y_med = viz.median_from_range( - # read_slc.start, - # read_slc.stop, - # ) if viz.is_ohlc: y_start = row_start['open'] else: @@ -1103,7 +1108,6 @@ def interact_graphics_cycle( y_start, ymn, ymx, - # y_med, read_slc, in_view, ) @@ -1125,10 +1129,8 @@ def interact_graphics_cycle( # y_ref = y_med # up_rng = (ymx - y_ref) / y_ref # down_rng = (ymn - y_ref) / y_ref - # mx_up_rng = max(mx_up_rng, up_rng) # mn_down_rng = min(mn_down_rng, down_rng) - # print( # f'{viz.name}@{chart_name} group mxmn calc\n' # '--------------------\n' @@ -1159,10 +1161,10 @@ def interact_graphics_cycle( len(start_datums) < 2 or not do_overlay_scaling ): + # print(f'ONLY ranging major: {viz.name}') if not major_viz: major_viz = viz - # print(f'ONLY ranging major: 
{viz.name}') major_viz.plot.vb._set_yrange( yrange=yrange, ) From 25cf8df367ad40a4095289200ec37e68ad0e131e Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 3 Feb 2023 14:01:55 -0500 Subject: [PATCH 075/136] Pass windowed y-mxmn to `.interact_graphics_cycle()` calls in display loop --- piker/ui/_display.py | 94 ++++++++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 46 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index cd0b3ac10f..6382a179e6 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -146,12 +146,11 @@ def multi_maxmin( profiler(f'vlm_viz.maxmin({read_slc})') return ( - mx, - # enforcing price can't be negative? # TODO: do we even need this? max(mn, 0), + mx, mx_vlm_in_view, # vlm max ) @@ -354,8 +353,8 @@ async def graphics_update_loop( vlm_viz = vlm_chart._vizs.get('volume') if vlm_chart else None ( - last_mx, last_mn, + last_mx, last_mx_vlm, ) = multi_maxmin( None, @@ -383,7 +382,7 @@ async def graphics_update_loop( # present differently -> likely dark vlm tick_size = symbol.tick_size - tick_margin = 3 * tick_size + tick_margin = 4 * tick_size fast_chart.show() last_quote_s = time.time() @@ -550,8 +549,14 @@ def graphics_update_cycle( # them as an additional graphic. clear_types = _tick_groups['clears'] - mx = varz['last_mx'] - mn = varz['last_mn'] + # TODO: fancier y-range sorting.. + # https://github.com/pikers/piker/issues/325 + # - a proper streaming mxmn algo as per above issue. + # - we should probably scale the view margin based on the size of + # the true range? This way you can slap in orders outside the + # current L1 (only) book range. + mx = lmx = varz['last_mx'] + mn = lmn = varz['last_mn'] mx_vlm_in_view = varz['last_mx_vlm'] # update ohlc sampled price bars @@ -561,24 +566,12 @@ def graphics_update_cycle( (liv and do_px_step) or trigger_all ): + # TODO: i think we're double calling this right now + # since .interact_graphics_cycle() also calls it? + # I guess we can add a guard in there? _, i_read_range, _ = main_viz.update_graphics() profiler('`Viz.update_graphics()` call') - ( - mx_in_view, - mn_in_view, - mx_vlm_in_view, - ) = multi_maxmin( - i_read_range, - main_viz, - ds.vlm_viz, - profiler, - ) - - mx = mx_in_view + tick_margin - mn = mn_in_view - tick_margin - profiler(f'{fqsn} `multi_maxmin()` call') - # don't real-time "shift" the curve to the # left unless we get one of the following: if ( @@ -594,6 +587,23 @@ def graphics_update_cycle( profiler('view incremented') + # NOTE: do this **after** the tread to ensure we take the yrange + # from the most current view x-domain. + ( + mn_in_view, + mx_in_view, + mx_vlm_in_view, + ) = multi_maxmin( + i_read_range, + main_viz, + ds.vlm_viz, + profiler, + ) + + mx = mx_in_view + tick_margin + mn = mn_in_view - tick_margin + profiler(f'{fqsn} `multi_maxmin()` call') + # iterate frames of ticks-by-type such that we only update graphics # using the last update per type where possible. ticks_by_type = quote.get('tbt', {}) @@ -679,14 +689,10 @@ def graphics_update_cycle( # Y-autoranging: adjust y-axis limits based on state tracking # of previous "last" L1 values which are in view. - lmx = varz['last_mx'] - lmn = varz['last_mn'] - mx_diff = mx - lmx mn_diff = mn - lmn - + mx_diff = mx - lmx if ( - mx_diff - or mn_diff + mx_diff or mn_diff ): # complain about out-of-range outliers which can show up # in certain annoying feeds (like ib).. 
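The ordering note above (tread the view first, then window the maxmin) is easy to get backwards; here is a tiny standalone sketch of the intended sequence, with a plain numpy array standing in for the shm buffer:

    import numpy as np

    arr = np.arange(100, dtype=float)  # stand-in for a shm 'close' column
    view = slice(40, 90)               # current in-view x-range

    def tread(view: slice, by: int = 1) -> slice:
        # shift the view window forward one step (new datum appended)
        return slice(view.start + by, view.stop + by)

    view = tread(view)                         # 1) x-shift first..
    mn, mx = arr[view].min(), arr[view].max()  # 2) ..then windowed maxmin
    assert (mn, mx) == (41.0, 90.0)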
@@ -705,7 +711,12 @@ def graphics_update_cycle(
 f'mn_diff: {mn_diff}\n'
 )

- # FAST CHART resize case
+ # TODO: track local liv maxmin without doing a recompute all the
+ # time..plus, just generally the user is more likely to be
+ # zoomed out enough on the slow chart that this is never an
+ # issue (the last datum going out of y-range).
+
+ # FAST CHART y-auto-range resize case
 elif (
 liv
 and not chart._static_yrange == 'axis'
@@ -716,22 +727,15 @@
 main_vb._ic is None
 or not main_vb._ic.is_set()
 ):
- # TODO: incremental update of the median
- # and maxmin driving the y-autoranging.
- # yr = (mn, mx)
+ # print(f'SETTING Y-mxmx -> {main_viz.name}: {(mn, mx)}')
 main_vb.interact_graphics_cycle(
 # do_overlay_scaling=False,
 do_linked_charts=False,
+ yranges={main_viz: (mn, mx)},
 )
- # TODO: we should probably scale
- # the view margin based on the size
- # of the true range? This way you can
- # slap in orders outside the current
- # L1 (only) book range.
-
 profiler('main vb y-autorange')

- # SLOW CHART resize case
+ # SLOW CHART y-auto-range resize case
 (
 _,
 hist_liv,
@@ -746,10 +750,6 @@
 )
 profiler('hist `Viz.incr_info()`')

- # TODO: track local liv maxmin without doing a recompute all the
- # time..plus, just generally the user is more likely to be
- # zoomed out enough on the slow chart that this is never an
- # issue (the last datum going out of y-range).
 # hist_chart = ds.hist_chart
 # if (
 # hist_liv
@@ -764,7 +764,8 @@
 # XXX: update this every draw cycle to ensure y-axis auto-ranging
 # only adjusts when the in-view data co-domain actually expands or
 # contracts.
- varz['last_mx'], varz['last_mn'] = mx, mn
+ varz['last_mn'] = mn
+ varz['last_mx'] = mx

 # TODO: a similar, only-update-full-path-on-px-step approach for all
 # fsp overlays and vlm stuff..
@@ -772,10 +773,12 @@
 # run synchronous update on all `Viz` overlays
 for curve_name, viz in chart._vizs.items():

+ if viz.is_ohlc:
+ continue
+
 # update any overlayed fsp flows
 if (
 curve_name != fqsn
- and not viz.is_ohlc
 ):
 update_fsp_chart(
 viz,
@@ -788,8 +791,7 @@
 # px column to give the user the mx/mn
 # range of that set.
 if (
- curve_name != fqsn
- and liv
+ liv
 # and not do_px_step
 # and not do_rt_update
 ):

From 6ea64a7d2e632d34d31c8066494b8560c643b4da Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Mon, 6 Feb 2023 16:52:27 -0500
Subject: [PATCH 076/136] Iterate all charts (widgets) when only one overlay

The reason (fsp) subcharts were not linked-updating correctly was
because of the early termination of the interact update loop when only
one "overlay" (aka no other overlays than the main curve) is detected.
Obviously in this case we still need to iterate all linked charts in the
set (presuming the user doesn't disable this).

Also tweaks a few internals:
- rename `start_datums: dict` -> `overlay_table`.
- compact all "single curve" checks to one logic block.
- don't collect curve info into the `overlay_table: dict` when
  `do_overlay_scaling=False`.
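The gist of that control-flow fix, as a toy sketch (chart and curve names are invented): short circuit per *chart* with `continue` instead of returning out of the whole linked-set loop:

    plots = {
        'ohlc': ['btcusdt'],           # single curve -> simple yrange set
        'volume': ['volume', 'dvlm'],  # true overlay group
    }
    for chart_name, overlays in plots.items():
        if len(overlays) < 2:
            # single-curve chart: range it directly..
            print(f'{chart_name}: single curve yrange')
            # ..but `continue`, do NOT `return`, so linked
            # subcharts still get iterated and ranged!
            continue
        print(f'{chart_name}: group scaling for {overlays}')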
--- piker/ui/_interaction.py | 41 ++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index e5dfb85180..4e5a1628af 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -951,7 +951,7 @@ def x_uppx(self) -> float: def interact_graphics_cycle( self, - *args, # capture signal-handler related shit + *args, # capture Qt signal (slot) inputs debug_print: bool = False, do_overlay_scaling: bool = True, @@ -1017,7 +1017,7 @@ def interact_graphics_cycle( # determine auto-ranging input for `._set_yrange()`. # this is primarly used for our so called "log-linearized # multi-plot" overlay technique. - start_datums: dict[ + overlay_table: dict[ ViewBox, tuple[ Viz, @@ -1032,6 +1032,10 @@ def interact_graphics_cycle( major_in_view: np.ndarray = None for name, viz in chart._vizs.items(): + if debug_print: + print( + f'UX GRAPHICS CYCLE: {viz.name}@{chart_name}' + ) if not viz.render: # print(f'skipping {flow.name}') @@ -1087,12 +1091,15 @@ def interact_graphics_cycle( ymn, ymx = yrange # print(f'adding {viz.name} to overlay') + if not do_overlay_scaling: + continue + # determine start datum in view arr = viz.shm.array in_view = arr[read_slc] if not in_view.size: log.warning(f'{viz.name} not in view?') - return + continue row_start = arr[read_slc.start - 1] @@ -1103,7 +1110,7 @@ def interact_graphics_cycle( profiler(f'{viz.name}@{chart_name} MINOR curve median') - start_datums[viz.plot.vb] = ( + overlay_table[viz.plot.vb] = ( viz, y_start, ymn, @@ -1152,16 +1159,20 @@ def interact_graphics_cycle( f'{viz.name}@{chart_name} simple std `._set_yrange()`' ) - profiler(f'<{chart_name}>.interact_graphics_cycle({name})') - if not start_datums: - return - - # if no overlays, set lone chart's yrange and short circuit + # NOTE: if no overlay group scaling is wanted by caller, or + # there were no overlay charts detected/collected, (could be + # either no group detected or chart with a single symbol, + # thus a single viz/overlay) then we ONLY set the lone + # chart's (viz) yrange and short circuit to the next chart + # in the linked charts sequence. if ( - len(start_datums) < 2 - or not do_overlay_scaling + not do_overlay_scaling + or len(overlay_table) < 2 + or not overlay_table ): - # print(f'ONLY ranging major: {viz.name}') + if debug_print: + print(f'ONLY ranging major: {viz.name}') + if not major_viz: major_viz = viz @@ -1169,7 +1180,9 @@ def interact_graphics_cycle( yrange=yrange, ) profiler(f'{viz.name}@{chart_name} single curve yrange') - return + continue + + profiler(f'<{chart_name}>.interact_graphics_cycle({name})') # conduct "log-linearized multi-plot" scalings for all groups for ( @@ -1183,7 +1196,7 @@ def interact_graphics_cycle( read_slc, minor_in_view, ) - ) in start_datums.items(): + ) in overlay_table.items(): # we use the ymn/mx verbatim from the major curve # (i.e. the curve measured to have the highest From 3daee0caa992a87445e45053c562943693d92dad Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 6 Feb 2023 17:29:00 -0500 Subject: [PATCH 077/136] Disable overlay scaling on per-symbol-feed updates Since each symbol's feed is multiplexed by quote key (an fqsn), we can avoid scaling overlay curves on any single update, presuming each quote driven cycle will trigger **only** the specific symbol's curve. 
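As a toy illustration of that multiplexing assumption (hypothetical names, not the actual feed bus API), each quote key can dispatch to exactly one symbol's update handler:

    from typing import Callable

    # hypothetical per-fqsn update callbacks
    handlers: dict[str, Callable[[dict], None]] = {
        'btcusdt.binance': lambda q: print('btc viz update', q),
        'ethusdt.binance': lambda q: print('eth viz update', q),
    }

    def on_quotes(quotes: dict[str, dict]) -> None:
        for fqsn, quote in quotes.items():
            # each quote key maps to exactly one symbol's curve, so
            # no cross-curve overlay rescaling is needed per update
            handlers[fqsn](quote)

    on_quotes({'btcusdt.binance': {'last': 16_900.0}})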
Also disables fsp `.interact_graphics_cycle()` calls for now since it seems they aren't really that critical to and we should be using the same technique as above (doing incremental y-range checks/updates) for FSPs as well. --- piker/ui/_display.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 6382a179e6..776d9bc3f7 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -727,10 +727,10 @@ def graphics_update_cycle( main_vb._ic is None or not main_vb._ic.is_set() ): - # print(f'SETTING Y-mxmx -> {main_viz.name}: {(mn, mx)}') + print(f'SETTING Y-mxmx -> {main_viz.name}: {(mn, mx)}') main_vb.interact_graphics_cycle( - # do_overlay_scaling=False, do_linked_charts=False, + do_overlay_scaling=False, yranges={main_viz: (mn, mx)}, ) profiler('main vb y-autorange') @@ -858,14 +858,13 @@ def graphics_update_cycle( # vlm_yr = (0, mx_vlm_in_view * 1.375) main_vlm_viz.plot.vb.interact_graphics_cycle( - # do_overlay_scaling=False, + do_overlay_scaling=False, do_linked_charts=False, ) profiler('`vlm_chart.view.interact_graphics_cycle()`') # update all downstream FSPs for curve_name, viz in vlm_vizs.items(): - if curve_name == 'volume': continue @@ -890,9 +889,10 @@ def graphics_update_cycle( # XXX: without this we get completely # mangled/empty vlm display subchart.. fvb = viz.plot.vb - fvb.interact_graphics_cycle( - do_linked_charts=False, - ) + # fvb.interact_graphics_cycle( + # do_linked_charts=False, + # do_overlay_scaling=False, + # ) profiler( f'Viz[{viz.name}].plot.vb.interact_graphics_cycle()`' ) From c57567ab0d2b186b115ada477584962ea726dbb5 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Feb 2023 16:09:18 -0500 Subject: [PATCH 078/136] No-overlays, y-ranging optimizations When the caller passes `do_overlay_scaling=False` we skip the given chart's `Viz` iteration loop, and set the yrange immediately, then continue to the next chart (if `do_linked_charts` is set) instead of a `continue` short circuit within the viz sub-loop. Deats: - add a `_maybe_calc_yrange()` helper which makes the `yranges` provided-or-not case logic more terse (factored). - add a `do_linked_charts=False` short circuit. - drop the legacy commented swing % calcs stuff. - use the `ChartView._viz` when `do_overlay_scaling=False` thus presuming that we want to handle the viz mapped to *this* view box. - add a `._yrange` "last set yrange" tracking var which keeps record of the last ymn/ymx value set in `._set_yrange()` BEFORE doing range margins; this will be used for incremental update in the display loop. --- piker/ui/_interaction.py | 157 ++++++++++++++++++++++++--------------- 1 file changed, 99 insertions(+), 58 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 4e5a1628af..08ffb6cb27 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -445,6 +445,7 @@ def __init__( # TODO: probably just assign this whenever a new `PlotItem` is # allocated since they're 1to1 with views.. self._viz: Viz | None = None + self._yrange: tuple[float, float] | None = None def start_ic( self, @@ -483,7 +484,7 @@ def signal_ic( async def open_async_input_handler( self, - ) -> 'ChartView': + ) -> ChartView: async with ( _event.open_handlers( @@ -785,7 +786,7 @@ def _set_yrange( # NOTE: this value pairs (more or less) with L1 label text # height offset from from the bid/ask lines. 
- range_margin: float | None = 0.09, + range_margin: float | None = 0.1, bars_range: Optional[tuple[int, int, int, int]] = None, @@ -858,6 +859,11 @@ def _set_yrange( ylow, yhigh = yrange + # always stash last range for diffing by + # incremental update calculations BEFORE adding + # margin. + self._yrange = ylow, yhigh + # view margins: stay within a % of the "true range" if range_margin is not None: diff = yhigh - ylow @@ -870,10 +876,6 @@ def _set_yrange( yhigh * (1 + range_margin), ) - # XXX: this often needs to be unset - # to get different view modes to operate - # correctly! - # print( # f'set limits {self.name}:\n' # f'ylow: {ylow}\n' @@ -975,7 +977,6 @@ def interact_graphics_cycle( # ms_threshold=4, ) - chart = self._chart linked = self.linked if ( do_linked_charts @@ -983,7 +984,9 @@ def interact_graphics_cycle( ): plots = {linked.chart.name: linked.chart} plots |= linked.subplots + else: + chart = self._chart plots = {chart.name: chart} # TODO: a faster single-loop-iterator way of doing this? @@ -1031,42 +1034,45 @@ def interact_graphics_cycle( ] = {} major_in_view: np.ndarray = None - for name, viz in chart._vizs.items(): + # ONLY auto-yrange the viz mapped to THIS view box + if not do_overlay_scaling: + viz = self._viz if debug_print: - print( - f'UX GRAPHICS CYCLE: {viz.name}@{chart_name}' - ) + print(f'ONLY ranging THIS viz: {viz.name}') - if not viz.render: - # print(f'skipping {flow.name}') + out = _maybe_calc_yrange( + viz, + yranges, + profiler, + chart_name, + ) + if out is None: continue - # pass in no array which will read and render from the last - # passed array (normally provided by the display loop.) - in_view, i_read_range, _ = viz.update_graphics() + read_slc, yrange = out + viz.plot.vb._set_yrange(yrange=yrange) + profiler(f'{viz.name}@{chart_name} single curve yrange') - if not in_view: - continue + # don't iterate overlays, just move to next chart + continue - profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') + for name, viz in chart._vizs.items(): - yrange = yranges.get(viz) if yranges else None - if yrange is not None: - # print(f'INPUT {viz.name} yrange: {yrange}') - read_slc = slice(*i_read_range) + if debug_print: + print( + f'UX GRAPHICS CYCLE: {viz.name}@{chart_name}' + ) - else: - out = viz.maxmin(i_read_range=i_read_range) - if out is None: - log.warning(f'No yrange provided for {name}!?') - return - ( - _, # ixrng, - read_slc, - yrange - ) = out - profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') + out = _maybe_calc_yrange( + viz, + yranges, + profiler, + chart_name, + ) + if out is None: + continue + read_slc, yrange = out pi = viz.plot # handle multiple graphics-objs per viewbox cases @@ -1091,9 +1097,6 @@ def interact_graphics_cycle( ymn, ymx = yrange # print(f'adding {viz.name} to overlay') - if not do_overlay_scaling: - continue - # determine start datum in view arr = viz.shm.array in_view = arr[read_slc] @@ -1132,24 +1135,6 @@ def interact_graphics_cycle( major_in_view = in_view profiler(f'{viz.name}@{chart_name} set new major') - # compute directional (up/down) y-range % swing/dispersion - # y_ref = y_med - # up_rng = (ymx - y_ref) / y_ref - # down_rng = (ymn - y_ref) / y_ref - # mx_up_rng = max(mx_up_rng, up_rng) - # mn_down_rng = min(mn_down_rng, down_rng) - # print( - # f'{viz.name}@{chart_name} group mxmn calc\n' - # '--------------------\n' - # f'y_start: {y_start}\n' - # f'ymn: {ymn}\n' - # f'ymx: {ymx}\n' - # f'mx_disp: {mx_disp}\n' - # f'up %: {up_rng * 100}\n' - # f'down %: {down_rng * 100}\n' - # f'mx up %: {mx_up_rng * 
100}\n' - # f'mn down %: {mn_down_rng * 100}\n' - # ) profiler(f'{viz.name}@{chart_name} MINOR curve scale') # non-overlay group case @@ -1165,21 +1150,32 @@ def interact_graphics_cycle( # thus a single viz/overlay) then we ONLY set the lone # chart's (viz) yrange and short circuit to the next chart # in the linked charts sequence. + if ( - not do_overlay_scaling - or len(overlay_table) < 2 + len(overlay_table) < 2 or not overlay_table ): if debug_print: print(f'ONLY ranging major: {viz.name}') + # we're either in `do_overlay_scaling=False` mode + # or there is only one curve so we need to pick + # that "only curve". if not major_viz: major_viz = viz + if yranges is not None: + yrange = yranges.get(major_viz) or yrange + + assert yrange + print(f'ONLY ranging major: {viz.name}') major_viz.plot.vb._set_yrange( yrange=yrange, ) profiler(f'{viz.name}@{chart_name} single curve yrange') + if not do_linked_charts: + return + continue profiler(f'<{chart_name}>.interact_graphics_cycle({name})') @@ -1192,7 +1188,6 @@ def interact_graphics_cycle( y_start, y_min, y_max, - # y_med, read_slc, minor_in_view, ) @@ -1400,3 +1395,49 @@ def interact_graphics_cycle( # breakpoint() profiler.finish() + + +def _maybe_calc_yrange( + viz: Viz, + yranges: dict[Viz, tuple[float, float]], + profiler: Profiler, + chart_name: str, + +) -> tuple[slice, tuple[float, float]] | None: + + if not viz.render: + return + # # print(f'skipping {flow.name}') + # continue + + # pass in no array which will read and render from the last + # passed array (normally provided by the display loop.) + in_view, i_read_range, _ = viz.update_graphics() + + if not in_view: + return + # continue + + profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') + + # check if explicit yranges were passed in by the caller + yrange = yranges.get(viz) if yranges else None + if yrange is not None: + read_slc = slice(*i_read_range) + + else: + out = viz.maxmin(i_read_range=i_read_range) + if out is None: + log.warning(f'No yrange provided for {viz.name}!?') + return + ( + _, # ixrng, + read_slc, + yrange + ) = out + profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') + + return ( + read_slc, + yrange, + ) From a7db6adc2e2e5936f0158a6843496b123f1d6f28 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Feb 2023 16:31:18 -0500 Subject: [PATCH 079/136] Always set the `ChartView._viz` for each plot --- piker/ui/_chart.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 96187bf2b7..d2190d42dc 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1204,6 +1204,10 @@ def draw_curve( ) pi.viz = viz + # so that viewboxes are associated 1-to-1 with + # their parent plotitem + pi.vb._viz = viz + assert isinstance(viz.shm, ShmArray) # TODO: this probably needs its own method? From 0a939311fe6aedd782368e2cc35a8354c1cbce16 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Feb 2023 16:32:27 -0500 Subject: [PATCH 080/136] Only set the specific view's yrange per quote Somewhat of a facepalm but, for incremental update of the auto-yrange from quotes in the display loop obviously we only want to update the associated `Viz`/viewbox for *that* fqsn. Further we don't need to worry about the whole "tick margin" stuff since `._set_yrange()` already adds margin to the yrange by default; thus we remove all of that. 
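For reference, the default margin being relied on here is the padding applied inside `._set_yrange()` (per the `_interaction.py` hunk shown in an earlier patch); extracted as a standalone sketch with an invented function name:

    def pad_yrange(
        ylow: float,
        yhigh: float,
        range_margin: float = 0.09,
    ) -> tuple[float, float]:
        # pad the "true range" by a % of its dispersion,
        # clamping the low end at zero
        diff = yhigh - ylow
        ylow = max(ylow - (diff * range_margin), 0)
        yhigh = min(
            yhigh + (diff * range_margin),
            yhigh * (1 + range_margin),
        )
        return ylow, yhigh

    lo, hi = pad_yrange(100.0, 110.0)
    assert abs(lo - 99.1) < 1e-9 and abs(hi - 110.9) < 1e-9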
--- piker/ui/_display.py | 59 +++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 776d9bc3f7..a350c09e29 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -182,7 +182,6 @@ class DisplayState(Struct): # misc state tracking vars: dict[str, Any] = field( default_factory=lambda: { - 'tick_margin': 0, 'i_last': 0, 'i_last_append': 0, 'last_mx_vlm': 0, @@ -192,7 +191,6 @@ class DisplayState(Struct): ) hist_vars: dict[str, Any] = field( default_factory=lambda: { - 'tick_margin': 0, 'i_last': 0, 'i_last_append': 0, 'last_mx_vlm': 0, @@ -262,7 +260,7 @@ async def increment_history_view( if liv: hist_viz.plot.vb.interact_graphics_cycle( do_linked_charts=False, - # do_overlay_scaling=False, + do_overlay_scaling=False, ) profiler('hist chart yrange view') @@ -381,9 +379,6 @@ async def graphics_update_loop( # levels this might be dark volume we need to # present differently -> likely dark vlm - tick_size = symbol.tick_size - tick_margin = 4 * tick_size - fast_chart.show() last_quote_s = time.time() @@ -408,7 +403,6 @@ async def graphics_update_loop( 'l1': l1, 'vars': { - 'tick_margin': tick_margin, 'i_last': 0, 'i_last_append': 0, 'last_mx_vlm': last_mx_vlm, @@ -529,8 +523,6 @@ def graphics_update_cycle( main_viz = ds.viz index_field = main_viz.index_field - tick_margin = varz['tick_margin'] - ( uppx, liv, @@ -555,14 +547,16 @@ def graphics_update_cycle( # - we should probably scale the view margin based on the size of # the true range? This way you can slap in orders outside the # current L1 (only) book range. - mx = lmx = varz['last_mx'] - mn = lmn = varz['last_mn'] + main_vb = main_viz.plot.vb + this_viz = chart._vizs[fqsn] + this_vb = this_viz.plot.vb + lmn, lmx = this_vb._yrange + mx = lmx + mn = lmn mx_vlm_in_view = varz['last_mx_vlm'] # update ohlc sampled price bars if ( - # do_rt_update - # or do_px_step (liv and do_px_step) or trigger_all ): @@ -590,8 +584,8 @@ def graphics_update_cycle( # NOTE: do this **after** the tread to ensure we take the yrange # from the most current view x-domain. ( - mn_in_view, - mx_in_view, + mn, + mx, mx_vlm_in_view, ) = multi_maxmin( i_read_range, @@ -600,8 +594,6 @@ def graphics_update_cycle( profiler, ) - mx = mx_in_view + tick_margin - mn = mn_in_view - tick_margin profiler(f'{fqsn} `multi_maxmin()` call') # iterate frames of ticks-by-type such that we only update graphics @@ -625,8 +617,20 @@ def graphics_update_cycle( # TODO: make sure IB doesn't send ``-1``! and price > 0 ): - mx = max(price + tick_margin, mx) - mn = min(price - tick_margin, mn) + if ( + price < mn + ): + mn = price + # print(f'{this_viz.name} new MN from TICK {mn}') + + if ( + price > mx + ): + mx = price + # print(f'{this_viz.name} new MX from TICK {mx}') + + # mx = max(price, mx) + # mn = min(price, mn) # clearing price update: # generally, we only want to update grahpics from the *last* @@ -691,8 +695,14 @@ def graphics_update_cycle( # of previous "last" L1 values which are in view. mn_diff = mn - lmn mx_diff = mx - lmx + if ( - mx_diff or mn_diff + mn_diff or mx_diff # covers all cases below? + # (mx - lmx) > 0 # upward expansion + # or (mn - lmn) < 0 # downward expansion + + # or (lmx - mx) > 0 # upward contraction + # or (lmn - mn) < 0 # downward contraction ): # complain about out-of-range outliers which can show up # in certain annoying feeds (like ib).. 
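A quick standalone check (toy numbers) that the single `mn_diff or mx_diff` guard above really does subsume the four commented expansion/contraction cases:

    def covered(lmn, lmx, mn, mx) -> bool:
        # the guard from the hunk above: any non-zero delta
        return bool((mn - lmn) or (mx - lmx))

    cases = [
        (10, 20, 10, 21),  # upward expansion
        (10, 20, 9, 20),   # downward expansion
        (10, 20, 10, 19),  # upward contraction
        (10, 20, 11, 20),  # downward contraction
    ]
    assert all(covered(*c) for c in cases)
    assert not covered(10, 20, 10, 20)  # unchanged range -> no resize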
@@ -721,17 +731,16 @@ def graphics_update_cycle( liv and not chart._static_yrange == 'axis' ): - main_vb = main_viz.plot.vb if ( main_vb._ic is None or not main_vb._ic.is_set() ): - print(f'SETTING Y-mxmx -> {main_viz.name}: {(mn, mx)}') - main_vb.interact_graphics_cycle( + # print(f'SETTING Y-mnmx -> {main_viz.name}: {(mn, mx)}') + this_vb.interact_graphics_cycle( do_linked_charts=False, do_overlay_scaling=False, - yranges={main_viz: (mn, mx)}, + yranges={this_viz: (mn, mx)}, ) profiler('main vb y-autorange') @@ -888,7 +897,7 @@ def graphics_update_cycle( # resizing from last quote?) # XXX: without this we get completely # mangled/empty vlm display subchart.. - fvb = viz.plot.vb + # fvb = viz.plot.vb # fvb.interact_graphics_cycle( # do_linked_charts=False, # do_overlay_scaling=False, From c646b435bfeaace1ec9c548bca79d2bc02d7ee02 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Feb 2023 17:18:29 -0500 Subject: [PATCH 081/136] Incrementally set vlm chart yrange per quote --- piker/ui/_display.py | 54 +++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index a350c09e29..a19581597e 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -28,6 +28,7 @@ from typing import ( Optional, Any, + TYPE_CHECKING, ) import tractor @@ -82,6 +83,9 @@ from ..log import get_logger from .._profile import Profiler +if TYPE_CHECKING: + from ._interaction import ChartView + log = get_logger(__name__) @@ -185,8 +189,8 @@ class DisplayState(Struct): 'i_last': 0, 'i_last_append': 0, 'last_mx_vlm': 0, - 'last_mx': 0, - 'last_mn': 0, + # 'last_mx': 0, + # 'last_mn': 0, } ) hist_vars: dict[str, Any] = field( @@ -194,8 +198,8 @@ class DisplayState(Struct): 'i_last': 0, 'i_last_append': 0, 'last_mx_vlm': 0, - 'last_mx': 0, - 'last_mn': 0, + # 'last_mx': 0, + # 'last_mn': 0, } ) @@ -406,8 +410,8 @@ async def graphics_update_loop( 'i_last': 0, 'i_last_append': 0, 'last_mx_vlm': last_mx_vlm, - 'last_mx': last_mx, - 'last_mn': last_mn, + # 'last_mx': last_mx, + # 'last_mn': last_mn, }, 'globalz': globalz, }) @@ -513,7 +517,7 @@ def graphics_update_cycle( chart = ds.chart vlm_chart = ds.vlm_chart - varz = ds.vars + # varz = ds.vars l1 = ds.l1 flume = ds.flume ohlcv = flume.rt_shm @@ -547,13 +551,13 @@ def graphics_update_cycle( # - we should probably scale the view margin based on the size of # the true range? This way you can slap in orders outside the # current L1 (only) book range. - main_vb = main_viz.plot.vb - this_viz = chart._vizs[fqsn] - this_vb = this_viz.plot.vb + main_vb: ChartView = main_viz.plot.vb + this_viz: Viz = chart._vizs[fqsn] + this_vb: ChartView = this_viz.plot.vb lmn, lmx = this_vb._yrange - mx = lmx - mn = lmn - mx_vlm_in_view = varz['last_mx_vlm'] + mx: float = lmx + mn: float = lmn + mx_vlm_in_view: float | None = None # update ohlc sampled price bars if ( @@ -773,8 +777,8 @@ def graphics_update_cycle( # XXX: update this every draw cycle to ensure y-axis auto-ranging # only adjusts when the in-view data co-domain actually expands or # contracts. - varz['last_mn'] = mn - varz['last_mx'] = mx + # varz['last_mn'] = mn + # varz['last_mx'] = mx # TODO: a similar, only-update-full-path-on-px-step approach for all # fsp overlays and vlm stuff.. @@ -820,8 +824,9 @@ def graphics_update_cycle( # TODO: can we unify this with the above loop? 
if vlm_chart: vlm_vizs = vlm_chart._vizs - main_vlm_viz = vlm_vizs['volume'] + main_vlm_vb = main_vlm_viz.plot.vb + (_, vlm_ymx) = vlm_yrange = main_vlm_vb._yrange # always update y-label ds.vlm_sticky.update_from_data( @@ -859,16 +864,19 @@ def graphics_update_cycle( profiler('`main_vlm_viz.update_graphics()`') if ( - mx_vlm_in_view != varz['last_mx_vlm'] + mx_vlm_in_view + and mx_vlm_in_view != vlm_ymx ): - varz['last_mx_vlm'] = mx_vlm_in_view - - # TODO: incr maxmin update as pass into below.. - # vlm_yr = (0, mx_vlm_in_view * 1.375) - + # in this case we want to scale all overlays in the + # sub-chart but only incrementally update the vlm since + # we already calculated the new range above. + # TODO: in theory we can incrementally update all + # overlays as well though it will require iteration of + # them here in the display loop right? main_vlm_viz.plot.vb.interact_graphics_cycle( - do_overlay_scaling=False, + do_overlay_scaling=True, do_linked_charts=False, + yranges={main_vlm_viz: vlm_yrange}, ) profiler('`vlm_chart.view.interact_graphics_cycle()`') From 74c215d5b2cdff82d71a047f2be3d1cd61042d87 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Feb 2023 17:23:36 -0500 Subject: [PATCH 082/136] Lel, always meant to no-cache the step curve.. --- piker/ui/_curve.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index c9ebebcd3e..358d4844f2 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -407,9 +407,6 @@ def draw_last_datum( # (via it's max / min) even when highly zoomed out. class FlattenedOHLC(Curve): - # avoids strange dragging/smearing artifacts when panning.. - cache_mode: int = QGraphicsItem.NoCache - def draw_last_datum( self, path: QPainterPath, @@ -435,6 +432,11 @@ def draw_last_datum( class StepCurve(Curve): + # avoids strange dragging/smearing artifacts when panning + # as well as mouse over artefacts when the vlm chart series + # is "shorter" then some overlay.. + cache_mode: int = QGraphicsItem.NoCache + def declare_paintables( self, ) -> None: From 972b723a5d42ad5e08f23cef39f5a1b6824d16c5 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 8 Feb 2023 18:22:07 -0500 Subject: [PATCH 083/136] Skip overlay transform calcs on common-pi curves If there is a common `PlotItem` used for a set of `Viz`/curves (on a given view) we don't need to do overlay scaling and thus can also short circuit the viz iteration loop early. --- piker/ui/_interaction.py | 80 +++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 37 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 08ffb6cb27..fad2c13e84 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -1088,12 +1088,19 @@ def interact_graphics_cycle( profiler(f'{viz.name}@{chart_name} common pi sort') + # non-overlay group case + if not viz.is_ohlc: + pi.vb._set_yrange(yrange=yrange) + profiler( + f'{viz.name}@{chart_name} simple std `._set_yrange()`' + ) + # handle overlay log-linearized group scaling cases # TODO: a better predicate here, likely something # to do with overlays and their settings.. - if ( - viz.is_ohlc - ): + # TODO: we probably eventually might want some other + # charts besides OHLC? 
+ else: ymn, ymx = yrange # print(f'adding {viz.name} to overlay') @@ -1137,45 +1144,44 @@ def interact_graphics_cycle( profiler(f'{viz.name}@{chart_name} MINOR curve scale') - # non-overlay group case - else: - pi.vb._set_yrange(yrange=yrange) - profiler( - f'{viz.name}@{chart_name} simple std `._set_yrange()`' - ) - - # NOTE: if no overlay group scaling is wanted by caller, or - # there were no overlay charts detected/collected, (could be - # either no group detected or chart with a single symbol, - # thus a single viz/overlay) then we ONLY set the lone - # chart's (viz) yrange and short circuit to the next chart - # in the linked charts sequence. - + # NOTE: if no there were no overlay charts + # detected/collected (could be either no group detected or + # chart with a single symbol, thus a single viz/overlay) + # then we ONLY set the lone chart's (viz) yrange and short + # circuit to the next chart in the linked charts loop. IOW + # there's no reason to go through the overlay dispersion + # scaling in the next loop below when only one curve is + # detected. if ( - len(overlay_table) < 2 - or not overlay_table + not mxmns_by_common_pi + and len(overlay_table) < 2 ): if debug_print: print(f'ONLY ranging major: {viz.name}') - # we're either in `do_overlay_scaling=False` mode - # or there is only one curve so we need to pick - # that "only curve". - if not major_viz: - major_viz = viz - - if yranges is not None: - yrange = yranges.get(major_viz) or yrange - - assert yrange - print(f'ONLY ranging major: {viz.name}') - major_viz.plot.vb._set_yrange( - yrange=yrange, + out = _maybe_calc_yrange( + viz, + yranges, + profiler, + chart_name, ) + if out is None: + continue + + read_slc, yrange = out + viz.plot.vb._set_yrange(yrange=yrange) profiler(f'{viz.name}@{chart_name} single curve yrange') - if not do_linked_charts: - return + # move to next chart in linked set since + # no overlay transforming is needed. + continue + + elif ( + mxmns_by_common_pi + and not major_viz + ): + # move to next chart in linked set since + # no overlay transforming is needed. continue profiler(f'<{chart_name}>.interact_graphics_cycle({name})') @@ -1394,6 +1400,9 @@ def interact_graphics_cycle( # if vrs[1][0] > major_mn: # breakpoint() + if not do_linked_charts: + return + profiler.finish() @@ -1407,8 +1416,6 @@ def _maybe_calc_yrange( if not viz.render: return - # # print(f'skipping {flow.name}') - # continue # pass in no array which will read and render from the last # passed array (normally provided by the display loop.) 
@@ -1416,7 +1423,6 @@ def _maybe_calc_yrange( if not in_view: return - # continue profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') From db1e0a04f8cfe9f13594d92e87227f24ad18e19d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Feb 2023 09:21:04 -0500 Subject: [PATCH 084/136] Only use last `ChartView._yrange` if set --- piker/ui/_display.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index a19581597e..a91246e601 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -554,7 +554,12 @@ def graphics_update_cycle( main_vb: ChartView = main_viz.plot.vb this_viz: Viz = chart._vizs[fqsn] this_vb: ChartView = this_viz.plot.vb - lmn, lmx = this_vb._yrange + this_yr = this_vb._yrange + if this_yr: + lmn, lmx = this_yr + else: + lmn = lmx = 0 + mx: float = lmx mn: float = lmn mx_vlm_in_view: float | None = None From 2d7359851f925c41c3dc2dc21c10d15efa73e533 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Feb 2023 09:21:30 -0500 Subject: [PATCH 085/136] Go back to no-cache on OHLC downsample line --- piker/ui/_curve.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 358d4844f2..1e3c4e758a 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -407,6 +407,11 @@ def draw_last_datum( # (via it's max / min) even when highly zoomed out. class FlattenedOHLC(Curve): + # avoids strange dragging/smearing artifacts when panning + # as well as mouse over artefacts when the vlm chart series + # is "shorter" then some overlay.. + cache_mode: int = QGraphicsItem.NoCache + def draw_last_datum( self, path: QPainterPath, From cda3bcc1f60d8fc84a5f8f20b51bcb88cae9e984 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Feb 2023 14:33:27 -0500 Subject: [PATCH 086/136] Expose `._set_yrange()` kwargs via `yrange_kwargs: dict` Since it can be desirable to dynamically adjust inputs to the y-ranging method (such as in the display loop when a chart is very zoomed in), this adds such support through a new `yrange_kwargs: dict[Viz, dict]` which replaces the `yrange` tuple we were passing through prior. Also, adjusts the y-range margin back to the original 0.09 of the diff now that we can support dynamic control. --- piker/ui/_interaction.py | 45 ++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index fad2c13e84..7682b8f720 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -25,6 +25,7 @@ ) import time from typing import ( + Any, Optional, Callable, TYPE_CHECKING, @@ -786,7 +787,7 @@ def _set_yrange( # NOTE: this value pairs (more or less) with L1 label text # height offset from from the bid/ask lines. 
- range_margin: float | None = 0.1, + range_margin: float | None = 0.09, bars_range: Optional[tuple[int, int, int, int]] = None, @@ -959,7 +960,10 @@ def interact_graphics_cycle( do_overlay_scaling: bool = True, do_linked_charts: bool = True, - yranges: tuple[float, float] | None = None, + yrange_kwargs: dict[ + str, + tuple[float, float], + ] | None = None, ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', @@ -1011,8 +1015,6 @@ def interact_graphics_cycle( major_viz: Viz = None major_mx: float = 0 major_mn: float = float('inf') - # mx_up_rng: float = 0 - # mn_down_rng: float = 0 mx_disp: float = 0 # collect certain flows have grapics objects **in seperate @@ -1042,15 +1044,15 @@ def interact_graphics_cycle( out = _maybe_calc_yrange( viz, - yranges, + yrange_kwargs, profiler, chart_name, ) if out is None: continue - read_slc, yrange = out - viz.plot.vb._set_yrange(yrange=yrange) + read_slc, yrange_kwargs = out + viz.plot.vb._set_yrange(**yrange_kwargs) profiler(f'{viz.name}@{chart_name} single curve yrange') # don't iterate overlays, just move to next chart @@ -1065,14 +1067,15 @@ def interact_graphics_cycle( out = _maybe_calc_yrange( viz, - yranges, + yrange_kwargs, profiler, chart_name, ) if out is None: continue - read_slc, yrange = out + read_slc, yrange_kwargs = out + yrange = yrange_kwargs['yrange'] pi = viz.plot # handle multiple graphics-objs per viewbox cases @@ -1161,15 +1164,15 @@ def interact_graphics_cycle( out = _maybe_calc_yrange( viz, - yranges, + yrange_kwargs, profiler, chart_name, ) if out is None: continue - read_slc, yrange = out - viz.plot.vb._set_yrange(yrange=yrange) + read_slc, yrange_kwargs = out + viz.plot.vb._set_yrange(**yrange_kwargs) profiler(f'{viz.name}@{chart_name} single curve yrange') # move to next chart in linked set since @@ -1358,8 +1361,6 @@ def interact_graphics_cycle( '--------------------\n' f'y_minor_intersect: {y_minor_intersect}\n' f'y_major_intersect: {y_major_intersect}\n' - # f'mn_down_rng: {mn_down_rng * 100}\n' - # f'mx_up_rng: {mx_up_rng * 100}\n' f'scaled ymn: {ymn}\n' f'scaled ymx: {ymx}\n' f'scaled mx_disp: {mx_disp}\n' @@ -1408,11 +1409,14 @@ def interact_graphics_cycle( def _maybe_calc_yrange( viz: Viz, - yranges: dict[Viz, tuple[float, float]], + yrange_kwargs: dict[Viz, dict[str, Any]], profiler: Profiler, chart_name: str, -) -> tuple[slice, tuple[float, float]] | None: +) -> tuple[ + slice, + dict, +] | None: if not viz.render: return @@ -1426,9 +1430,9 @@ def _maybe_calc_yrange( profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') - # check if explicit yranges were passed in by the caller - yrange = yranges.get(viz) if yranges else None - if yrange is not None: + # check if explicit yrange (kwargs) was passed in by the caller + yrange_kwargs = yrange_kwargs.get(viz) if yrange_kwargs else None + if yrange_kwargs is not None: read_slc = slice(*i_read_range) else: @@ -1442,8 +1446,9 @@ def _maybe_calc_yrange( yrange ) = out profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') + yrange_kwargs = {'yrange': yrange} return ( read_slc, - yrange, + yrange_kwargs, ) From 091afccb72e32e83fb9ed2b25fe3cf5b3230f038 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Feb 2023 14:42:01 -0500 Subject: [PATCH 087/136] Dynamically adjust y-range margin in display loop When zoomed in alot, and thus a quote driven y-range resize takes place, it makes more sense to increase the `range_margin: float` input to `._set_yrange()` to ensure all L1 labels stay in view; generally the more zoomed in, - the 
smaller the y-range is and thus the larger the needed margin (on that range's dispersion diff) would be, - it's more likely to get a last datum move outside the previous range. Also, always do overlayT style scaling on the slow chart whenever it treads. --- piker/ui/_display.py | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index a91246e601..d209159453 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -264,7 +264,7 @@ async def increment_history_view( if liv: hist_viz.plot.vb.interact_graphics_cycle( do_linked_charts=False, - do_overlay_scaling=False, + do_overlay_scaling=True, # always overlayT slow chart ) profiler('hist chart yrange view') @@ -560,9 +560,10 @@ def graphics_update_cycle( else: lmn = lmx = 0 - mx: float = lmx mn: float = lmn + mx: float = lmx mx_vlm_in_view: float | None = None + yrange_margin = 0.09 # update ohlc sampled price bars if ( @@ -630,13 +631,15 @@ def graphics_update_cycle( price < mn ): mn = price - # print(f'{this_viz.name} new MN from TICK {mn}') + yrange_margin = 0.16 + # # print(f'{this_viz.name} new MN from TICK {mn}') if ( price > mx ): mx = price - # print(f'{this_viz.name} new MX from TICK {mx}') + yrange_margin = 0.16 + # # print(f'{this_viz.name} new MX from TICK {mx}') # mx = max(price, mx) # mn = min(price, mn) @@ -740,6 +743,18 @@ def graphics_update_cycle( liv and not chart._static_yrange == 'axis' ): + # NOTE: this auto-yranging approach is a sort of, hybrid, + # between always aligning overlays to the their common ref + # sample and not updating at all: + # - whenever an interaction happens the overlays are scaled + # to one another and thus are ref-point aligned and + # scaled. + # - on treads and range updates due to new mn/mx from last + # datum, we don't scale to the overlayT instead only + # adjusting when the latest datum is outside the previous + # dispersion range. + mn = min(mn, lmn) + mx = max(mx, lmx) if ( main_vb._ic is None @@ -748,8 +763,16 @@ def graphics_update_cycle( # print(f'SETTING Y-mnmx -> {main_viz.name}: {(mn, mx)}') this_vb.interact_graphics_cycle( do_linked_charts=False, + # TODO: we could optionally offer always doing this + # on treads thus always keeping fast-chart overlays + # aligned by their LHS datum? do_overlay_scaling=False, - yranges={this_viz: (mn, mx)}, + yrange_kwargs={ + this_viz: { + 'yrange': (mn, mx), + 'range_margin': yrange_margin, + }, + } ) profiler('main vb y-autorange') @@ -881,7 +904,12 @@ def graphics_update_cycle( main_vlm_viz.plot.vb.interact_graphics_cycle( do_overlay_scaling=True, do_linked_charts=False, - yranges={main_vlm_viz: vlm_yrange}, + yrange_kwargs={ + main_vlm_viz: { + 'yrange': vlm_yrange, + # 'range_margin': yrange_margin, + }, + }, ) profiler('`vlm_chart.view.interact_graphics_cycle()`') From 3dc1f66ff6cc8d65757c3eb156a13b3ca473b9e1 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 9 Feb 2023 14:57:00 -0500 Subject: [PATCH 088/136] Go back to caching on all curves Despite there being artifacts when interacting, the speedups when cross-hair-ing are just too good to ignore. We can always play with disabling caches when interaction takes place much like we do with feed pausing. 
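The "disable caches when interaction takes place" idea floated here could look roughly like the following; a sketch only, assuming PyQt5 bindings and that a caller wires it into the existing interaction start/stop hooks:

    from PyQt5.QtWidgets import QGraphicsItem

    def set_interaction_cache_mode(
        item: QGraphicsItem,
        interacting: bool,
    ) -> None:
        # drop the paint cache while panning/zooming to avoid the
        # smearing artifacts, restore it after for fast crosshairing
        item.setCacheMode(
            QGraphicsItem.NoCache
            if interacting
            else QGraphicsItem.DeviceCoordinateCache
        )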
--- piker/ui/_curve.py | 4 ++-- piker/ui/_ohlc.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 1e3c4e758a..4206417358 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -410,7 +410,7 @@ class FlattenedOHLC(Curve): # avoids strange dragging/smearing artifacts when panning # as well as mouse over artefacts when the vlm chart series # is "shorter" then some overlay.. - cache_mode: int = QGraphicsItem.NoCache + # cache_mode: int = QGraphicsItem.NoCache def draw_last_datum( self, @@ -440,7 +440,7 @@ class StepCurve(Curve): # avoids strange dragging/smearing artifacts when panning # as well as mouse over artefacts when the vlm chart series # is "shorter" then some overlay.. - cache_mode: int = QGraphicsItem.NoCache + # cache_mode: int = QGraphicsItem.NoCache def declare_paintables( self, diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 104b860cf0..344805e8a9 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -93,7 +93,7 @@ class BarItems(FlowGraphic): ''' # XXX: causes this weird jitter bug when click-drag panning # where the path curve will awkwardly flicker back and forth? - cache_mode: int = QGraphicsItem.NoCache + # cache_mode: int = QGraphicsItem.NoCache def __init__( self, From a6d1053c5017680673bcac0e7342ae4865e6d183 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 11 Feb 2023 15:36:02 -0500 Subject: [PATCH 089/136] Facepalm, align overlay plot view exactly to parent Previously we were aligning the child's `PlotItem` to the "root" (top most) overlays `ViewBox`..smh. This is why there was a weird gap on the LHS next to the 'left' price axes: something weird in the implied axes offsets was getting jammed in that rect. Also comments out "the-skipping-of" moving axes from the overlay's `PlotItem.layout` to the root's linear layout(s) when an overlay's axis is read as not visible; this isn't really necessary nor useful and if we want to remove the axes entirely we should do it explicitly and/or provide a way through the `ComposeGridLayout` API. --- piker/ui/_overlay.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/piker/ui/_overlay.py b/piker/ui/_overlay.py index ad11c5e490..6b2d1bd503 100644 --- a/piker/ui/_overlay.py +++ b/piker/ui/_overlay.py @@ -201,18 +201,19 @@ def insert_plotitem( axis_view = axis.linkedView() assert axis_view is plotitem.vb - if ( - not axis.isVisible() - - # XXX: we never skip moving the axes for the *root* - # plotitem inserted (even if not shown) since we need to - # move all the hidden axes into linear sub-layouts for - # that "central" plot in the overlay. Also if we don't - # do it there's weird geomoetry calc offsets that make - # view coords slightly off somehow .. smh - and not len(self.pitems) == 0 - ): - continue + # if ( + # not axis.isVisible() + + # # XXX: we never skip moving the axes for the *root* + # # plotitem inserted (even if not shown) since we need to + # # move all the hidden axes into linear sub-layouts for + # # that "central" plot in the overlay. Also if we don't + # # do it there's weird geomoetry calc offsets that make + # # view coords slightly off somehow .. 
smh + # and not len(self.pitems) == 0 + # ): + # print(f'SKIPPING MOVE: {plotitem.name}:{name} -> {axis}') + # continue # invert insert index for layouts which are # not-left-to-right, top-to-bottom insert oriented @@ -498,10 +499,10 @@ def broadcast( else: insert_index, axes = self.layout.insert_plotitem(index, plotitem) - plotitem.setGeometry(root.vb.sceneBoundingRect()) + plotitem.vb.setGeometry(root.vb.sceneBoundingRect()) def size_to_viewbox(vb: 'ViewBox'): - plotitem.setGeometry(vb.sceneBoundingRect()) + plotitem.vb.setGeometry(root.vb.sceneBoundingRect()) root.vb.sigResized.connect(size_to_viewbox) From cb5e2d48e22153bfca152d02a511a389d13128be Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 11 Feb 2023 15:55:48 -0500 Subject: [PATCH 090/136] Add hack-zone UI REPL access via `ctl-u` --- piker/ui/_interaction.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 7682b8f720..4f4490cae0 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -79,7 +79,7 @@ async def handle_viewmode_kb_inputs( - view: 'ChartView', + view: ChartView, recv_chan: trio.abc.ReceiveChannel, ) -> None: @@ -148,6 +148,20 @@ async def handle_viewmode_kb_inputs( if mods == Qt.ControlModifier: ctrl = True + # UI REPL-shell + if ( + ctrl and key in { + Qt.Key_U, + } + ): + import tractor + god = order_mode.godw + feed = order_mode.feed + chart = order_mode.chart + vlm_chart = chart.linked.subplots['volume'] + dvlm_pi = vlm_chart._vizs['dolla_vlm'].plot + await tractor.breakpoint() + # SEARCH MODE # # ctlr-/ for "lookup", "search" -> open search tree if ( @@ -319,7 +333,7 @@ async def handle_viewmode_kb_inputs( async def handle_viewmode_mouse( - view: 'ChartView', + view: ChartView, recv_chan: trio.abc.ReceiveChannel, ) -> None: From d5ba26cfaf551871ba2b933e9640d2764d46ea01 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 11 Feb 2023 16:07:31 -0500 Subject: [PATCH 091/136] Try to hide all axes even when removed --- piker/ui/_chart.py | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index d2190d42dc..21886d7e60 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -116,14 +116,6 @@ def __init__( self.hbox.addLayout(self.vbox) - # self.toolbar_layout = QHBoxLayout() - # self.toolbar_layout.setContentsMargins(0, 0, 0, 0) - # self.vbox.addLayout(self.toolbar_layout) - - # self.init_timeframes_ui() - # self.init_strategy_ui() - # self.vbox.addLayout(self.hbox) - self._chart_cache: dict[ str, tuple[LinkedSplits, LinkedSplits], @@ -143,15 +135,18 @@ def __init__( # and the window does not? Never right?! # self.reg_for_resize(self) - @property - def linkedsplits(self) -> LinkedSplits: - return self.rt_linked - - # XXX: strat loader/saver that we don't need yet. + # TODO: strat loader/saver that we don't need yet. # def init_strategy_ui(self): + # self.toolbar_layout = QHBoxLayout() + # self.toolbar_layout.setContentsMargins(0, 0, 0, 0) + # self.vbox.addLayout(self.toolbar_layout) # self.strategy_box = StrategyBoxWidget(self) # self.toolbar_layout.addWidget(self.strategy_box) + @property + def linkedsplits(self) -> LinkedSplits: + return self.rt_linked + def set_chart_symbols( self, group_key: tuple[str], # of form . 
@@ -432,7 +427,7 @@ def __init__( self.godwidget = godwidget self.chart: ChartPlotWidget = None # main (ohlc) chart - self.subplots: dict[tuple[str, ...], ChartPlotWidget] = {} + self.subplots: dict[str, ChartPlotWidget] = {} self.godwidget = godwidget # placeholder for last appended ``PlotItem``'s bottom axis. @@ -1058,6 +1053,7 @@ def increment_view( # breakpoint() return + # should trigger broadcast on all overlays right? view.setXRange( min=l + x_shift, max=r + x_shift, @@ -1107,12 +1103,6 @@ def overlay_plotitem( pi.chart_widget = self pi.hideButtons() - # hide all axes not named by ``axis_side`` - for axname in ( - ({'bottom'} | allowed_sides) - {axis_side} - ): - pi.hideAxis(axname) - # compose this new plot's graphics with the current chart's # existing one but with separate axes as neede and specified. self.pi_overlay.add_plotitem( @@ -1126,6 +1116,15 @@ def overlay_plotitem( link_axes=(0,), ) + # hide all axes not named by ``axis_side`` + for axname in ( + ({'bottom'} | allowed_sides) - {axis_side} + ): + try: + pi.hideAxis(axname) + except Exception: + pass + # add axis title # TODO: do we want this API to still work? # raxis = pi.getAxis('right') From 5f470d6122acd0e24dd8baddd4d4632cb55a3985 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 16 Feb 2023 15:23:56 -0500 Subject: [PATCH 092/136] Rework overlay pin technique: "align to first" As part of solving a final bullet-issue in #455, which is specifically a case: - with N > 2 curves, one of which is the "major" dispersion curve" and the others are "minors", - we can run into a scenario where some minor curve which gets pinned to the major (due to the original "pinning technique" -> "align to major") at some `P(t)` which is *not* the major's minimum / maximum due to the minor having a smaller/shorter support and thus, - requires that in order to show then max/min on the minor curve we have to expand the range of the major curve as well but, - that also means any previously scaled (to the major) minor curves need to be adjusted as well or they'll not be pinned to the major the same way! I originally was trying to avoid doing the recursive iteration back through all previously scaled minor curves and instead decided to try implementing the "per side" curve dispersion detection (as was originally attempted when first starting this work). The idea is to decide which curve's up or down "swing in % returns" would determine the global y-range *on that side*. Turns out I stumbled on the "align to first" technique in the process: "for each overlay curve we align its earliest sample (in time) to the same level of the earliest such sample for whatever is deemed the major (directionally disperse) curve in view". I decided (with help) that this "pin to first" approach/style is equally as useful and maybe often more so when wanting to view support-disjoint time series: - instead of compressing the y-range on "longer series which have lesser sigma" to make whatever "shorter but larger-sigma series" pin to it at an intersect time step, this instead will expand the price ranges based on the earliest time step in each series. - the output global-returns-overlay-range for any N-set of series is equal to the same in the previous "pin to intersect time" technique. - the only time this technique seems less useful is for overlaying market feeds which have the same destination asset but different source assets (eg. 
btceur and btcusd on the same chart since if one of the series is shorter it will always be aligned to the earliest datum on the longer instead of more naturally to the intersect sample level as was in the previous approach). As such I'm going to keep this technique as discovered and will later add back optional support for the "align to intersect" approach from previous (which will again require detecting the highest dispersion curve direction-agnostic) and pin all minors to the price level at which they start on the major. Further details of the implementation rework in `.interact_graphics_cycle()` include: - add `intersect_from_longer()` to detect and deliver a common datum from 2 series which are different in length: the first time-index sample in the longer. - Rewrite the drafted `OverlayT` to only compute (inversed log-returns) transforms for a single direction and use 2 instances, one for each direction inside the `Viz`-overlay iteration loop. - do all dispersion-per-side major curve detection in the first pass of all `Viz`s on a plot, instead updating the `OverlayT` instances for each side and compensating for any length mismatch and rescale-to-minor cases in each loop cycle. --- piker/ui/_interaction.py | 541 +++++++++++++++++++++------------------ 1 file changed, 298 insertions(+), 243 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 4f4490cae0..c683f4a1d3 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -155,11 +155,11 @@ async def handle_viewmode_kb_inputs( } ): import tractor - god = order_mode.godw - feed = order_mode.feed - chart = order_mode.chart - vlm_chart = chart.linked.subplots['volume'] - dvlm_pi = vlm_chart._vizs['dolla_vlm'].plot + god = order_mode.godw # noqa + feed = order_mode.feed # noqa + chart = order_mode.chart # noqa + vlm_chart = chart.linked.subplots['volume'] # noqa + dvlm_pi = vlm_chart._vizs['dolla_vlm'].plot # noqa await tractor.breakpoint() # SEARCH MODE # @@ -360,49 +360,6 @@ async def handle_viewmode_mouse( view.order_mode.submit_order() -class OverlayT(Struct): - ''' - An overlay co-domain range transformer. - - Used to translate and apply a range from one y-range - to another based on a returns logarithm: - - R(ymn, ymx, yref) = (ymx - yref)/yref - - which gives the log-scale multiplier, and - - ymx_t = yref * (1 + R) - - which gives the inverse to translate to the same value - in the target co-domain. 
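With throwaway numbers: a major curve whose first in-view datum is
y_ref = 100 and which ranges 90-120 in view gives

    R_up = (120 - 100) / 100     # = 0.20
    R_down = (90 - 100) / 100    # = -0.10

    # a minor whose first in-view datum is 50 is then pinned to the
    # same % swings via the inverse transform:
    ymx_t = 50 * (1 + R_up)      # = 60.0
    ymn_t = 50 * (1 + R_down)    # = 45.0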
- - ''' - viz: Viz # viz with largest measured dispersion - - mx: float = 0 - mn: float = float('inf') - - up_swing: float = 0 - down_swing: float = 0 - disp: float = 0 - - def loglin_from_range( - self, - - y_ref: float, # reference value for dispersion metric - mn: float, # min y in target log-lin range - mx: float, # max y in target log-lin range - offset: float, # y-offset to start log-scaling from - - ) -> tuple[float, float]: - r_up = (mx - y_ref) / y_ref - r_down = (mn - y_ref) / y_ref - ymn = offset * (1 + r_down) - ymx = offset * (1 + r_up) - - return ymn, ymx - - class ChartView(ViewBox): ''' Price chart view box with interaction behaviors you'd expect from @@ -1048,7 +1005,6 @@ def interact_graphics_cycle( np.ndarray, # in-view array ], ] = {} - major_in_view: np.ndarray = None # ONLY auto-yrange the viz mapped to THIS view box if not do_overlay_scaling: @@ -1072,12 +1028,22 @@ def interact_graphics_cycle( # don't iterate overlays, just move to next chart continue - for name, viz in chart._vizs.items(): + # create a group overlay log-linearized y-range transform to + # track and eventually inverse transform all overlay curves + # to a common target max dispersion range. + dnt = OverlayT() + upt = OverlayT() - if debug_print: - print( - f'UX GRAPHICS CYCLE: {viz.name}@{chart_name}' - ) + if debug_print: + print( + f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n' + + + '#'*100 + + + '\n' + ) + + for name, viz in chart._vizs.items(): out = _maybe_calc_yrange( viz, @@ -1119,7 +1085,6 @@ def interact_graphics_cycle( # charts besides OHLC? else: ymn, ymx = yrange - # print(f'adding {viz.name} to overlay') # determine start datum in view arr = viz.shm.array @@ -1128,36 +1093,169 @@ def interact_graphics_cycle( log.warning(f'{viz.name} not in view?') continue - row_start = arr[read_slc.start - 1] + # row_start = arr[read_slc.start - 1] + row_start = arr[read_slc.start] if viz.is_ohlc: - y_start = row_start['open'] + y_ref = row_start['open'] else: - y_start = row_start[viz.name] + y_ref = row_start[viz.name] profiler(f'{viz.name}@{chart_name} MINOR curve median') overlay_table[viz.plot.vb] = ( viz, - y_start, + y_ref, ymn, ymx, read_slc, in_view, ) - # find curve with max dispersion - disp = abs(ymx - ymn) / y_start + key = 'open' if viz.is_ohlc else viz.name + start_t = in_view[0]['time'] + r_down = (ymn - y_ref) / y_ref + r_up = (ymx - y_ref) / y_ref + + msg = ( + f'### {viz.name}@{chart_name} ###\n' + f'y_ref: {y_ref}\n' + f'down disp: {r_down}\n' + f'up disp: {r_up}\n' + ) + profiler(msg) + if debug_print: + print(msg) # track the "major" curve as the curve with most # dispersion. + if ( + dnt.rng is None + or ( + r_down < dnt.rng + and r_down < 0 + ) + ): + dnt.viz = viz + dnt.rng = r_down + dnt.in_view = in_view + dnt.start_t = in_view[0]['time'] + major_mn = ymn + + msg = f'NEW DOWN: {viz.name}@{chart_name} r:{r_down}\n' + profiler(msg) + if debug_print: + print(msg) + else: + # minor in the down swing range so check that if + # we apply the current rng to the minor that it + # doesn't go outside the current range for the major + # otherwise we recompute the minor's range (when + # adjusted for it's intersect point to be the new + # major's range. 
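Throwaway numbers for that out-of-range case: with the current down
transform at r_down = -0.10, a minor with y_ref = 100 but an in-view
low of 85 gives

    scaled_mn = 100 * (1 + -0.10)    # = 90.0, still above the 85.0 low

    # so the down swing is re-derived from the minor,
    # r_down = (85 - 100) / 100 = -0.15, and the major's min is
    # re-pinned from its sample at the intersect time:
    # new_major_ymn = y_maj_ref * (1 + -0.15)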
+ intersect = intersect_from_longer( + dnt.start_t, + dnt.in_view, + start_t, + in_view, + ) + profiler(f'{viz.name}@{chart_name} intersect by t') + + if intersect: + longer_in_view, _t, i = intersect + + scaled_mn = dnt.apply_rng(y_ref) + if scaled_mn > ymn: + # after major curve scaling we detected + # the minor curve is still out of range + # so we need to adjust the major's range + # to include the new composed range. + y_maj_ref = longer_in_view[key] + new_major_ymn = ( + y_maj_ref + * + (1 + r_down) + ) + + # rewrite the major range to the new + # minor-pinned-to-major range and mark + # the transform as "virtual". + msg = ( + f'EXPAND DOWN bc {viz.name}@{chart_name}\n' + f'y_start epoch time @ {_t}:\n' + f'y_maj_ref @ {_t}: {y_maj_ref}\n' + f'R: {dnt.rng} -> {r_down}\n' + f'MN: {major_mn} -> {new_major_ymn}\n' + ) + dnt.rng = r_down + major_mn = dnt.y_val = new_major_ymn + profiler(msg) + if debug_print: + print(msg) + + if ( + upt.rng is None + or ( + r_up > upt.rng + and r_up > 0 + ) + ): + upt.rng = r_up + upt.viz = viz + upt.in_view = in_view + upt.start_t = in_view[0]['time'] + major_mx = ymx + msg = f'NEW UP: {viz.name}@{chart_name} r:{r_up}\n' + profiler(msg) + if debug_print: + print(msg) + + else: + intersect = intersect_from_longer( + upt.start_t, + upt.in_view, + start_t, + in_view, + ) + profiler(f'{viz.name}@{chart_name} intersect by t') + + if intersect: + longer_in_view, _t, i = intersect + + scaled_mx = upt.apply_rng(y_ref) + if scaled_mx < ymx: + # after major curve scaling we detected + # the minor curve is still out of range + # so we need to adjust the major's range + # to include the new composed range. + y_maj_ref = longer_in_view[key] + new_major_ymx = ( + y_maj_ref + * + (1 + r_up) + ) + + # rewrite the major range to the new + # minor-pinned-to-major range and mark + # the transform as "virtual". + msg = ( + f'EXPAND UP bc {viz.name}@{chart_name}:\n' + f'y_maj_ref @ {_t}: {y_maj_ref}\n' + f'R: {upt.rng} -> {r_up}\n' + f'MX: {major_mx} -> {new_major_ymx}\n' + ) + upt.rng = r_up + major_mx = upt.y_val = new_major_ymx + profiler(msg) + print(msg) + + # find curve with max dispersion + disp = abs(ymx - ymn) / y_ref if disp > mx_disp: major_viz = viz mx_disp = disp major_mn = ymn major_mx = ymx - major_in_view = in_view - profiler(f'{viz.name}@{chart_name} set new major') profiler(f'{viz.name}@{chart_name} MINOR curve scale') @@ -1203,6 +1301,15 @@ def interact_graphics_cycle( profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + # if a minor curves scaling brings it "outside" the range of + # the major curve (in major curve co-domain terms) then we + # need to rescale the major to also include this range. The + # below placeholder denotes when this occurs. + # group_mxmn: None | tuple[float, float] = None + + # TODO: probably re-write this loop as a compiled cpython or + # numba func. + # conduct "log-linearized multi-plot" scalings for all groups for ( view, @@ -1216,169 +1323,7 @@ def interact_graphics_cycle( ) ) in overlay_table.items(): - # we use the ymn/mx verbatim from the major curve - # (i.e. the curve measured to have the highest - # dispersion in view). - if viz is major_viz: - ymn = y_min - ymx = y_max - continue - - else: - key = 'open' if viz.is_ohlc else viz.name - - # handle case where major and minor curve(s) have - # a disjoint x-domain (one curve is smaller in - # length then the other): - # - find the highest (time) index common to both - # curves. 
- # - slice out the first "intersecting" y-value from - # both curves for use in log-linear scaling such - # that the intersecting y-value is used as the - # reference point for scaling minor curve's - # y-range based on the major curves y-range. - - # get intersection point y-values for both curves - minor_in_view_start = minor_in_view[0] - minor_i_start = minor_in_view_start['index'] - minor_i_start_t = minor_in_view_start['time'] - - major_in_view_start = major_in_view[0] - major_i_start = major_in_view_start['index'] - major_i_start_t = major_in_view_start['time'] - - y_major_intersect = major_in_view_start[key] - y_minor_intersect = minor_in_view_start[key] - - profiler(f'{viz.name}@{chart_name} intersect detection') - - tdiff = (major_i_start_t - minor_i_start_t) - if debug_print: - print( - f'{major_viz.name} time diff with minor:\n' - f'maj:{major_i_start_t}\n' - '-\n' - f'min:{minor_i_start_t}\n' - f'=> {tdiff}\n' - ) - - # major has later timestamp adjust minor - if tdiff > 0: - slc = slice_from_time( - arr=minor_in_view, - start_t=major_i_start_t, - stop_t=major_i_start_t, - ) - y_minor_intersect = minor_in_view[slc.start][key] - profiler(f'{viz.name}@{chart_name} intersect by t') - - # minor has later timestamp adjust major - elif tdiff < 0: - slc = slice_from_time( - arr=major_in_view, - start_t=minor_i_start_t, - stop_t=minor_i_start_t, - ) - y_major_intersect = major_in_view[slc.start][key] - - profiler(f'{viz.name}@{chart_name} intersect by t') - - if debug_print: - print( - f'major_i_start: {major_i_start}\n' - f'major_i_start_t: {major_i_start_t}\n' - f'minor_i_start: {minor_i_start}\n' - f'minor_i_start_t: {minor_i_start_t}\n' - ) - - # TODO: probably write this as a compile cpython or - # numba func. - - # compute directional (up/down) y-range - # % swing/dispersion starting at the reference index - # determined by the above indexing arithmetic. - y_ref = y_major_intersect - if not y_ref: - log.warning( - f'BAD y_major_intersect?!: {y_major_intersect}' - ) - # breakpoint() - - r_up = (major_mx - y_ref) / y_ref - r_down = (major_mn - y_ref) / y_ref - - minor_y_start = y_minor_intersect - ymn = minor_y_start * (1 + r_down) - ymx = minor_y_start * (1 + r_up) - - profiler(f'{viz.name}@{chart_name} SCALE minor') - - # XXX: handle out of view cases where minor curve - # now is outside the range of the major curve. in - # this case we then re-scale the major curve to - # include the range missing now enforced by the - # minor (now new major for this *side*). Note this - # is side (up/down) specific. 
- new_maj_mxmn: None | tuple[float, float] = None - if y_max > ymx: - - y_ref = y_minor_intersect - r_up_minor = (y_max - y_ref) / y_ref - - y_maj_ref = y_major_intersect - new_maj_ymx = y_maj_ref * (1 + r_up_minor) - new_maj_mxmn = (major_mn, new_maj_ymx) - if debug_print: - print( - f'{view.name} OUT OF RANGE:\n' - '--------------------\n' - f'y_max:{y_max} > ymx:{ymx}\n' - ) - ymx = y_max - profiler(f'{viz.name}@{chart_name} re-SCALE major UP') - - if y_min < ymn: - - y_ref = y_minor_intersect - r_down_minor = (y_min - y_ref) / y_ref - - y_maj_ref = y_major_intersect - new_maj_ymn = y_maj_ref * (1 + r_down_minor) - new_maj_mxmn = ( - new_maj_ymn, - new_maj_mxmn[1] if new_maj_mxmn else major_mx - ) - if debug_print: - print( - f'{view.name} OUT OF RANGE:\n' - '--------------------\n' - f'y_min:{y_min} < ymn:{ymn}\n' - ) - ymn = y_min - - profiler( - f'{viz.name}@{chart_name} re-SCALE major DOWN' - ) - - if new_maj_mxmn: - if debug_print: - print( - f'RESCALE MAJOR {major_viz.name}:\n' - f'previous: {(major_mn, major_mx)}\n' - f'new: {new_maj_mxmn}\n' - ) - major_mn, major_mx = new_maj_mxmn - - if debug_print: - print( - f'{view.name} APPLY group mxmn\n' - '--------------------\n' - f'y_minor_intersect: {y_minor_intersect}\n' - f'y_major_intersect: {y_major_intersect}\n' - f'scaled ymn: {ymn}\n' - f'scaled ymx: {ymx}\n' - f'scaled mx_disp: {mx_disp}\n' - ) + key = 'open' if viz.is_ohlc else viz.name if ( isinf(ymx) @@ -1389,32 +1334,47 @@ def interact_graphics_cycle( ) continue + ymn = dnt.apply_rng(y_start) + ymx = upt.apply_rng(y_start) + + # NOTE XXX: we have to set each curve's range once (and + # ONLY ONCE) here since we're doing this entire routine + # inside of a single render cycle (and apparently calling + # `ViewBox.setYRange()` multiple times within one only takes + # the first call as serious...) XD view._set_yrange( yrange=(ymn, ymx), ) profiler(f'{viz.name}@{chart_name} log-SCALE minor') - # NOTE XXX: we have to set the major curve's range once (and - # only once) here since we're doing this entire routine - # inside of a single render cycle (and apparently calling - # `ViewBox.setYRange()` multiple times within one only takes - # the first call as serious...) XD - if debug_print: - print( - f'Scale MAJOR {major_viz.name}:\n' - f'scaled mx_disp: {mx_disp}\n' - f'previous: {(major_mn, major_mx)}\n' - f'new: {new_maj_mxmn}\n' - ) - major_viz.plot.vb._set_yrange( - yrange=(major_mn, major_mx), - ) - profiler(f'{viz.name}@{chart_name} log-SCALE major') - # major_mx, major_mn = new_maj_mxmn + if debug_print: + print( + '------------------------------\n' + f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' + f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' + f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' + f'y_start: {y_start}\n' + f'y min: {y_min}\n' + f'y max: {y_max}\n' + f'T scaled ymn: {ymn}\n' + f'T scaled ymx: {ymx}\n' + '------------------------------\n' + ) + + # profiler(f'{viz.name}@{chart_name} log-SCALE major') + # major_mx, major_mn = group_mxmn # vrs = major_viz.plot.vb.viewRange() # if vrs[1][0] > major_mn: # breakpoint() + if debug_print: + print( + f'END UX GRAPHICS CYCLE: @{chart_name}\n' + + + '#'*100 + + + '\n' + ) if not do_linked_charts: return @@ -1466,3 +1426,98 @@ def _maybe_calc_yrange( read_slc, yrange_kwargs, ) + + +class OverlayT(Struct): + ''' + An overlay co-domain range transformer. 
+ + Used to translate and apply a range from one y-range + to another based on a returns logarithm: + + R(ymn, ymx, yref) = (ymx - yref)/yref + + which gives the log-scale multiplier, and + + ymx_t = yref * (1 + R) + + which gives the inverse to translate to the same value + in the target co-domain. + + ''' + start_t: float | None = None + viz: Viz = None + + # % "range" computed from some ref value to the mn/mx + rng: float | None = None + in_view: np.ndarray | None = None + + # pinned-minor curve modified mn and max for the major dispersion + # curve due to one series being shorter and the pin + scaling from + # that pin point causing the original range to have to increase. + y_val: float | None = None + + def apply_rng( + self, + y_start: float, # reference value for dispersion metric + + ) -> float: + return y_start * (1 + self.rng) + + # def loglin_from_range( + # self, + + # y_ref: float, # reference value for dispersion metric + # mn: float, # min y in target log-lin range + # mx: float, # max y in target log-lin range + # offset: float, # y-offset to start log-scaling from + + # ) -> tuple[float, float]: + # r_up = (mx - y_ref) / y_ref + # r_down = (mn - y_ref) / y_ref + # ymn = offset * (1 + r_down) + # ymx = offset * (1 + r_up) + + # return ymn, ymx + + +def intersect_from_longer( + start_t_first: float, + in_view_first: np.ndarray, + + start_t_second: float, + in_view_second: np.ndarray, + +) -> np.ndarray: + + tdiff = start_t_first - start_t_second + + if tdiff == 0: + return False + + i: int = 0 + + # first time series has an "earlier" first time stamp then the 2nd. + # aka 1st is "shorter" then the 2nd. + if tdiff > 0: + longer = in_view_second + find_t = start_t_first + i = 1 + + # second time series has an "earlier" first time stamp then the 1st. + # aka 2nd is "shorter" then the 1st. + elif tdiff < 0: + longer = in_view_first + find_t = start_t_second + i = 0 + + slc = slice_from_time( + arr=longer, + start_t=find_t, + stop_t=find_t, + ) + return ( + longer[slc.start], + find_t, + i, + ) From 54ecb0990f6d732d9afed37ac607b05f35325f8b Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 21 Feb 2023 08:49:55 -0500 Subject: [PATCH 093/136] Remove vlm chart again, drop lotsa fsp cruft --- piker/ui/_fsp.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index d1c53db5af..7e69a81749 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -612,7 +612,7 @@ async def open_vlm_displays( task_status: TaskStatus[ChartPlotWidget] = trio.TASK_STATUS_IGNORED, -) -> ChartPlotWidget: +) -> None: ''' Volume subchart displays. @@ -667,7 +667,6 @@ async def open_vlm_displays( # built-in vlm which we plot ASAP since it's # usually data provided directly with OHLC history. shm = ohlcv - # ohlc_chart = linked.chart vlm_chart = linked.add_plot( name='volume', @@ -693,6 +692,13 @@ async def open_vlm_displays( # vlm_chart.hideAxis('right') vlm_chart.hideAxis('left') + # TODO: is it worth being able to remove axes (from i guess + # a perf perspective) enough that we can actually do this and + # other axis related calls (for eg. label upddates in the + # display loop) don't raise when a the axis can't be loaded and + # thus would normally cause many label related calls to crash? 
+ # axis = vlm_chart.removeAxis('left') + # send back new chart to caller task_status.started(vlm_chart) @@ -705,23 +711,16 @@ async def open_vlm_displays( # read from last calculated value value = shm.array['volume'][-1] - last_val_sticky.update_from_data(-1, value) _, _, vlm_curve = vlm_viz.update_graphics() - # size view to data once at outset - # vlm_chart.view._set_yrange( - # viz=vlm_viz - # ) - # add axis title axis = vlm_chart.getAxis('right') axis.set_title(' vlm') if dvlm: - tasks_ready = [] # spawn and overlay $ vlm on the same subchart dvlm_flume, started = await admin.start_engine_task( dolla_vlm, @@ -839,18 +838,15 @@ def chart_curves( # hide the original vlm curve since the $vlm one is now # displayed and the curves are effectively the same minus # liquidity events (well at least on low OHLC periods - 1s). - vlm_curve.hide() + # vlm_curve.hide() vlm_chart.removeItem(vlm_curve) - # vlm_chart.plotItem.layout.setMinimumWidth(0) - # vlm_chart.removeAxis('left') vlm_viz = vlm_chart._vizs['volume'] - + vlm_chart.view.disable_auto_yrange() # NOTE: DON'T DO THIS. # WHY: we want range sorting on volume for the RHS label! # -> if you don't want that then use this but likely you # only will if we decide to drop unit vlm.. # vlm_viz.render = False - vlm_chart.view.disable_auto_yrange() # Trade rate overlay # XXX: requires an additional overlay for From 753e991dae7e4bc1e1dd5398d5246fe02885a183 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 21 Feb 2023 09:14:26 -0500 Subject: [PATCH 094/136] Adjust `.ui` modules to new set-style "optional" annots --- piker/ui/_annotate.py | 4 ++-- piker/ui/_axes.py | 11 +++++----- piker/ui/_chart.py | 45 +++++++++++++++++++-------------------- piker/ui/_cursor.py | 10 +++++---- piker/ui/_curve.py | 8 +++---- piker/ui/_dataviz.py | 5 ++--- piker/ui/_display.py | 7 +++--- piker/ui/_editors.py | 5 ++--- piker/ui/_forms.py | 14 ++++++------ piker/ui/_fsp.py | 9 +++++--- piker/ui/_interaction.py | 15 ++++++------- piker/ui/_label.py | 11 +++++----- piker/ui/_lines.py | 13 ++++++----- piker/ui/_notify.py | 3 +-- piker/ui/_orm.py | 17 +++++++-------- piker/ui/_pg_overrides.py | 17 +++++++-------- piker/ui/_position.py | 13 ++++++----- piker/ui/_search.py | 15 ++++++------- piker/ui/_style.py | 11 +++++----- piker/ui/_window.py | 11 +++++----- piker/ui/order_mode.py | 13 ++++++----- 21 files changed, 127 insertions(+), 130 deletions(-) diff --git a/piker/ui/_annotate.py b/piker/ui/_annotate.py index 4bad2f66b7..f3eeeb074b 100644 --- a/piker/ui/_annotate.py +++ b/piker/ui/_annotate.py @@ -18,7 +18,7 @@ Annotations for ur faces. """ -from typing import Callable, Optional +from typing import Callable from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtCore import QPointF, QRectF @@ -105,7 +105,7 @@ def __init__( get_level: Callable[..., float], size: float = 20, keep_in_view: bool = True, - on_paint: Optional[Callable] = None, + on_paint: Callable | None = None, ) -> None: diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py index b6fb928196..9eda3c75f6 100644 --- a/piker/ui/_axes.py +++ b/piker/ui/_axes.py @@ -20,7 +20,7 @@ """ from __future__ import annotations from functools import lru_cache -from typing import Optional, Callable +from typing import Callable from math import floor import numpy as np @@ -60,7 +60,8 @@ def __init__( **kwargs ) - # XXX: pretty sure this makes things slower + # XXX: pretty sure this makes things slower! + # no idea why given we only move labels for the most part? 
# self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) self.pi = plotitem @@ -190,7 +191,7 @@ def __init__( *args, min_tick: int = 2, title: str = '', - formatter: Optional[Callable[[float], str]] = None, + formatter: Callable[[float], str] | None = None, **kwargs ) -> None: @@ -202,8 +203,8 @@ def __init__( def set_title( self, title: str, - view: Optional[ChartView] = None, - color: Optional[str] = None, + view: ChartView | None = None, + color: str | None = None, ) -> Label: ''' diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 21886d7e60..dbc9f495c9 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -21,7 +21,6 @@ from __future__ import annotations from typing import ( Iterator, - Optional, TYPE_CHECKING, ) @@ -102,7 +101,7 @@ def __init__( super().__init__(parent) - self.search: Optional[SearchWidget] = None + self.search: SearchWidget | None = None self.hbox = QHBoxLayout(self) self.hbox.setContentsMargins(0, 0, 0, 0) @@ -121,9 +120,9 @@ def __init__( tuple[LinkedSplits, LinkedSplits], ] = {} - self.hist_linked: Optional[LinkedSplits] = None - self.rt_linked: Optional[LinkedSplits] = None - self._active_cursor: Optional[Cursor] = None + self.hist_linked: LinkedSplits | None = None + self.rt_linked: LinkedSplits | None = None + self._active_cursor: Cursor | None = None # assigned in the startup func `_async_main()` self._root_n: trio.Nursery = None @@ -367,7 +366,7 @@ class ChartnPane(QFrame): ''' sidepane: FieldsForm | SearchWidget hbox: QHBoxLayout - chart: Optional[ChartPlotWidget] = None + chart: ChartPlotWidget | None = None def __init__( self, @@ -445,7 +444,7 @@ def __init__( # chart-local graphics state that can be passed to # a ``graphic_update_cycle()`` call by any task wishing to # update the UI for a given "chart instance". 
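The rewrite throughout is mechanical: `Optional[X]` becomes the PEP
604 spelling `X | None`, identical in meaning and fine in annotations
given the `from __future__ import annotations` imports visible in
these modules (or a new enough interpreter). The general shape, with
a toy signature, matches the pair just below:

    from __future__ import annotations
    from typing import Optional

    def f(x: Optional[int] = None) -> Optional[str]: ...  # before
    def f(x: int | None = None) -> str | None: ...        # after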
- self.display_state: Optional[DisplayState] = None + self.display_state: DisplayState | None = None self._symbol: Symbol = None @@ -475,7 +474,7 @@ def symbol(self) -> Symbol: def set_split_sizes( self, - prop: Optional[float] = None, + prop: float | None = None, ) -> None: ''' @@ -569,11 +568,11 @@ def add_plot( shm: ShmArray, flume: Flume, - array_key: Optional[str] = None, + array_key: str | None = None, style: str = 'line', _is_main: bool = False, - sidepane: Optional[QWidget] = None, + sidepane: QWidget | None = None, draw_kwargs: dict = {}, **cpw_kwargs, @@ -789,7 +788,7 @@ def add_plot( def resize_sidepanes( self, - from_linked: Optional[LinkedSplits] = None, + from_linked: LinkedSplits | None = None, ) -> None: ''' @@ -857,7 +856,7 @@ def __init__( # TODO: load from config use_open_gl: bool = False, - static_yrange: Optional[tuple[float, float]] = None, + static_yrange: tuple[float, float] | None = None, parent=None, **kwargs, @@ -872,7 +871,7 @@ def __init__( # NOTE: must be set bfore calling ``.mk_vb()`` self.linked = linkedsplits - self.sidepane: Optional[FieldsForm] = None + self.sidepane: FieldsForm | None = None # source of our custom interactions self.cv = self.mk_vb(name) @@ -1035,7 +1034,7 @@ def default_view( def increment_view( self, datums: int = 1, - vb: Optional[ChartView] = None, + vb: ChartView | None = None, ) -> None: ''' @@ -1066,8 +1065,8 @@ def increment_view( def overlay_plotitem( self, name: str, - index: Optional[int] = None, - axis_title: Optional[str] = None, + index: int | None = None, + axis_title: str | None = None, axis_side: str = 'right', axis_kwargs: dict = {}, @@ -1140,11 +1139,11 @@ def draw_curve( shm: ShmArray, flume: Flume, - array_key: Optional[str] = None, + array_key: str | None = None, overlay: bool = False, - color: Optional[str] = None, + color: str | None = None, add_label: bool = True, - pi: Optional[pg.PlotItem] = None, + pi: pg.PlotItem | None = None, step_mode: bool = False, is_ohlc: bool = False, add_sticky: None | str = 'right', @@ -1277,7 +1276,7 @@ def draw_ohlc( shm: ShmArray, flume: Flume, - array_key: Optional[str] = None, + array_key: str | None = None, **draw_curve_kwargs, ) -> Viz: @@ -1308,10 +1307,10 @@ def leaveEvent(self, ev): # noqa def maxmin( self, - name: Optional[str] = None, - bars_range: Optional[tuple[ + name: str | None = None, + bars_range: tuple[ int, int, int, int, int, int - ]] = None, + ] | None = None, ) -> tuple[float, float]: ''' diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py index 8c358c3f2b..c118829f53 100644 --- a/piker/ui/_cursor.py +++ b/piker/ui/_cursor.py @@ -21,7 +21,6 @@ from __future__ import annotations from functools import partial from typing import ( - Optional, Callable, TYPE_CHECKING, ) @@ -38,7 +37,10 @@ _font_small, _font, ) -from ._axes import YAxisLabel, XAxisLabel +from ._axes import ( + YAxisLabel, + XAxisLabel, +) from ..log import get_logger if TYPE_CHECKING: @@ -167,7 +169,7 @@ def __init__( anchor_at: str = ('top', 'right'), justify_text: str = 'left', - font_size: Optional[int] = None, + font_size: int | None = None, ) -> None: @@ -338,7 +340,7 @@ def __init__( self.linked = linkedsplits self.graphics: dict[str, pg.GraphicsObject] = {} - self.xaxis_label: Optional[XAxisLabel] = None + self.xaxis_label: XAxisLabel | None = None self.always_show_xlabel: bool = True self.plots: list['PlotChartWidget'] = [] # type: ignore # noqa self.active_plot = None diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 4206417358..013448f353 100644 --- a/piker/ui/_curve.py +++ 
b/piker/ui/_curve.py @@ -19,7 +19,7 @@ """ from contextlib import contextmanager as cm -from typing import Optional, Callable +from typing import Callable import numpy as np import pyqtgraph as pg @@ -86,7 +86,7 @@ def __init__( # line styling color: str = 'bracket', last_step_color: str | None = None, - fill_color: Optional[str] = None, + fill_color: str | None = None, style: str = 'solid', **kwargs @@ -191,14 +191,14 @@ class Curve(FlowGraphic): ''' # TODO: can we remove this? - # sub_br: Optional[Callable] = None + # sub_br: Callable | None = None def __init__( self, *args, # color: str = 'default_lightest', - # fill_color: Optional[str] = None, + # fill_color: str | None = None, # style: str = 'solid', **kwargs diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 32f400a1fa..73a0ab6b1f 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -25,7 +25,6 @@ floor, ) from typing import ( - Optional, Literal, TYPE_CHECKING, ) @@ -249,7 +248,7 @@ class Viz(msgspec.Struct): # , frozen=True): # in some cases a viz may want to change its # graphical "type" or, "form" when downsampling, to # start this is only ever an interpolation line. - ds_graphics: Optional[Curve] = None + ds_graphics: Curve | None = None is_ohlc: bool = False render: bool = True # toggle for display loop @@ -576,7 +575,7 @@ def datums_range( def read( self, - array_field: Optional[str] = None, + array_field: str | None = None, index_field: str | None = None, profiler: None | Profiler = None, diff --git a/piker/ui/_display.py b/piker/ui/_display.py index d209159453..8a4de766f8 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -26,7 +26,6 @@ from math import floor import time from typing import ( - Optional, Any, TYPE_CHECKING, ) @@ -205,8 +204,8 @@ class DisplayState(Struct): globalz: None | dict[str, Any] = None - vlm_chart: Optional[ChartPlotWidget] = None - vlm_sticky: Optional[YAxisLabel] = None + vlm_chart: ChartPlotWidget | None = None + vlm_sticky: YAxisLabel | None = None wap_in_history: bool = False @@ -494,7 +493,7 @@ def graphics_update_cycle( wap_in_history: bool = False, trigger_all: bool = False, # flag used by prepend history updates - prepend_update_index: Optional[int] = None, + prepend_update_index: int | None = None, ) -> None: diff --git a/piker/ui/_editors.py b/piker/ui/_editors.py index 08f198529a..df88131478 100644 --- a/piker/ui/_editors.py +++ b/piker/ui/_editors.py @@ -21,7 +21,6 @@ from __future__ import annotations from collections import defaultdict from typing import ( - Optional, TYPE_CHECKING ) @@ -67,7 +66,7 @@ def add( x: float, y: float, color='default', - pointing: Optional[str] = None, + pointing: str | None = None, ) -> pg.ArrowItem: ''' @@ -221,7 +220,7 @@ def remove_line( line: LevelLine = None, uuid: str = None, - ) -> Optional[LevelLine]: + ) -> LevelLine | None: '''Remove a line by refernce or uuid. 
If no lines or ids are provided remove all lines under the diff --git a/piker/ui/_forms.py b/piker/ui/_forms.py index a6cddae98a..a86cf90308 100644 --- a/piker/ui/_forms.py +++ b/piker/ui/_forms.py @@ -23,7 +23,9 @@ from functools import partial from math import floor from typing import ( - Optional, Any, Callable, Awaitable + Any, + Callable, + Awaitable, ) import trio @@ -263,7 +265,7 @@ def set_items( def set_icon( self, key: str, - icon_name: Optional[str], + icon_name: str | None, ) -> None: self.setItemIcon( @@ -344,7 +346,7 @@ def add_field_label( name: str, - font_size: Optional[int] = None, + font_size: int | None = None, font_color: str = 'default_lightest', ) -> QtGui.QLabel: @@ -469,7 +471,7 @@ def mk_form( parent: QWidget, fields_schema: dict, - font_size: Optional[int] = None, + font_size: int | None = None, ) -> FieldsForm: @@ -628,7 +630,7 @@ def mk_fill_status_bar( parent_pane: QWidget, form: FieldsForm, pane_vbox: QVBoxLayout, - label_font_size: Optional[int] = None, + label_font_size: int | None = None, ) -> ( # TODO: turn this into a composite? @@ -738,7 +740,7 @@ def mk_fill_status_bar( def mk_order_pane_layout( parent: QWidget, - # accounts: dict[str, Optional[str]], + # accounts: dict[str, str | None], ) -> FieldsForm: diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 7e69a81749..6e600743c9 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -24,7 +24,10 @@ from functools import partial import inspect from itertools import cycle -from typing import Optional, AsyncGenerator, Any +from typing import ( + AsyncGenerator, + Any, +) import numpy as np import msgspec @@ -80,7 +83,7 @@ def has_vlm(ohlcv: ShmArray) -> bool: def update_fsp_chart( viz, graphics_name: str, - array_key: Optional[str], + array_key: str | None, **kwargs, ) -> None: @@ -476,7 +479,7 @@ async def start_engine_task( target: Fsp, conf: dict[str, dict[str, Any]], - worker_name: Optional[str] = None, + worker_name: str | None = None, loglevel: str = 'info', ) -> (Flume, trio.Event): diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index c683f4a1d3..5e2c4dbeaf 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -26,7 +26,6 @@ import time from typing import ( Any, - Optional, Callable, TYPE_CHECKING, ) @@ -93,7 +92,7 @@ async def handle_viewmode_kb_inputs( last = time.time() action: str - on_next_release: Optional[Callable] = None + on_next_release: Callable | None = None # for quick key sequence-combo pattern matching # we have a min_tap period and these should not @@ -379,7 +378,7 @@ def __init__( name: str, parent: pg.PlotItem = None, - static_yrange: Optional[tuple[float, float]] = None, + static_yrange: tuple[float, float] | None = None, **kwargs, ): @@ -595,7 +594,7 @@ def wheelEvent( def mouseDragEvent( self, ev, - axis: Optional[int] = None, + axis: int | None = None, ) -> None: pos = ev.pos() @@ -753,19 +752,19 @@ def _set_yrange( self, *, - yrange: Optional[tuple[float, float]] = None, + yrange: tuple[float, float] | None = None, viz: Viz | None = None, # NOTE: this value pairs (more or less) with L1 label text # height offset from from the bid/ask lines. range_margin: float | None = 0.09, - bars_range: Optional[tuple[int, int, int, int]] = None, + bars_range: tuple[int, int, int, int] | None = None, # flag to prevent triggering sibling charts from the same linked # set from recursion errors. 
autoscale_linked_plots: bool = False, - name: Optional[str] = None, + name: str | None = None, ) -> None: ''' @@ -871,7 +870,7 @@ def _set_yrange( def enable_auto_yrange( self, viz: Viz, - src_vb: Optional[ChartView] = None, + src_vb: ChartView | None = None, ) -> None: ''' diff --git a/piker/ui/_label.py b/piker/ui/_label.py index 247b4cc09a..85fbbb8a7a 100644 --- a/piker/ui/_label.py +++ b/piker/ui/_label.py @@ -19,7 +19,10 @@ """ from inspect import isfunction -from typing import Callable, Optional, Any +from typing import ( + Callable, + Any, +) import pyqtgraph as pg from PyQt5 import QtGui, QtWidgets @@ -70,9 +73,7 @@ def __init__( self._fmt_str = fmt_str self._view_xy = QPointF(0, 0) - self.scene_anchor: Optional[ - Callable[..., QPointF] - ] = None + self.scene_anchor: Callable[..., QPointF] | None = None self._x_offset = x_offset @@ -164,7 +165,7 @@ def set_view_pos( self, y: float, - x: Optional[float] = None, + x: float | None = None, ) -> None: diff --git a/piker/ui/_lines.py b/piker/ui/_lines.py index 461544e73d..4469a6735b 100644 --- a/piker/ui/_lines.py +++ b/piker/ui/_lines.py @@ -22,7 +22,6 @@ from functools import partial from math import floor from typing import ( - Optional, Callable, TYPE_CHECKING, ) @@ -32,7 +31,7 @@ from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtCore import QPointF -from ._annotate import qgo_draw_markers, LevelMarker +from ._annotate import LevelMarker from ._anchors import ( vbr_left, right_axis, @@ -295,7 +294,7 @@ def mouseDragEvent(self, ev): # show y-crosshair again cursor.show_xhair() - def get_cursor(self) -> Optional[Cursor]: + def get_cursor(self) -> Cursor | None: chart = self._chart cur = chart.linked.cursor @@ -610,11 +609,11 @@ def order_line( chart, level: float, - action: Optional[str] = 'buy', # buy or sell + action: str | None = 'buy', # buy or sell - marker_style: Optional[str] = None, - level_digits: Optional[float] = 3, - size: Optional[int] = 1, + marker_style: str | None = None, + level_digits: float | None = 3, + size: int | None = 1, size_digits: int = 1, show_markers: bool = False, submit_price: float = None, diff --git a/piker/ui/_notify.py b/piker/ui/_notify.py index c14b3cbbe3..4a33dabb18 100644 --- a/piker/ui/_notify.py +++ b/piker/ui/_notify.py @@ -21,7 +21,6 @@ import os import platform import subprocess -from typing import Optional import trio @@ -33,7 +32,7 @@ log = get_logger(__name__) -_dbus_uid: Optional[str] = '' +_dbus_uid: str | None = '' async def notify_from_ems_status_msg( diff --git a/piker/ui/_orm.py b/piker/ui/_orm.py index 8dea0b6d5f..eaca69e2f6 100644 --- a/piker/ui/_orm.py +++ b/piker/ui/_orm.py @@ -20,8 +20,9 @@ """ from __future__ import annotations from typing import ( - Optional, Generic, - TypeVar, Callable, + Generic, + TypeVar, + Callable, ) # from pydantic import BaseModel, validator @@ -42,13 +43,11 @@ class Field(GenericModel, Generic[DataType]): - widget_factory: Optional[ - Callable[ - [QWidget, 'Field'], - QWidget - ] - ] - value: Optional[DataType] = None + widget_factory: Callable[ + [QWidget, 'Field'], + QWidget + ] | None = None + value: DataType | None = None class Selection(Field[DataType], Generic[DataType]): diff --git a/piker/ui/_pg_overrides.py b/piker/ui/_pg_overrides.py index 53ed5405c6..bd35064be7 100644 --- a/piker/ui/_pg_overrides.py +++ b/piker/ui/_pg_overrides.py @@ -22,8 +22,6 @@ view transforms. 
""" -from typing import Optional - import pyqtgraph as pg from ._axes import Axis @@ -47,9 +45,10 @@ def invertQTransform(tr): def _do_overrides() -> None: - """Dooo eeet. + ''' + Dooo eeet. - """ + ''' # we don't care about potential fp issues inside Qt pg.functions.invertQTransform = invertQTransform pg.PlotItem = PlotItem @@ -119,7 +118,7 @@ def removeAxis( name: str, unlink: bool = True, - ) -> Optional[pg.AxisItem]: + ) -> pg.AxisItem | None: """ Remove an axis from the contained axis items by ```name: str```. @@ -169,14 +168,14 @@ def removeAxis( def setAxisItems( self, # XXX: yeah yeah, i know we can't use type annots like this yet. - axisItems: Optional[dict[str, pg.AxisItem]] = None, + axisItems: dict[str, pg.AxisItem] | None = None, add_to_layout: bool = True, default_axes: list[str] = ['left', 'bottom'], ): - """ - Override axis item setting to only + ''' + Override axis item setting to only what is passed in. - """ + ''' axisItems = axisItems or {} # XXX: wth is is this even saying?!? diff --git a/piker/ui/_position.py b/piker/ui/_position.py index 9baca8ee63..41421fb672 100644 --- a/piker/ui/_position.py +++ b/piker/ui/_position.py @@ -25,7 +25,6 @@ from math import floor, copysign from typing import ( Callable, - Optional, TYPE_CHECKING, ) @@ -170,12 +169,12 @@ class SettingsPane: limit_label: QLabel # encompasing high level namespace - order_mode: Optional['OrderMode'] = None # typing: ignore # noqa + order_mode: OrderMode | None = None # typing: ignore # noqa def set_accounts( self, names: list[str], - sizes: Optional[list[float]] = None, + sizes: list[float] | None = None, ) -> None: combo = self.form.fields['account'] @@ -540,8 +539,8 @@ class Nav(Struct): charts: dict[int, ChartPlotWidget] pp_labels: dict[str, Label] = {} size_labels: dict[str, Label] = {} - lines: dict[str, Optional[LevelLine]] = {} - level_markers: dict[str, Optional[LevelMarker]] = {} + lines: dict[str, LevelLine | None] = {} + level_markers: dict[str, LevelMarker | None] = {} color: str = 'default_lightest' def update_ui( @@ -550,7 +549,7 @@ def update_ui( price: float, size: float, slots_used: float, - size_digits: Optional[int] = None, + size_digits: int | None = None, ) -> None: ''' @@ -847,7 +846,7 @@ def pane(self) -> FieldsForm: def update_from_pp( self, - position: Optional[Position] = None, + position: Position | None = None, set_as_startup: bool = False, ) -> None: diff --git a/piker/ui/_search.py b/piker/ui/_search.py index ef0cca80db..9627e83d17 100644 --- a/piker/ui/_search.py +++ b/piker/ui/_search.py @@ -35,7 +35,6 @@ from contextlib import asynccontextmanager from functools import partial from typing import ( - Optional, Callable, Awaitable, Sequence, @@ -178,8 +177,8 @@ def set_font_size(self, size: int = 18): def resize_to_results( self, - w: Optional[float] = 0, - h: Optional[float] = None, + w: float | None = 0, + h: float | None = None, ) -> None: model = self.model() @@ -380,7 +379,7 @@ def find_section( self, section: str, - ) -> Optional[QModelIndex]: + ) -> QModelIndex | None: ''' Find the *first* depth = 1 section matching ``section`` in the tree and return its index. 
@@ -504,7 +503,7 @@ def set_section_entries( def show_matches( self, - wh: Optional[tuple[float, float]] = None, + wh: tuple[float, float] | None = None, ) -> None: @@ -529,7 +528,7 @@ def __init__( self, parent: QWidget, godwidget: QWidget, - view: Optional[CompleterView] = None, + view: CompleterView | None = None, **kwargs, ) -> None: @@ -708,7 +707,7 @@ async def chart_current_item( self, clear_to_cache: bool = True, - ) -> Optional[str]: + ) -> str | None: ''' Attempt to load and switch the current selected completion result to the affiliated chart app. @@ -1167,7 +1166,7 @@ async def register_symbol_search( provider_name: str, search_routine: Callable, - pause_period: Optional[float] = None, + pause_period: float | None = None, ) -> AsyncIterator[dict]: diff --git a/piker/ui/_style.py b/piker/ui/_style.py index 52ac753a62..67f14a93dd 100644 --- a/piker/ui/_style.py +++ b/piker/ui/_style.py @@ -18,7 +18,7 @@ Qt UI styling. ''' -from typing import Optional, Dict +from typing import Dict import math import pyqtgraph as pg @@ -52,7 +52,7 @@ def __init__( # TODO: move to config name: str = 'Hack', font_size: str = 'default', - # size_in_inches: Optional[float] = None, + ) -> None: self.name = name self._qfont = QtGui.QFont(name) @@ -91,13 +91,14 @@ def scale(self) -> float: def px_size(self) -> int: return self._qfont.pixelSize() - def configure_to_dpi(self, screen: Optional[QtGui.QScreen] = None): - """Set an appropriately sized font size depending on the screen DPI. + def configure_to_dpi(self, screen: QtGui.QScreen | None = None): + ''' + Set an appropriately sized font size depending on the screen DPI. If we end up needing to generalize this more here there are resources listed in the script in ``snippets/qt_screen_info.py``. - """ + ''' if screen is None: screen = self.screen diff --git a/piker/ui/_window.py b/piker/ui/_window.py index a2c432616b..0fc87c24ea 100644 --- a/piker/ui/_window.py +++ b/piker/ui/_window.py @@ -23,7 +23,6 @@ import time from typing import ( Callable, - Optional, Union, ) import uuid @@ -64,9 +63,9 @@ def open_status( self, msg: str, - final_msg: Optional[str] = None, + final_msg: str | None = None, clear_on_next: bool = False, - group_key: Optional[Union[bool, str]] = False, + group_key: Union[bool, str] | None = False, ) -> Union[Callable[..., None], str]: ''' @@ -178,11 +177,11 @@ def __init__(self, parent=None): self.setWindowTitle(self.title) # set by runtime after `trio` is engaged. 
- self.godwidget: Optional[GodWidget] = None + self.godwidget: GodWidget | None = None self._status_bar: QStatusBar = None self._status_label: QLabel = None - self._size: Optional[tuple[int, int]] = None + self._size: tuple[int, int] | None = None @property def mode_label(self) -> QLabel: @@ -289,7 +288,7 @@ def current_screen(self) -> QScreen: def configure_to_desktop( self, - size: Optional[tuple[int, int]] = None, + size: tuple[int, int] | None = None, ) -> None: ''' diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py index 4a194a79f9..cf5f53b1ba 100644 --- a/piker/ui/order_mode.py +++ b/piker/ui/order_mode.py @@ -25,7 +25,6 @@ from pprint import pformat import time from typing import ( - Optional, Callable, Any, TYPE_CHECKING, @@ -129,7 +128,7 @@ class OrderMode: trackers: dict[str, PositionTracker] # switched state, the current position - current_pp: Optional[PositionTracker] = None + current_pp: PositionTracker | None = None active: bool = False name: str = 'order' dialogs: dict[str, Dialog] = field(default_factory=dict) @@ -139,7 +138,7 @@ class OrderMode: 'buy': 'buy_green', 'sell': 'sell_red', } - _staged_order: Optional[Order] = None + _staged_order: Order | None = None def on_level_change_update_next_order_info( self, @@ -180,7 +179,7 @@ def on_level_change_update_next_order_info( def new_line_from_order( self, order: Order, - chart: Optional[ChartPlotWidget] = None, + chart: ChartPlotWidget | None = None, **line_kwargs, ) -> LevelLine: @@ -340,7 +339,7 @@ def stage_order( def submit_order( self, send_msg: bool = True, - order: Optional[Order] = None, + order: Order | None = None, ) -> Dialog: ''' @@ -452,7 +451,7 @@ def order_line_modify_complete( def on_submit( self, uuid: str, - order: Optional[Order] = None, + order: Order | None = None, ) -> Dialog: ''' @@ -496,7 +495,7 @@ def on_fill( price: float, time_s: float, - pointing: Optional[str] = None, + pointing: str | None = None, ) -> None: ''' From 35c40e825ac51b11f73bb5c770ea909db466bc1a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 21 Feb 2023 16:09:11 -0500 Subject: [PATCH 095/136] Move overlay transform logic to new `.ui.view_mode` It was getting waayy to long to be jammed in a method XD This moves all the chart-viz iteration and transform logic into a new `piker.ui.view_mode.overlay_viewlists()` core routine which will make it a lot nicer for, - AOT compilation via `numba` / `cython` / `mypyc`. - decoupling from the `pyqtgraph.ViewBox` APIs if we ever decide to get crazy and go without another graphics engine. - keeping your head clear when trying to rework the code B) --- piker/ui/_interaction.py | 586 ++---------------------------------- piker/ui/view_mode.py | 635 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 653 insertions(+), 568 deletions(-) create mode 100644 piker/ui/view_mode.py diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 5e2c4dbeaf..0a972d8c82 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -1,5 +1,5 @@ # piker: trading gear for hackers -# Copyright (C) Tyler Goodlet (in stewardship for piker0) +# Copyright (C) Tyler Goodlet (in stewardship for pikers) # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by @@ -14,18 +14,14 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-""" +''' Chart view box primitives -""" +''' from __future__ import annotations from contextlib import asynccontextmanager -from math import ( - isinf, -) import time from typing import ( - Any, Callable, TYPE_CHECKING, ) @@ -42,8 +38,7 @@ from ..log import get_logger from .._profile import Profiler from .._profile import pg_profile_enabled, ms_slower_then -from ..data.types import Struct -from ..data._pathops import slice_from_time +from .view_mode import overlay_viewlists # from ._style import _min_points_to_show from ._editors import SelectRect from . import _event @@ -51,7 +46,6 @@ if TYPE_CHECKING: from ._chart import ChartPlotWidget from ._dataviz import Viz - # from ._overlay import PlotItemOverlay log = get_logger(__name__) @@ -926,14 +920,17 @@ def interact_graphics_cycle( self, *args, # capture Qt signal (slot) inputs - debug_print: bool = False, - do_overlay_scaling: bool = True, + # debug_print: bool = False, do_linked_charts: bool = True, + do_overlay_scaling: bool = True, yrange_kwargs: dict[ str, tuple[float, float], ] | None = None, + + overlay_technique: str = 'loglin_to_first', + ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', @@ -964,559 +961,12 @@ def interact_graphics_cycle( plots = {chart.name: chart} # TODO: a faster single-loop-iterator way of doing this? - for chart_name, chart in plots.items(): - - # Common `PlotItem` maxmin table; presumes that some path - # graphics (and thus their backing data sets) are in the - # same co-domain and view box (since the were added - # a separate graphics objects to a common plot) and thus can - # be sorted as one set per plot. - mxmns_by_common_pi: dict[ - pg.PlotItem, - tuple[float, float], - ] = {} - - # proportional group auto-scaling per overlay set. - # -> loop through overlays on each multi-chart widget - # and scale all y-ranges based on autoscale config. - # -> for any "group" overlay we want to dispersion normalize - # and scale minor charts onto the major chart: the chart - # with the most dispersion in the set. - major_viz: Viz = None - major_mx: float = 0 - major_mn: float = float('inf') - mx_disp: float = 0 - - # collect certain flows have grapics objects **in seperate - # plots/viewboxes** into groups and do a common calc to - # determine auto-ranging input for `._set_yrange()`. - # this is primarly used for our so called "log-linearized - # multi-plot" overlay technique. - overlay_table: dict[ - ViewBox, - tuple[ - Viz, - float, # y start - float, # y min - float, # y max - float, # y median - slice, # in-view array slice - np.ndarray, # in-view array - ], - ] = {} - - # ONLY auto-yrange the viz mapped to THIS view box - if not do_overlay_scaling: - viz = self._viz - if debug_print: - print(f'ONLY ranging THIS viz: {viz.name}') - - out = _maybe_calc_yrange( - viz, - yrange_kwargs, - profiler, - chart_name, - ) - if out is None: - continue - - read_slc, yrange_kwargs = out - viz.plot.vb._set_yrange(**yrange_kwargs) - profiler(f'{viz.name}@{chart_name} single curve yrange') - - # don't iterate overlays, just move to next chart - continue - - # create a group overlay log-linearized y-range transform to - # track and eventually inverse transform all overlay curves - # to a common target max dispersion range. 
- dnt = OverlayT() - upt = OverlayT() - - if debug_print: - print( - f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n' - + - '#'*100 - + - '\n' - ) - - for name, viz in chart._vizs.items(): - - out = _maybe_calc_yrange( - viz, - yrange_kwargs, - profiler, - chart_name, - ) - if out is None: - continue - - read_slc, yrange_kwargs = out - yrange = yrange_kwargs['yrange'] - pi = viz.plot - - # handle multiple graphics-objs per viewbox cases - mxmn = mxmns_by_common_pi.get(pi) - if mxmn: - yrange = mxmns_by_common_pi[pi] = ( - min(yrange[0], mxmn[0]), - max(yrange[1], mxmn[1]), - ) - - else: - mxmns_by_common_pi[pi] = yrange - - profiler(f'{viz.name}@{chart_name} common pi sort') - - # non-overlay group case - if not viz.is_ohlc: - pi.vb._set_yrange(yrange=yrange) - profiler( - f'{viz.name}@{chart_name} simple std `._set_yrange()`' - ) - - # handle overlay log-linearized group scaling cases - # TODO: a better predicate here, likely something - # to do with overlays and their settings.. - # TODO: we probably eventually might want some other - # charts besides OHLC? - else: - ymn, ymx = yrange - - # determine start datum in view - arr = viz.shm.array - in_view = arr[read_slc] - if not in_view.size: - log.warning(f'{viz.name} not in view?') - continue - - # row_start = arr[read_slc.start - 1] - row_start = arr[read_slc.start] - - if viz.is_ohlc: - y_ref = row_start['open'] - else: - y_ref = row_start[viz.name] - - profiler(f'{viz.name}@{chart_name} MINOR curve median') - - overlay_table[viz.plot.vb] = ( - viz, - y_ref, - ymn, - ymx, - read_slc, - in_view, - ) - - key = 'open' if viz.is_ohlc else viz.name - start_t = in_view[0]['time'] - r_down = (ymn - y_ref) / y_ref - r_up = (ymx - y_ref) / y_ref - - msg = ( - f'### {viz.name}@{chart_name} ###\n' - f'y_ref: {y_ref}\n' - f'down disp: {r_down}\n' - f'up disp: {r_up}\n' - ) - profiler(msg) - if debug_print: - print(msg) - - # track the "major" curve as the curve with most - # dispersion. - if ( - dnt.rng is None - or ( - r_down < dnt.rng - and r_down < 0 - ) - ): - dnt.viz = viz - dnt.rng = r_down - dnt.in_view = in_view - dnt.start_t = in_view[0]['time'] - major_mn = ymn - - msg = f'NEW DOWN: {viz.name}@{chart_name} r:{r_down}\n' - profiler(msg) - if debug_print: - print(msg) - else: - # minor in the down swing range so check that if - # we apply the current rng to the minor that it - # doesn't go outside the current range for the major - # otherwise we recompute the minor's range (when - # adjusted for it's intersect point to be the new - # major's range. - intersect = intersect_from_longer( - dnt.start_t, - dnt.in_view, - start_t, - in_view, - ) - profiler(f'{viz.name}@{chart_name} intersect by t') - - if intersect: - longer_in_view, _t, i = intersect - - scaled_mn = dnt.apply_rng(y_ref) - if scaled_mn > ymn: - # after major curve scaling we detected - # the minor curve is still out of range - # so we need to adjust the major's range - # to include the new composed range. - y_maj_ref = longer_in_view[key] - new_major_ymn = ( - y_maj_ref - * - (1 + r_down) - ) - - # rewrite the major range to the new - # minor-pinned-to-major range and mark - # the transform as "virtual". 
- msg = ( - f'EXPAND DOWN bc {viz.name}@{chart_name}\n' - f'y_start epoch time @ {_t}:\n' - f'y_maj_ref @ {_t}: {y_maj_ref}\n' - f'R: {dnt.rng} -> {r_down}\n' - f'MN: {major_mn} -> {new_major_ymn}\n' - ) - dnt.rng = r_down - major_mn = dnt.y_val = new_major_ymn - profiler(msg) - if debug_print: - print(msg) - - if ( - upt.rng is None - or ( - r_up > upt.rng - and r_up > 0 - ) - ): - upt.rng = r_up - upt.viz = viz - upt.in_view = in_view - upt.start_t = in_view[0]['time'] - major_mx = ymx - msg = f'NEW UP: {viz.name}@{chart_name} r:{r_up}\n' - profiler(msg) - if debug_print: - print(msg) - - else: - intersect = intersect_from_longer( - upt.start_t, - upt.in_view, - start_t, - in_view, - ) - profiler(f'{viz.name}@{chart_name} intersect by t') - - if intersect: - longer_in_view, _t, i = intersect - - scaled_mx = upt.apply_rng(y_ref) - if scaled_mx < ymx: - # after major curve scaling we detected - # the minor curve is still out of range - # so we need to adjust the major's range - # to include the new composed range. - y_maj_ref = longer_in_view[key] - new_major_ymx = ( - y_maj_ref - * - (1 + r_up) - ) - - # rewrite the major range to the new - # minor-pinned-to-major range and mark - # the transform as "virtual". - msg = ( - f'EXPAND UP bc {viz.name}@{chart_name}:\n' - f'y_maj_ref @ {_t}: {y_maj_ref}\n' - f'R: {upt.rng} -> {r_up}\n' - f'MX: {major_mx} -> {new_major_ymx}\n' - ) - upt.rng = r_up - major_mx = upt.y_val = new_major_ymx - profiler(msg) - print(msg) - - # find curve with max dispersion - disp = abs(ymx - ymn) / y_ref - if disp > mx_disp: - major_viz = viz - mx_disp = disp - major_mn = ymn - major_mx = ymx - - profiler(f'{viz.name}@{chart_name} MINOR curve scale') - - # NOTE: if no there were no overlay charts - # detected/collected (could be either no group detected or - # chart with a single symbol, thus a single viz/overlay) - # then we ONLY set the lone chart's (viz) yrange and short - # circuit to the next chart in the linked charts loop. IOW - # there's no reason to go through the overlay dispersion - # scaling in the next loop below when only one curve is - # detected. - if ( - not mxmns_by_common_pi - and len(overlay_table) < 2 - ): - if debug_print: - print(f'ONLY ranging major: {viz.name}') - - out = _maybe_calc_yrange( - viz, - yrange_kwargs, - profiler, - chart_name, - ) - if out is None: - continue - - read_slc, yrange_kwargs = out - viz.plot.vb._set_yrange(**yrange_kwargs) - profiler(f'{viz.name}@{chart_name} single curve yrange') - - # move to next chart in linked set since - # no overlay transforming is needed. - continue - - elif ( - mxmns_by_common_pi - and not major_viz - ): - # move to next chart in linked set since - # no overlay transforming is needed. - continue - - profiler(f'<{chart_name}>.interact_graphics_cycle({name})') - - # if a minor curves scaling brings it "outside" the range of - # the major curve (in major curve co-domain terms) then we - # need to rescale the major to also include this range. The - # below placeholder denotes when this occurs. - # group_mxmn: None | tuple[float, float] = None - - # TODO: probably re-write this loop as a compiled cpython or - # numba func. 
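        # A rough, untested sketch of such a kernel (assuming
        # `numba` as the backend and that the per-curve reference
        # values are pre-gathered into a flat array):
        #
        #   from numba import njit
        #
        #   @njit
        #   def loglin_yranges(
        #       y_starts: np.ndarray,
        #       dn_rng: float,
        #       up_rng: float,
        #   ) -> tuple[np.ndarray, np.ndarray]:
        #       return (
        #           y_starts * (1 + dn_rng),
        #           y_starts * (1 + up_rng),
        #       )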
- - # conduct "log-linearized multi-plot" scalings for all groups - for ( - view, - ( - viz, - y_start, - y_min, - y_max, - read_slc, - minor_in_view, - ) - ) in overlay_table.items(): - - key = 'open' if viz.is_ohlc else viz.name - - if ( - isinf(ymx) - or isinf(ymn) - ): - log.warning( - f'BAD ymx/ymn: {(ymn, ymx)}' - ) - continue - - ymn = dnt.apply_rng(y_start) - ymx = upt.apply_rng(y_start) - - # NOTE XXX: we have to set each curve's range once (and - # ONLY ONCE) here since we're doing this entire routine - # inside of a single render cycle (and apparently calling - # `ViewBox.setYRange()` multiple times within one only takes - # the first call as serious...) XD - view._set_yrange( - yrange=(ymn, ymx), - ) - profiler(f'{viz.name}@{chart_name} log-SCALE minor') - - if debug_print: - print( - '------------------------------\n' - f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' - f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' - f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' - f'y_start: {y_start}\n' - f'y min: {y_min}\n' - f'y max: {y_max}\n' - f'T scaled ymn: {ymn}\n' - f'T scaled ymx: {ymx}\n' - '------------------------------\n' - ) - - # profiler(f'{viz.name}@{chart_name} log-SCALE major') - # major_mx, major_mn = group_mxmn - # vrs = major_viz.plot.vb.viewRange() - # if vrs[1][0] > major_mn: - # breakpoint() - - if debug_print: - print( - f'END UX GRAPHICS CYCLE: @{chart_name}\n' - + - '#'*100 - + - '\n' - ) - if not do_linked_charts: - return - - profiler.finish() - - -def _maybe_calc_yrange( - viz: Viz, - yrange_kwargs: dict[Viz, dict[str, Any]], - profiler: Profiler, - chart_name: str, - -) -> tuple[ - slice, - dict, -] | None: - - if not viz.render: - return - - # pass in no array which will read and render from the last - # passed array (normally provided by the display loop.) - in_view, i_read_range, _ = viz.update_graphics() - - if not in_view: - return - - profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') - - # check if explicit yrange (kwargs) was passed in by the caller - yrange_kwargs = yrange_kwargs.get(viz) if yrange_kwargs else None - if yrange_kwargs is not None: - read_slc = slice(*i_read_range) - - else: - out = viz.maxmin(i_read_range=i_read_range) - if out is None: - log.warning(f'No yrange provided for {viz.name}!?') - return - ( - _, # ixrng, - read_slc, - yrange - ) = out - profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') - yrange_kwargs = {'yrange': yrange} - - return ( - read_slc, - yrange_kwargs, - ) - - -class OverlayT(Struct): - ''' - An overlay co-domain range transformer. - - Used to translate and apply a range from one y-range - to another based on a returns logarithm: - - R(ymn, ymx, yref) = (ymx - yref)/yref - - which gives the log-scale multiplier, and - - ymx_t = yref * (1 + R) - - which gives the inverse to translate to the same value - in the target co-domain. - - ''' - start_t: float | None = None - viz: Viz = None - - # % "range" computed from some ref value to the mn/mx - rng: float | None = None - in_view: np.ndarray | None = None - - # pinned-minor curve modified mn and max for the major dispersion - # curve due to one series being shorter and the pin + scaling from - # that pin point causing the original range to have to increase. 
- y_val: float | None = None - - def apply_rng( - self, - y_start: float, # reference value for dispersion metric - - ) -> float: - return y_start * (1 + self.rng) - - # def loglin_from_range( - # self, - - # y_ref: float, # reference value for dispersion metric - # mn: float, # min y in target log-lin range - # mx: float, # max y in target log-lin range - # offset: float, # y-offset to start log-scaling from - - # ) -> tuple[float, float]: - # r_up = (mx - y_ref) / y_ref - # r_down = (mn - y_ref) / y_ref - # ymn = offset * (1 + r_down) - # ymx = offset * (1 + r_up) - - # return ymn, ymx - - -def intersect_from_longer( - start_t_first: float, - in_view_first: np.ndarray, - - start_t_second: float, - in_view_second: np.ndarray, - -) -> np.ndarray: - - tdiff = start_t_first - start_t_second - - if tdiff == 0: - return False - - i: int = 0 - - # first time series has an "earlier" first time stamp then the 2nd. - # aka 1st is "shorter" then the 2nd. - if tdiff > 0: - longer = in_view_second - find_t = start_t_first - i = 1 - - # second time series has an "earlier" first time stamp then the 1st. - # aka 2nd is "shorter" then the 1st. - elif tdiff < 0: - longer = in_view_first - find_t = start_t_second - i = 0 - - slc = slice_from_time( - arr=longer, - start_t=find_t, - stop_t=find_t, - ) - return ( - longer[slc.start], - find_t, - i, - ) + return overlay_viewlists( + self._viz, + plots, + profiler, + do_overlay_scaling=do_overlay_scaling, + do_linked_charts=do_linked_charts, + yrange_kwargs=yrange_kwargs, + overlay_technique=overlay_technique, + ) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py new file mode 100644 index 0000000000..53f896b0ac --- /dev/null +++ b/piker/ui/view_mode.py @@ -0,0 +1,635 @@ +# piker: trading gear for hackers +# Copyright (C) Tyler Goodlet (in stewardship for pikers) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +Overlay (aka multi-chart) UX machinery. + +''' +from __future__ import annotations +from math import ( + isinf, +) +from typing import ( + Any, + Literal, + TYPE_CHECKING, +) + +import numpy as np +import pyqtgraph as pg + +from ..data.types import Struct +from ..data._pathops import slice_from_time +from ..log import get_logger +from .._profile import Profiler + +if TYPE_CHECKING: + from ._chart import ChartPlotWidget + from ._dataviz import Viz + from ._interaction import ChartView + + +log = get_logger(__name__) + + +class OverlayT(Struct): + ''' + An overlay co-domain range transformer. + + Used to translate and apply a range from one y-range + to another based on a returns logarithm: + + R(ymn, ymx, yref) = (ymx - yref)/yref + + which gives the log-scale multiplier, and + + ymx_t = yref * (1 + R) + + which gives the inverse to translate to the same value + in the target co-domain. 
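    For example, with a reference ``yref = 100`` and a viewed
    max ``ymx = 120``:

        R = (120 - 100) / 100    # -> 0.2
        ymx_t = 100 * (1 + R)    # -> 120.0 in the target co-domain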
+ + ''' + start_t: float | None = None + viz: Viz | None = None + + # % "range" computed from some ref value to the mn/mx + rng: float | None = None + in_view: np.ndarray | None = None + + # pinned-minor curve modified mn and max for the major dispersion + # curve due to one series being shorter and the pin + scaling from + # that pin point causing the original range to have to increase. + y_val: float | None = None + + def apply_rng( + self, + y_start: float, # reference value for dispersion metric + + ) -> float: + return y_start * (1 + self.rng) + + # def loglin_from_range( + # self, + + # y_ref: float, # reference value for dispersion metric + # mn: float, # min y in target log-lin range + # mx: float, # max y in target log-lin range + # offset: float, # y-offset to start log-scaling from + + # ) -> tuple[float, float]: + # r_up = (mx - y_ref) / y_ref + # r_down = (mn - y_ref) / y_ref + # ymn = offset * (1 + r_down) + # ymx = offset * (1 + r_up) + + # return ymn, ymx + + +def intersect_from_longer( + start_t_first: float, + in_view_first: np.ndarray, + + start_t_second: float, + in_view_second: np.ndarray, + +) -> np.ndarray: + + tdiff = start_t_first - start_t_second + + if tdiff == 0: + return False + + i: int = 0 + + # first time series has an "earlier" first time stamp then the 2nd. + # aka 1st is "shorter" then the 2nd. + if tdiff > 0: + longer = in_view_second + find_t = start_t_first + i = 1 + + # second time series has an "earlier" first time stamp then the 1st. + # aka 2nd is "shorter" then the 1st. + elif tdiff < 0: + longer = in_view_first + find_t = start_t_second + i = 0 + + slc = slice_from_time( + arr=longer, + start_t=find_t, + stop_t=find_t, + ) + return ( + longer[slc.start], + find_t, + i, + ) + + +def _maybe_calc_yrange( + viz: Viz, + yrange_kwargs: dict[Viz, dict[str, Any]], + profiler: Profiler, + chart_name: str, + +) -> tuple[ + slice, + dict, +] | None: + + if not viz.render: + return + + # pass in no array which will read and render from the last + # passed array (normally provided by the display loop.) + in_view, i_read_range, _ = viz.update_graphics() + + if not in_view: + return + + profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`') + + # check if explicit yrange (kwargs) was passed in by the caller + yrange_kwargs = yrange_kwargs.get(viz) if yrange_kwargs else None + if yrange_kwargs is not None: + read_slc = slice(*i_read_range) + + else: + out = viz.maxmin(i_read_range=i_read_range) + if out is None: + log.warning(f'No yrange provided for {viz.name}!?') + return + ( + _, # ixrng, + read_slc, + yrange + ) = out + profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`') + yrange_kwargs = {'yrange': yrange} + + return ( + read_slc, + yrange_kwargs, + ) + + +def overlay_viewlists( + active_viz: Viz, + plots: dict[str, ChartPlotWidget], + profiler: Profiler, + + # public config ctls + do_linked_charts: bool = True, + do_overlay_scaling: bool = True, + yrange_kwargs: dict[ + str, + tuple[float, float], + ] | None = None, + + overlay_technique: Literal[ + 'loglin_to_first', + 'loglin_to_sigma', + 'mnmx', + 'solo', + ] = 'loglin_to_first', + + + # internal instrumentation + debug_print: bool = False, + +) -> None: + ''' + Calculate and apply y-domain (axis y-range) multi-curve overlay adjustments + a set of ``plots`` based on the requested ``overlay_technique``. 
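    A rough call sketch (as invoked from
    ``ChartView.interact_graphics_cycle()`` above):

        overlay_viewlists(
            active_viz,
            plots,
            profiler,
            overlay_technique='loglin_to_first',
        )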
+ + ''' + chart_name: str + chart: ChartPlotWidget + for chart_name, chart in plots.items(): + + # Common `PlotItem` maxmin table; presumes that some path + # graphics (and thus their backing data sets) are in the + # same co-domain and view box (since the were added + # a separate graphics objects to a common plot) and thus can + # be sorted as one set per plot. + mxmns_by_common_pi: dict[ + pg.PlotItem, + tuple[float, float], + ] = {} + + # proportional group auto-scaling per overlay set. + # -> loop through overlays on each multi-chart widget + # and scale all y-ranges based on autoscale config. + # -> for any "group" overlay we want to dispersion normalize + # and scale minor charts onto the major chart: the chart + # with the most dispersion in the set. + major_sigma_viz: Viz = None + major_mx: float = 0 + major_mn: float = float('inf') + mx_disp: float = 0 + + # collect certain flows have grapics objects **in seperate + # plots/viewboxes** into groups and do a common calc to + # determine auto-ranging input for `._set_yrange()`. + # this is primarly used for our so called "log-linearized + # multi-plot" overlay technique. + overlay_table: dict[ + ChartView, + tuple[ + Viz, + float, # y start + float, # y min + float, # y max + float, # y median + slice, # in-view array slice + np.ndarray, # in-view array + ], + ] = {} + + # ONLY auto-yrange the viz mapped to THIS view box + if not do_overlay_scaling: + viz = active_viz + if debug_print: + print(f'ONLY ranging THIS viz: {viz.name}') + + out = _maybe_calc_yrange( + viz, + yrange_kwargs, + profiler, + chart_name, + ) + if out is None: + continue + + read_slc, yrange_kwargs = out + viz.plot.vb._set_yrange(**yrange_kwargs) + profiler(f'{viz.name}@{chart_name} single curve yrange') + + # don't iterate overlays, just move to next chart + continue + + # create a group overlay log-linearized y-range transform to + # track and eventually inverse transform all overlay curves + # to a common target max dispersion range. + dnt = OverlayT() + upt = OverlayT() + + if debug_print: + print( + f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n' + + + '#'*100 + + + '\n' + ) + + for name, viz in chart._vizs.items(): + + out = _maybe_calc_yrange( + viz, + yrange_kwargs, + profiler, + chart_name, + ) + if out is None: + continue + + read_slc, yrange_kwargs = out + yrange = yrange_kwargs['yrange'] + pi = viz.plot + + # handle multiple graphics-objs per viewbox cases + mxmn = mxmns_by_common_pi.get(pi) + if mxmn: + yrange = mxmns_by_common_pi[pi] = ( + min(yrange[0], mxmn[0]), + max(yrange[1], mxmn[1]), + ) + + else: + mxmns_by_common_pi[pi] = yrange + + profiler(f'{viz.name}@{chart_name} common pi sort') + + # non-overlay group case + if not viz.is_ohlc: + pi.vb._set_yrange(yrange=yrange) + profiler( + f'{viz.name}@{chart_name} simple std `._set_yrange()`' + ) + + # handle overlay log-linearized group scaling cases + # TODO: a better predicate here, likely something + # to do with overlays and their settings.. + # TODO: we probably eventually might want some other + # charts besides OHLC? 
+ else: + ymn, ymx = yrange + + # determine start datum in view + arr = viz.shm.array + in_view = arr[read_slc] + if not in_view.size: + log.warning(f'{viz.name} not in view?') + continue + + # row_start = arr[read_slc.start - 1] + row_start = arr[read_slc.start] + + if viz.is_ohlc: + y_ref = row_start['open'] + else: + y_ref = row_start[viz.name] + + profiler(f'{viz.name}@{chart_name} MINOR curve median') + + overlay_table[viz.plot.vb] = ( + viz, + y_ref, + ymn, + ymx, + read_slc, + in_view, + ) + + key = 'open' if viz.is_ohlc else viz.name + start_t = in_view[0]['time'] + r_down = (ymn - y_ref) / y_ref + r_up = (ymx - y_ref) / y_ref + + msg = ( + f'### {viz.name}@{chart_name} ###\n' + f'y_ref: {y_ref}\n' + f'down disp: {r_down}\n' + f'up disp: {r_up}\n' + ) + profiler(msg) + if debug_print: + print(msg) + + # track the "major" curve as the curve with most + # dispersion. + if ( + dnt.rng is None + or ( + r_down < dnt.rng + and r_down < 0 + ) + ): + dnt.viz = viz + dnt.rng = r_down + dnt.in_view = in_view + dnt.start_t = in_view[0]['time'] + major_mn = ymn + + msg = f'NEW DOWN: {viz.name}@{chart_name} r:{r_down}\n' + profiler(msg) + if debug_print: + print(msg) + else: + # minor in the down swing range so check that if + # we apply the current rng to the minor that it + # doesn't go outside the current range for the major + # otherwise we recompute the minor's range (when + # adjusted for it's intersect point to be the new + # major's range. + intersect = intersect_from_longer( + dnt.start_t, + dnt.in_view, + start_t, + in_view, + ) + profiler(f'{viz.name}@{chart_name} intersect by t') + + if intersect: + longer_in_view, _t, i = intersect + + scaled_mn = dnt.apply_rng(y_ref) + if scaled_mn > ymn: + # after major curve scaling we detected + # the minor curve is still out of range + # so we need to adjust the major's range + # to include the new composed range. + y_maj_ref = longer_in_view[key] + new_major_ymn = ( + y_maj_ref + * + (1 + r_down) + ) + + # rewrite the major range to the new + # minor-pinned-to-major range and mark + # the transform as "virtual". + msg = ( + f'EXPAND DOWN bc {viz.name}@{chart_name}\n' + f'y_start epoch time @ {_t}:\n' + f'y_maj_ref @ {_t}: {y_maj_ref}\n' + f'R: {dnt.rng} -> {r_down}\n' + f'MN: {major_mn} -> {new_major_ymn}\n' + ) + dnt.rng = r_down + major_mn = dnt.y_val = new_major_ymn + profiler(msg) + if debug_print: + print(msg) + + if ( + upt.rng is None + or ( + r_up > upt.rng + and r_up > 0 + ) + ): + upt.rng = r_up + upt.viz = viz + upt.in_view = in_view + upt.start_t = in_view[0]['time'] + major_mx = ymx + msg = f'NEW UP: {viz.name}@{chart_name} r:{r_up}\n' + profiler(msg) + if debug_print: + print(msg) + + else: + intersect = intersect_from_longer( + upt.start_t, + upt.in_view, + start_t, + in_view, + ) + profiler(f'{viz.name}@{chart_name} intersect by t') + + if intersect: + longer_in_view, _t, i = intersect + + scaled_mx = upt.apply_rng(y_ref) + if scaled_mx < ymx: + # after major curve scaling we detected + # the minor curve is still out of range + # so we need to adjust the major's range + # to include the new composed range. + y_maj_ref = longer_in_view[key] + new_major_ymx = ( + y_maj_ref + * + (1 + r_up) + ) + + # rewrite the major range to the new + # minor-pinned-to-major range and mark + # the transform as "virtual". 
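                            # e.g. with y_maj_ref = 50 and a minor
                            # up-return r_up = 0.10 the major's max
                            # is bumped to 50 * (1 + 0.10) = 55.0,
                            # keeping the minor's full up-swing in
                            # view.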
+ msg = ( + f'EXPAND UP bc {viz.name}@{chart_name}:\n' + f'y_maj_ref @ {_t}: {y_maj_ref}\n' + f'R: {upt.rng} -> {r_up}\n' + f'MX: {major_mx} -> {new_major_ymx}\n' + ) + upt.rng = r_up + major_mx = upt.y_val = new_major_ymx + profiler(msg) + print(msg) + + # find curve with max dispersion + disp = abs(ymx - ymn) / y_ref + if disp > mx_disp: + major_sigma_viz = viz + mx_disp = disp + major_mn = ymn + major_mx = ymx + + profiler(f'{viz.name}@{chart_name} MINOR curve scale') + + # NOTE: if no there were no overlay charts + # detected/collected (could be either no group detected or + # chart with a single symbol, thus a single viz/overlay) + # then we ONLY set the lone chart's (viz) yrange and short + # circuit to the next chart in the linked charts loop. IOW + # there's no reason to go through the overlay dispersion + # scaling in the next loop below when only one curve is + # detected. + if ( + not mxmns_by_common_pi + and len(overlay_table) < 2 + ): + if debug_print: + print(f'ONLY ranging major: {viz.name}') + + out = _maybe_calc_yrange( + viz, + yrange_kwargs, + profiler, + chart_name, + ) + if out is None: + continue + + read_slc, yrange_kwargs = out + viz.plot.vb._set_yrange(**yrange_kwargs) + profiler(f'{viz.name}@{chart_name} single curve yrange') + + # move to next chart in linked set since + # no overlay transforming is needed. + continue + + elif ( + mxmns_by_common_pi + and not major_sigma_viz + ): + # move to next chart in linked set since + # no overlay transforming is needed. + continue + + profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + + # if a minor curves scaling brings it "outside" the range of + # the major curve (in major curve co-domain terms) then we + # need to rescale the major to also include this range. The + # below placeholder denotes when this occurs. + # group_mxmn: None | tuple[float, float] = None + + # TODO: probably re-write this loop as a compiled cpython or + # numba func. + + # conduct "log-linearized multi-plot" scalings for all groups + for ( + view, + ( + viz, + y_start, + y_min, + y_max, + read_slc, + minor_in_view, + ) + ) in overlay_table.items(): + + key = 'open' if viz.is_ohlc else viz.name + + if ( + isinf(ymx) + or isinf(ymn) + ): + log.warning( + f'BAD ymx/ymn: {(ymn, ymx)}' + ) + continue + + ymn = dnt.apply_rng(y_start) + ymx = upt.apply_rng(y_start) + + # NOTE XXX: we have to set each curve's range once (and + # ONLY ONCE) here since we're doing this entire routine + # inside of a single render cycle (and apparently calling + # `ViewBox.setYRange()` multiple times within one only takes + # the first call as serious...) 
XD + view._set_yrange( + yrange=(ymn, ymx), + ) + profiler(f'{viz.name}@{chart_name} log-SCALE minor') + + if debug_print: + print( + '------------------------------\n' + f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' + f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' + f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' + f'y_start: {y_start}\n' + f'y min: {y_min}\n' + f'y max: {y_max}\n' + f'T scaled ymn: {ymn}\n' + f'T scaled ymx: {ymx}\n' + '------------------------------\n' + ) + + # profiler(f'{viz.name}@{chart_name} log-SCALE major') + # major_mx, major_mn = group_mxmn + # vrs = major_sigma_viz.plot.vb.viewRange() + # if vrs[1][0] > major_mn: + # breakpoint() + + if debug_print: + print( + f'END UX GRAPHICS CYCLE: @{chart_name}\n' + + + '#'*100 + + + '\n' + ) + if not do_linked_charts: + return + + profiler.finish() From 98b7d784760f5947263b18ac276d38717fd8132d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 21 Feb 2023 19:46:26 -0500 Subject: [PATCH 096/136] Drop (now) unused major curve mx/mn variables --- piker/ui/view_mode.py | 63 +++++++++++++++++-------------------------- 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index 53f896b0ac..0e55a051bb 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -76,10 +76,10 @@ class OverlayT(Struct): def apply_rng( self, - y_start: float, # reference value for dispersion metric + y_ref: float, # reference value for dispersion metric ) -> float: - return y_start * (1 + self.rng) + return y_ref * (1 + self.rng) # def loglin_from_range( # self, @@ -207,7 +207,6 @@ def overlay_viewlists( 'solo', ] = 'loglin_to_first', - # internal instrumentation debug_print: bool = False, @@ -238,8 +237,6 @@ def overlay_viewlists( # and scale minor charts onto the major chart: the chart # with the most dispersion in the set. major_sigma_viz: Viz = None - major_mx: float = 0 - major_mn: float = float('inf') mx_disp: float = 0 # collect certain flows have grapics objects **in seperate @@ -357,15 +354,6 @@ def overlay_viewlists( profiler(f'{viz.name}@{chart_name} MINOR curve median') - overlay_table[viz.plot.vb] = ( - viz, - y_ref, - ymn, - ymx, - read_slc, - in_view, - ) - key = 'open' if viz.is_ohlc else viz.name start_t = in_view[0]['time'] r_down = (ymn - y_ref) / y_ref @@ -394,7 +382,7 @@ def overlay_viewlists( dnt.rng = r_down dnt.in_view = in_view dnt.start_t = in_view[0]['time'] - major_mn = ymn + dnt.y_val = ymn msg = f'NEW DOWN: {viz.name}@{chart_name} r:{r_down}\n' profiler(msg) @@ -425,11 +413,7 @@ def overlay_viewlists( # so we need to adjust the major's range # to include the new composed range. 
y_maj_ref = longer_in_view[key] - new_major_ymn = ( - y_maj_ref - * - (1 + r_down) - ) + new_major_ymn = y_maj_ref * (1 + r_down) # rewrite the major range to the new # minor-pinned-to-major range and mark @@ -439,10 +423,10 @@ def overlay_viewlists( f'y_start epoch time @ {_t}:\n' f'y_maj_ref @ {_t}: {y_maj_ref}\n' f'R: {dnt.rng} -> {r_down}\n' - f'MN: {major_mn} -> {new_major_ymn}\n' + f'MN: {dnt.y_val} -> {new_major_ymn}\n' ) dnt.rng = r_down - major_mn = dnt.y_val = new_major_ymn + dnt.y_val = new_major_ymn profiler(msg) if debug_print: print(msg) @@ -458,7 +442,7 @@ def overlay_viewlists( upt.viz = viz upt.in_view = in_view upt.start_t = in_view[0]['time'] - major_mx = ymx + upt.y_val = ymx msg = f'NEW UP: {viz.name}@{chart_name} r:{r_up}\n' profiler(msg) if debug_print: @@ -476,18 +460,14 @@ def overlay_viewlists( if intersect: longer_in_view, _t, i = intersect + # after major curve scaling we detect if + # the minor curve is still out of range + # so we need to adjust the major's range + # to include the new composed range. scaled_mx = upt.apply_rng(y_ref) if scaled_mx < ymx: - # after major curve scaling we detected - # the minor curve is still out of range - # so we need to adjust the major's range - # to include the new composed range. y_maj_ref = longer_in_view[key] - new_major_ymx = ( - y_maj_ref - * - (1 + r_up) - ) + new_major_ymx = y_maj_ref * (1 + r_up) # rewrite the major range to the new # minor-pinned-to-major range and mark @@ -496,10 +476,10 @@ def overlay_viewlists( f'EXPAND UP bc {viz.name}@{chart_name}:\n' f'y_maj_ref @ {_t}: {y_maj_ref}\n' f'R: {upt.rng} -> {r_up}\n' - f'MX: {major_mx} -> {new_major_ymx}\n' + f'MX: {upt.y_val} -> {new_major_ymx}\n' ) upt.rng = r_up - major_mx = upt.y_val = new_major_ymx + upt.y_val = new_major_ymx profiler(msg) print(msg) @@ -508,10 +488,17 @@ def overlay_viewlists( if disp > mx_disp: major_sigma_viz = viz mx_disp = disp - major_mn = ymn - major_mx = ymx - profiler(f'{viz.name}@{chart_name} MINOR curve scale') + overlay_table[viz.plot.vb] = ( + viz, + y_ref, + ymn, + ymx, + read_slc, + in_view, + ) + + profiler(f'{viz.name}@{chart_name} yrange scan complete') # NOTE: if no there were no overlay charts # detected/collected (could be either no group detected or @@ -615,8 +602,6 @@ def overlay_viewlists( '------------------------------\n' ) - # profiler(f'{viz.name}@{chart_name} log-SCALE major') - # major_mx, major_mn = group_mxmn # vrs = major_sigma_viz.plot.vb.viewRange() # if vrs[1][0] > major_mn: # breakpoint() From 26690b061bc4f4954dd9c5550a39d9735755baf7 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 21 Feb 2023 19:46:50 -0500 Subject: [PATCH 097/136] Make slow chart a teensie bit smaller --- piker/ui/_chart.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index dbc9f495c9..d4dadab00d 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -488,7 +488,7 @@ def set_split_sizes( prop = 3/8 h = self.height() - histview_h = h * (6/16) + histview_h = h * (4/11) h = h - histview_h major = 1 - prop From 62e0889bf52cca9f9b71dbf06e24059237a18312 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 24 Feb 2023 13:38:45 -0500 Subject: [PATCH 098/136] Add `Viz.view_state: ViewState` Adds a small struct which is used to track the most recently viewed data's x/y ranges as well as the last `Viz.read()` "in view" array data for fast access by chart related graphics processing code, namely view mode overlay handling. 
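A rough access sketch (fields per the diff below):

    vs = viz.view_state
    xl, xr = vs.xrange     # (xl, xr) input view range in x-domain
    ymn, ymx = vs.yrange   # (ymn, ymx) output range in y-codomain
    arr = vs.in_view       # last in-view ``ShmArray.array[read_slc]``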
Also adds new `Viz` interfaces: - `Viz.ds_yrange: tuple[float, float]' which replaces the previous `.yrange` (now set by `.datums_range()` on manual y-range calcs) so that the m4 downsampler can set this field specifically and then it get used (when available) by `Viz.maxmin()`. - `Viz.scalars_from_index()` a new returns-scalar generator which can be used to calc the up and down returns values (used for scaling overlay y-ranges) from an input `xref` x-domain index which maps to some `Ci(xref) = yref`. --- piker/ui/_dataviz.py | 169 ++++++++++++++++++++++++++++++++----------- 1 file changed, 127 insertions(+), 42 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 73a0ab6b1f..2900853f48 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -29,7 +29,10 @@ TYPE_CHECKING, ) -import msgspec +from msgspec import ( + Struct, + field, +) import numpy as np import pyqtgraph as pg from PyQt5.QtCore import QLineF @@ -225,15 +228,49 @@ def render_baritems( _sample_rates: set[float] = {1, 60} -class Viz(msgspec.Struct): # , frozen=True): +class ViewState(Struct): + ''' + Indexing objects representing the current view x-range -> y-range. + + ''' + # (xl, xr) "input" view range in x-domain + xrange: tuple[ + float | int, + float | int + ] | None = None + + # (ymn, ymx) "output" min and max in viewed y-codomain + yrange: tuple[ + float | int, + float | int + ] | None = None + + # last in view ``ShmArray.array[read_slc]`` data + in_view: np.ndarray | None = None + + +class Viz(Struct): # , frozen=True): ''' (Data) "Visualization" compound type which wraps a real-time shm array stream with displayed graphics (curves, charts) for high level access and control as well as efficient incremental - update. + update, oriented around the idea of a "view state". + + The (backend) intention is for this interface and type is to + eventually be capable of shm-passing of incrementally updated + graphics stream data, thus providing a cross-actor solution to + sharing UI-related update state potentionally in a (compressed) + binary-interchange format. + + Further, from an interaction-triggers-view-in-UI perspective, this type + operates as a transform: + (x_left, x_right) -> output metrics {ymn, ymx, uppx, ...} - The intention is for this type to eventually be capable of shm-passing - of incrementally updated graphics stream data between actors. + wherein each x-domain range maps to some output set of (graphics + related) vizualization metrics. In further documentation we often + refer to this abstraction as a vizualization curve: Ci. Each Ci is + considered a function which maps an x-range (input view range) to + a multi-variate (metrics) output. ''' name: str @@ -242,8 +279,12 @@ class Viz(msgspec.Struct): # , frozen=True): flume: Flume graphics: Curve | BarItems - # for tracking y-mn/mx for y-axis auto-ranging - yrange: tuple[float, float] = None + view_state: ViewState = field(default_factory=ViewState) + + # last calculated y-mn/mx from m4 downsample code, this + # is updated in the body of `Renderer.render()`. + ds_yrange: tuple[float, float] | None = None + yrange: tuple[float, float] | None = None # in some cases a viz may want to change its # graphical "type" or, "form" when downsampling, to @@ -264,7 +305,7 @@ class Viz(msgspec.Struct): # , frozen=True): ] = 'time' - # downsampling state + # TODO: maybe compound this into a downsampling state type? 
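    # e.g. a minimal sketch of such a compound type might be:
    #
    #   class DownsampleState(Struct):
    #       last_uppx: float = 0
    #       in_ds: bool = False
    #       index_step: float | None = None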
_last_uppx: float = 0 _in_ds: bool = False _index_step: float | None = None @@ -303,14 +344,23 @@ def shm(self) -> ShmArray: @property def index_field(self) -> str: + ''' + The column name as ``str`` in the underlying ``._shm: ShmArray`` + which will deliver the "index" array. + + ''' return self._index_field def index_step( self, reset: bool = False, - ) -> float: + ''' + Return the size between sample steps in the units of the + x-domain, normally either an ``int`` array index size or an + epoch time in seconds. + ''' # attempt to dectect the best step size by scanning a sample of # the source data. if self._index_step is None: @@ -393,7 +443,7 @@ def maxmin( # TODO: hash the slice instead maybe? # https://stackoverflow.com/a/29980872 - lbar, rbar = ixrng = round(x_range[0]), round(x_range[1]) + ixrng = lbar, rbar = round(x_range[0]), round(x_range[1]) if use_caching: cached_result = self._mxmns.get(ixrng) @@ -436,8 +486,8 @@ def maxmin( ) return None - elif self.yrange: - mxmn = self.yrange + elif self.ds_yrange: + mxmn = self.ds_yrange if do_print: print( f'{self.name} M4 maxmin:\n' @@ -477,19 +527,6 @@ def maxmin( mxmn, ) - @lru_cache(maxsize=6116) - def median_from_range( - self, - start: int, - stop: int, - - ) -> float: - in_view = self.shm.array[start:stop] - if self.is_ohlc: - return np.median(in_view['close']) - else: - return np.median(in_view[self.name]) - def view_range(self) -> tuple[int, int]: ''' Return the start and stop x-indexes for the managed ``ViewBox``. @@ -514,7 +551,7 @@ def datums_range( self, view_range: None | tuple[float, float] = None, index_field: str | None = None, - array: None | np.ndarray = None, + array: np.ndarray | None = None, ) -> tuple[ int, int, int, int, int, int @@ -527,29 +564,34 @@ def datums_range( index_field: str = index_field or self.index_field if index_field == 'index': - l, r = round(l), round(r) + l: int = round(l) + r: int = round(r) if array is None: array = self.shm.array index = array[index_field] - first = floor(index[0]) - last = ceil(index[-1]) + first: int = floor(index[0]) + last: int = ceil(index[-1]) # first and last datums in view determined by - # l / r view range. - leftmost = floor(l) - rightmost = ceil(r) + # l -> r view range. + leftmost: int = floor(l) + rightmost: int = ceil(r) # invalid view state if ( r < l or l < 0 or r < 0 - or (l > last and r > last) + or ( + l > last + and r > last + ) ): - leftmost = first - rightmost = last + leftmost: int = first + rightmost: int = last + else: rightmost = max( min(last, rightmost), @@ -562,7 +604,10 @@ def datums_range( rightmost - 1, ) - assert leftmost < rightmost + # sanity + # assert leftmost < rightmost + + self.view_state.xrange = leftmost, rightmost return ( l, # left x-in-view @@ -591,11 +636,9 @@ def read( ''' index_field: str = index_field or self.index_field - vr = l, r = self.view_range() # readable data array = self.shm.array - if profiler: profiler('self.shm.array READ') @@ -607,7 +650,6 @@ def read( ilast, r, ) = self.datums_range( - view_range=vr, index_field=index_field, array=array, ) @@ -629,11 +671,14 @@ def read( # above? in_view = array[read_slc] if in_view.size: + self.view_state.in_view = in_view abs_indx = in_view['index'] abs_slc = slice( int(abs_indx[0]), int(abs_indx[-1]), ) + else: + self.view_state.in_view = None if profiler: profiler( @@ -654,10 +699,11 @@ def read( # BUT the ``in_view`` slice DOES.. 
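        # e.g. (roughly) if the buffer's first abs index is 1000
        # and the view spans abs indices [1005, 1010] then:
        #   lbar_i, rbar_i -> 5, 10
        #   read_slc       -> slice(5, 10)
        #   in_view        -> array[5:11]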
read_slc = slice(lbar_i, rbar_i) in_view = array[lbar_i: rbar_i + 1] + self.view_state.in_view = in_view # in_view = array[lbar_i-1: rbar_i+1] - # XXX: same as ^ # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] + if profiler: profiler('index arithmetic for slicing') @@ -692,8 +738,8 @@ def update_graphics( pg.GraphicsObject, ]: ''' - Read latest datums from shm and render to (incrementally) - render to graphics. + Read latest datums from shm and (incrementally) render to + graphics. ''' profiler = Profiler( @@ -1262,3 +1308,42 @@ def px_width(self) -> float: vr, 0, ) ).length() + + @lru_cache(maxsize=6116) + def median_from_range( + self, + start: int, + stop: int, + + ) -> float: + in_view = self.shm.array[start:stop] + if self.is_ohlc: + return np.median(in_view['close']) + else: + return np.median(in_view[self.name]) + + @lru_cache(maxsize=6116) + def dispersion( + start: int, + stop: int, + + ) -> float: + pass + + def scalars_from_index( + self, + xref: float, + + ) -> tuple[float, float]: + arr = self.view_state.in_view + slc = slice_from_time( + arr=self.view_state.in_view, + start_t=xref, + stop_t=xref, + ) + yref = arr[slc.start] + ymn, ymx = self.view_state.yrange + return ( + (ymn - yref) / yref, + (ymx - yref) / yref, + ) From 8fd5c67f2afc62f6ff96f8cd8574990e4cdd7096 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 26 Feb 2023 16:13:24 -0500 Subject: [PATCH 099/136] Drop last lingering usage of `Viz.bars_range()` --- piker/ui/_interaction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 0a972d8c82..2200dae076 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -509,7 +509,7 @@ def wheelEvent( # don't zoom more then the min points setting viz = chart.get_viz(chart.name) - vl, lbar, rbar, vr = viz.bars_range() + _, vl, lbar, rbar, vr, _ = viz.datums_range() # TODO: max/min zoom limits incorporating time step size. # rl = vr - vl From 29418e96557bd4be44edbe06fe66524b25e9b19a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 27 Feb 2023 11:51:19 -0500 Subject: [PATCH 100/136] Avoid index-from-time slicing including gaps Not sure why this was ever allowed but, for slicing to the sample *before* whatever target time stamp is passed in we should definitely not return the prior index as for the slice start since that might include a very large gap prior to whatever sample is scanned to have the earliest matching time stamp. This was essential to fixing overlay intersect points searching in our ``ui.view_mode`` machinery.. --- piker/data/_pathops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index adaed041bf..d8c15511d6 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -395,7 +395,7 @@ def slice_from_time( # f'diff: {t_diff}\n' # f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n' # ) - read_i_start = new_read_i_start - 1 + read_i_start = new_read_i_start t_iv_stop = times[read_i_stop - 1] if ( From 4d11c5c89cb0d21860e12b34d416cf618d53ad65 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 27 Feb 2023 14:18:41 -0500 Subject: [PATCH 101/136] Add cached dispersion methods to `Viz` In an effort to make overlay calcs cleaner and leverage caching of view range -> dispersion measures, this adds the following new methods: - `._dispersion()` an lru cached returns scalar calculator given input y-range and y-ref values. 
- `.disp_from_range()` which calls the above method and returns variable output depending on requested calc `method: str`. - `.i_from_t()` a currently unused cached method for slicing the in-view's array index from time stamp (though not working yet due to needing to parameterize the cache by the input `.vs.xrange`). Further refinements/adjustments: - rename `.view_state: ViewState` -> `.vs`. - drop the `.bars_range()` method as it's no longer used anywhere else in the code base. - always set the `ViewState.in_view: np.ndarray` inside `.read()`. - return the start array index (from slice) and `yref` value @ `xref` from `.scalars_from_index()` to aid with "pin to curve" rescaling caused by out-of-range pinned-minor curves. --- piker/ui/_dataviz.py | 129 ++++++++++++++++++++++++++++++++----------- 1 file changed, 96 insertions(+), 33 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 2900853f48..f85022f7f2 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -279,7 +279,7 @@ class Viz(Struct): # , frozen=True): flume: Flume graphics: Curve | BarItems - view_state: ViewState = field(default_factory=ViewState) + vs: ViewState = field(default_factory=ViewState) # last calculated y-mn/mx from m4 downsample code, this # is updated in the body of `Renderer.render()`. @@ -520,6 +520,7 @@ def maxmin( # cache result for input range assert mxmn self._mxmns[ixrng] = (read_slc, mxmn) + self.vs.yrange = mxmn profiler(f'yrange mxmn cacheing: {x_range} -> {mxmn}') return ( ixrng, @@ -538,15 +539,6 @@ def view_range(self) -> tuple[int, int]: vr.right(), ) - def bars_range(self) -> tuple[int, int, int, int]: - ''' - Return a range tuple for the left-view, left-datum, right-datum - and right-view x-indices. - - ''' - l, start, datum_start, datum_stop, stop, r = self.datums_range() - return l, datum_start, datum_stop, r - def datums_range( self, view_range: None | tuple[float, float] = None, @@ -574,11 +566,6 @@ def datums_range( first: int = floor(index[0]) last: int = ceil(index[-1]) - # first and last datums in view determined by - # l -> r view range. - leftmost: int = floor(l) - rightmost: int = ceil(r) - # invalid view state if ( r < l @@ -593,13 +580,15 @@ def datums_range( rightmost: int = last else: + # determine first and last datums in view determined by + # l -> r view range. rightmost = max( - min(last, rightmost), + min(last, ceil(r)), first, ) leftmost = min( - max(first, leftmost), + max(first, floor(l)), last, rightmost - 1, ) @@ -607,7 +596,7 @@ def datums_range( # sanity # assert leftmost < rightmost - self.view_state.xrange = leftmost, rightmost + self.vs.xrange = leftmost, rightmost return ( l, # left x-in-view @@ -671,14 +660,14 @@ def read( # above? in_view = array[read_slc] if in_view.size: - self.view_state.in_view = in_view + self.vs.in_view = in_view abs_indx = in_view['index'] abs_slc = slice( int(abs_indx[0]), int(abs_indx[-1]), ) else: - self.view_state.in_view = None + self.vs.in_view = None if profiler: profiler( @@ -699,7 +688,7 @@ def read( # BUT the ``in_view`` slice DOES.. 
read_slc = slice(lbar_i, rbar_i) in_view = array[lbar_i: rbar_i + 1] - self.view_state.in_view = in_view + self.vs.in_view = in_view # in_view = array[lbar_i-1: rbar_i+1] # XXX: same as ^ # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] @@ -1323,27 +1312,101 @@ def median_from_range( return np.median(in_view[self.name]) @lru_cache(maxsize=6116) - def dispersion( - start: int, - stop: int, + def _dispersion( + self, + # xrange: tuple[float, float], + ymn: float, + ymx: float, + yref: float, - ) -> float: - pass + ) -> tuple[float, float]: + return ( + (ymx - yref) / yref, + (ymn - yref) / yref, + ) + + def disp_from_range( + self, + xrange: tuple[float, float] | None = None, + yref: float | None = None, + method: Literal[ + 'up', + 'down', + 'full', # both sides + 'both', # both up and down as separate scalars + + ] = 'full', + + ) -> float | tuple[float, float] | None: + ''' + Return a dispersion metric referenced from an optionally + provided ``yref`` or the left-most datum level by default. + + ''' + vs = self.vs + yrange = vs.yrange + if yrange is None: + return None + + ymn, ymx = yrange + key = 'open' if self.is_ohlc else self.name + yref = yref or vs.in_view[0][key] + # xrange = xrange or vs.xrange + + # call into the lru_cache-d sigma calculator method + r_up, r_down = self._dispersion(ymn, ymx, yref) + match method: + case 'full': + return r_up - r_down + case 'up': + return r_up + case 'down': + return r_up + case 'both': + return r_up, r_down + + @lru_cache(maxsize=6116) + def i_from_t( + self, + t: float, + ) -> int: + return slice_from_time( + self.vs.in_view, + start_t=t, + stop_t=t, + step=self.index_step(), + ).start def scalars_from_index( self, - xref: float, + xref: float | None = None, + + ) -> tuple[int, float, float, float]: + + vs = self.vs + arr = vs.in_view + + # TODO: make this work by parametrizing over input + # .vs.xrange input for caching? + # read_slc_start = self.i_from_t(xref) - ) -> tuple[float, float]: - arr = self.view_state.in_view slc = slice_from_time( - arr=self.view_state.in_view, + arr=self.vs.in_view, start_t=xref, stop_t=xref, ) - yref = arr[slc.start] - ymn, ymx = self.view_state.yrange + read_slc_start = slc.start + + key = 'open' if self.is_ohlc else self.name + yref = arr[read_slc_start][key] + ymn, ymx = self.vs.yrange + # print( + # f'INTERSECT xref: {read_slc_start}\n' + # f'ymn, ymx: {(ymn, ymx)}\n' + # ) return ( - (ymn - yref) / yref, + read_slc_start, + yref, (ymx - yref) / yref, + (ymn - yref) / yref, ) From 6601dea8ccf19181135963a04705fd79764cad08 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 27 Feb 2023 15:55:37 -0500 Subject: [PATCH 102/136] Support "pin-to-target-curve" overlay method again Yah yah, i know it's the same as before (the N > 2 curves case with out-of-range-minor rescaling the previously scaled curves isn't fixed yet...) but, this is a much better and optional implementation in less code. Further we're now better leveraging various new cached properties and methods on `Viz`. We now handle different `overlay_technique: str` options using `match:` syntax in the 2ndary scaling loop, stash the returns scalars per curve in `overlay_table`, and store and iterate the curves by dispersion measure sort order. Further wrt "pin-to-target-curve" mode, which currently still pins to the largest measured dispersion curve in the overlay set: - pop major Ci overlay table entries at start for sub-calcs usage when handling the "minor requires major rescale after pin" case. 
- (finally) correctly rescale the major curve y-mxmn to whatever the latest minor overlay curve by calcing the inverse transform from the minor *at that point*: - the intersect point being that which the minor has starts support on the major's x-domain* using the new `Viz.scalars_from_index()` and, - checking that the minor is not out of range (versus what the major's transform calcs it to be, in which case, - calc the inverse transform from the current out-of-range minor and use it to project the new y-mxmn for the major/target based on the same intersect-reference point in the x-domain used by the minor. - always handle the target-major Ci specially by only setting the `mx_ymn` / `mx_ymn` value when iterating that entry in the overlay table. - add todos around also doing the last sub-sub bullet for all previously major-transform scaled minor overlays (this is coming next..i hope). - add a final 3rd overlay loop which goes through a final `scaled: dict` to apply all range values to each view; this is where we will eventually solve that last edge case of an out-of-range minor's scaling needing to be used to rescale already scaled minors XD --- piker/ui/view_mode.py | 209 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 173 insertions(+), 36 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index 0e55a051bb..73051d060a 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -62,8 +62,8 @@ class OverlayT(Struct): in the target co-domain. ''' - start_t: float | None = None viz: Viz | None = None + start_t: float | None = None # % "range" computed from some ref value to the mn/mx rng: float | None = None @@ -201,13 +201,13 @@ def overlay_viewlists( ] | None = None, overlay_technique: Literal[ - 'loglin_to_first', - 'loglin_to_sigma', - 'mnmx', + 'loglin_ref_to_curve', + 'loglin_ref_to_first', + 'mxmn', 'solo', - ] = 'loglin_to_first', + ] = 'loglin_ref_to_curve', - # internal instrumentation + # internal debug debug_print: bool = False, ) -> None: @@ -236,8 +236,6 @@ def overlay_viewlists( # -> for any "group" overlay we want to dispersion normalize # and scale minor charts onto the major chart: the chart # with the most dispersion in the set. - major_sigma_viz: Viz = None - mx_disp: float = 0 # collect certain flows have grapics objects **in seperate # plots/viewboxes** into groups and do a common calc to @@ -245,8 +243,9 @@ def overlay_viewlists( # this is primarly used for our so called "log-linearized # multi-plot" overlay technique. 
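        # NOTE: keying the table by each curve's "full" dispersion
        # measure means entries can later be pulled out in sort
        # order, roughly:
        #
        #   for disp in sorted(overlay_table):
        #       view, viz, *_ = overlay_table[disp]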
overlay_table: dict[ - ChartView, + float, tuple[ + ChartView, Viz, float, # y start float, # y min @@ -254,6 +253,8 @@ def overlay_viewlists( float, # y median slice, # in-view array slice np.ndarray, # in-view array + float, # returns up scalar + float, # return down scalar ], ] = {} @@ -323,11 +324,15 @@ def overlay_viewlists( profiler(f'{viz.name}@{chart_name} common pi sort') # non-overlay group case - if not viz.is_ohlc: + if ( + not viz.is_ohlc + or overlay_technique == 'solo' + ): pi.vb._set_yrange(yrange=yrange) profiler( f'{viz.name}@{chart_name} simple std `._set_yrange()`' ) + continue # handle overlay log-linearized group scaling cases # TODO: a better predicate here, likely something @@ -338,15 +343,12 @@ def overlay_viewlists( ymn, ymx = yrange # determine start datum in view - arr = viz.shm.array - in_view = arr[read_slc] + in_view = viz.vs.in_view if not in_view.size: log.warning(f'{viz.name} not in view?') continue - # row_start = arr[read_slc.start - 1] - row_start = arr[read_slc.start] - + row_start = in_view[0] if viz.is_ohlc: y_ref = row_start['open'] else: @@ -355,9 +357,11 @@ def overlay_viewlists( profiler(f'{viz.name}@{chart_name} MINOR curve median') key = 'open' if viz.is_ohlc else viz.name - start_t = in_view[0]['time'] - r_down = (ymn - y_ref) / y_ref + start_t = row_start['time'] + + # returns scalars r_up = (ymx - y_ref) / y_ref + r_down = (ymn - y_ref) / y_ref msg = ( f'### {viz.name}@{chart_name} ###\n' @@ -431,6 +435,8 @@ def overlay_viewlists( if debug_print: print(msg) + # is the current up `OverlayT` not yet defined or + # the current `r_up` greater then the previous max. if ( upt.rng is None or ( @@ -483,27 +489,37 @@ def overlay_viewlists( profiler(msg) print(msg) - # find curve with max dispersion - disp = abs(ymx - ymn) / y_ref - if disp > mx_disp: - major_sigma_viz = viz - mx_disp = disp + # disp = viz.disp_from_range(yref=y_ref) + # if disp is None: + # print(f'{viz.name}: WTF NO DISP') + # continue + + # r_up, r_dn = disp + disp = r_up - r_down - overlay_table[viz.plot.vb] = ( + # register curves by a "full" dispersion metric for + # later sort order in the overlay (technique + # ) application loop below. + overlay_table[disp] = ( + viz.plot.vb, viz, + y_ref, ymn, ymx, + read_slc, in_view, - ) + r_up, + r_down, + ) profiler(f'{viz.name}@{chart_name} yrange scan complete') # NOTE: if no there were no overlay charts # detected/collected (could be either no group detected or # chart with a single symbol, thus a single viz/overlay) - # then we ONLY set the lone chart's (viz) yrange and short + # then we ONLY set the mone chart's (viz) yrange and short # circuit to the next chart in the linked charts loop. IOW # there's no reason to go through the overlay dispersion # scaling in the next loop below when only one curve is @@ -534,13 +550,20 @@ def overlay_viewlists( elif ( mxmns_by_common_pi - and not major_sigma_viz + # and not major_sigma_viz + and not overlay_table ): # move to next chart in linked set since # no overlay transforming is needed. continue - profiler(f'<{chart_name}>.interact_graphics_cycle({name})') + msg = ( + f'`Viz` curve first pass complete\n' + f'overlay_table: {overlay_table.keys()}\n' + ) + profiler(msg) + if debug_print: + print(msg) # if a minor curves scaling brings it "outside" the range of # the major curve (in major curve co-domain terms) then we @@ -548,21 +571,39 @@ def overlay_viewlists( # below placeholder denotes when this occurs. 
# group_mxmn: None | tuple[float, float] = None - # TODO: probably re-write this loop as a compiled cpython or - # numba func. + r_up_mx: float + r_dn_mn: float + mx_disp = max(overlay_table) + mx_entry = overlay_table[mx_disp] + ( + _, # viewbox + mx_viz, # viz + _, # y_ref + mx_ymn, + mx_ymx, + _, # read_slc + _, # in_view array + r_up_mx, + r_dn_mn, + ) = mx_entry + + scaled: dict[float, tuple[float, float, float]] = {} # conduct "log-linearized multi-plot" scalings for all groups - for ( - view, + # -> iterate all curves Ci in dispersion-measure sorted order + # going from smallest swing to largest. + for full_disp in sorted(overlay_table): ( + view, viz, y_start, y_min, y_max, read_slc, minor_in_view, - ) - ) in overlay_table.items(): + r_up, + r_dn, + ) = overlay_table[full_disp] key = 'open' if viz.is_ohlc else viz.name @@ -575,8 +616,99 @@ def overlay_viewlists( ) continue - ymn = dnt.apply_rng(y_start) - ymx = upt.apply_rng(y_start) + xref = minor_in_view[0]['time'] + match overlay_technique: + + # Pin this curve to the "major dispersion" (or other + # target) curve by finding the intersect datum and + # then scaling according to the returns log-lin transort + # 'at that intersect reference data'. If the pinning + # results in this (minor/pinned) curve being out of view + # adjust the returns scalars to match this curves min + # y-range to stay in view. + case 'loglin_ref_to_curve': + + # TODO: technically we only need to do this here if + # + if viz is not mx_viz: + ( + i_start, + y_ref_major, + r_major_up_here, + r_major_down_here, + ) = mx_viz.scalars_from_index(xref) + + # transform y-range scaling to be the same as the + # equivalent "intersect" datum on the major + # dispersion curve (or other target "pin to" + # equivalent). + ymn = y_start * (1 + r_major_down_here) + if ymn > y_min: + ymn = y_min + r_dn_minor = (ymn - y_start) / y_start + + mx_ymn = y_ref_major * (1 + r_dn_minor) + + # TODO: rescale all already scaled curves to + # new increased range for this side. + # for ( + # view, + # (yref, ymn, ymx) + # ) in scaled.items(): + # pass + + ymx = y_start * (1 + r_major_up_here) + if ymx < y_max: + ymx = y_max + r_up_minor = (ymx - y_start) / y_start + mx_ymx = y_ref_major * (1 + r_up_minor) + + if debug_print: + print( + f'Minor SCALARS {viz.name}:\n' + f'xref: {xref}\n' + f'dn: {r_major_down_here}\n' + f'up: {r_major_up_here}\n' + ) + else: + if debug_print: + print( + f'MAJOR SCALARS {viz.name}:\n' + f'dn: {r_dn_mn}\n' + f'up: {r_up_mx}\n' + ) + # target/major curve's mxmn may have been + # reset by minor overlay steps above. + ymn = mx_ymn + ymx = mx_ymx + + # Pin all curves by their first datum in view to all + # others such that each curve's earliest datum provides the + # reference point for returns vs. every other curve in + # view. + case 'loglin_ref_to_first': + ymn = dnt.apply_rng(y_start) + ymx = upt.apply_rng(y_start) + + # Do not pin curves by log-linearizing their y-ranges, + # instead allow each curve to fully scale to the + # time-series in view's min and max y-values. 
+ case 'mxmn': + ymn = y_min + ymx = y_max + + case _: + raise RuntimeError( + f'overlay_technique is invalid `{overlay_technique}' + ) + + scaled[view] = (y_start, ymn, ymx) + + for ( + view, + (yref, ymn, ymx) + + ) in scaled.items(): # NOTE XXX: we have to set each curve's range once (and # ONLY ONCE) here since we're doing this entire routine @@ -594,6 +726,8 @@ def overlay_viewlists( f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' + f'disp: {disp}\n' + f'xref for MINOR: {xref}\n' f'y_start: {y_start}\n' f'y min: {y_min}\n' f'y max: {y_max}\n' @@ -614,7 +748,10 @@ def overlay_viewlists( + '\n' ) + + profiler(f'<{chart_name}>.interact_graphics_cycle()') + if not do_linked_charts: - return + break profiler.finish() From c1ea8552acd4c142b33ff7488f3b7b44b0e2d995 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 27 Feb 2023 17:16:33 -0500 Subject: [PATCH 103/136] Back-rescale previous (minor) curves from latest For the "pin to target major/target curve" overlay method, this finally solves the longstanding issue of ensuring that any new minor curve, which requires and increase in the major/target curve y-range, also re-scales all previously scaled minor curves retroactively. Thus we now guarantee that all minor curves are correctly "pinned" to their target/major on their earliest available datum **and** are all kept in view. --- piker/ui/view_mode.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index 73051d060a..674a92c310 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -651,18 +651,34 @@ def overlay_viewlists( # TODO: rescale all already scaled curves to # new increased range for this side. - # for ( - # view, - # (yref, ymn, ymx) - # ) in scaled.items(): - # pass + for _view in scaled: + _yref, _ymn, _ymx = scaled[_view] + new_ymn = _yref * (1 + r_dn_minor) + + # TODO: is there a faster way to do this + # by mutating state on some object + # instead? + scaled[_view] = (_yref, new_ymn, _ymx) ymx = y_start * (1 + r_major_up_here) if ymx < y_max: + # set the `scaled: dict` entry to ensure + # that this minor curve will be entirely in + # view. ymx = y_max r_up_minor = (ymx - y_start) / y_start + + # adjust the target-major curve's range to + # (log-linearly) include this extra range by + # applying the inverse transform of the + # minor. mx_ymx = y_ref_major * (1 + r_up_minor) + for _view in scaled: + _yref, _ymn, _ymx = scaled[_view] + new_ymx = _yref * (1 + r_up_minor) + scaled[_view] = (_yref, _ymn, new_ymx) + if debug_print: print( f'Minor SCALARS {viz.name}:\n' From 01ea706644490108e4c59bda219c7ff37dda788e Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 09:43:16 -0500 Subject: [PATCH 104/136] Better doc string, use `Viz.vs: ViewState` --- piker/ui/_render.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index dc162834cf..69bd37c40c 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -51,7 +51,20 @@ class Renderer(msgspec.Struct): - + ''' + Low(er) level interface for converting a source, real-time updated, + data buffer (usually held in a ``ShmArray``) to a graphics data + format usable by `Qt`. 
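    A rough pipeline sketch (expanded in the next paragraph):

        ShmArray -> Viz -> IncrementalFormatter -> Renderer.path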
+
+    A renderer reads in context-specific source data using a ``Viz``,
+    formats that data to a 2D-xy pre-graphics format using
+    a ``IncrementalFormatter``, then renders that data to a set of
+    output graphics objects normally a ``.ui._curve.FlowGraphics``
+    sub-type to which the ``Renderer.path`` is applied and further "last
+    datum" graphics are updated from the source buffer's latest
+    sample(s).
+
+    '''
     viz: Viz
     fmtr: IncrementalFormatter
 
@@ -195,7 +208,7 @@ def render(
         fast_path: QPainterPath = self.fast_path
 
         reset: bool = False
-        self.viz.yrange = None
+        self.viz.ds_yrange = None
 
         # redraw the entire source data if we have either of:
         # - no prior path graphic rendered or,
@@ -218,7 +231,7 @@ def render(
             )
             if ds_out is not None:
                 x_1d, y_1d, ymn, ymx = ds_out
-                self.viz.yrange = ymn, ymx
+                self.viz.ds_yrange = ymn, ymx
                 # print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
 
                 reset = True

From 45e97dd4c8a9ddea3419e756cf4ebc8b0383e0ad Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 28 Feb 2023 10:53:06 -0500
Subject: [PATCH 105/136] Solve a final minor-should-rescale edge case

When the minor has the same scaling as the major in a given direction
we should still do back-scaling against the major-target and previous
minors to avoid strange edge cases where only the target-major might
not be shifted correctly to show a matched intersect point? More or
less this just meant making the y-mxmn checks interval-inclusive with
`>=`/`<=` operators.

Also adds a shite ton of detailed comments throughout the pin-to-target
method blocks and moves the final major y-range call outside the final
`scaled: dict` loop.
---
 piker/ui/view_mode.py | 219 ++++++++++++++++++++++++++----------------
 1 file changed, 137 insertions(+), 82 deletions(-)

diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
index 674a92c310..a08dbe4179 100644
--- a/piker/ui/view_mode.py
+++ b/piker/ui/view_mode.py
@@ -81,8 +81,11 @@ def apply_rng(
 
     ) -> float:
         return y_ref * (1 + self.rng)
 
-    # def loglin_from_range(
-    #     self,
+    def scalars_from_index(
+        self,
+        xref: float,
+    ) -> tuple[float, float]:
+        pass
 
     #     y_ref: float,  # reference value for dispersion metric
     #     mn: float,  # min y in target log-lin range
@@ -359,6 +362,12 @@ def overlay_viewlists(
             key = 'open' if viz.is_ohlc else viz.name
             start_t = row_start['time']
 
+            # TODO: call `Viz.disp_from_range()` here!
+            # disp = viz.disp_from_range(yref=y_ref)
+            # if disp is None:
+            #     print(f'{viz.name}: WTF NO DISP')
+            #     continue
+
             # returns scalars
             r_up = (ymx - y_ref) / y_ref
             r_down = (ymn - y_ref) / y_ref
@@ -489,12 +498,6 @@ def overlay_viewlists(
                 profiler(msg)
                 print(msg)
 
-            # disp = viz.disp_from_range(yref=y_ref)
-            # if disp is None:
-            #     print(f'{viz.name}: WTF NO DISP')
-            #     continue
-
-            # r_up, r_dn = disp
             disp = r_up - r_down
 
             # register curves by a "full" dispersion metric for
@@ -575,7 +578,7 @@ def overlay_viewlists(
     mx_disp = max(overlay_table)
     mx_entry = overlay_table[mx_disp]
     (
-        _,  # viewbox
+        mx_view,  # viewbox
         mx_viz,  # viz
         _,  # y_ref
         mx_ymn,
         mx_ymx,
         _,  # read_slc
         _,  # in_view array
         r_up_mx,
         r_dn_mn,
     ) = mx_entry
+    mx_disp = r_up_mx - r_dn_mn
 
-    scaled: dict[float, tuple[float, float, float]] = {}
+    scaled: dict[
+        float,
+        tuple[Viz, float, float, float, float]
+    ] = {}
 
     # conduct "log-linearized multi-plot" scalings for all groups
     # -> iterate all curves Ci in dispersion-measure sorted order
     # going from smallest swing to largest.
- for full_disp in sorted(overlay_table): + for full_disp in reversed(overlay_table): ( view, viz, @@ -620,17 +627,24 @@ def overlay_viewlists( match overlay_technique: # Pin this curve to the "major dispersion" (or other - # target) curve by finding the intersect datum and - # then scaling according to the returns log-lin transort - # 'at that intersect reference data'. If the pinning - # results in this (minor/pinned) curve being out of view - # adjust the returns scalars to match this curves min - # y-range to stay in view. + # target) curve: + # + # - find the intersect datum and then scaling according + # to the returns log-lin tranform 'at that intersect + # reference data'. + # - if the pinning/log-returns-based transform scaling + # results in this minor/pinned curve being out of + # view, adjust the scalars to match **this** curve's + # y-range to stay in view and then backpropagate that + # scaling to all curves, including the major-target, + # which were previously scaled before. case 'loglin_ref_to_curve': - - # TODO: technically we only need to do this here if - # if viz is not mx_viz: + + # calculate y-range scalars from the earliest + # "intersect" datum with the target-major + # (dispersion) curve so as to "pin" the curves + # in the y-domain at that spot. ( i_start, y_ref_major, @@ -638,46 +652,83 @@ def overlay_viewlists( r_major_down_here, ) = mx_viz.scalars_from_index(xref) - # transform y-range scaling to be the same as the - # equivalent "intersect" datum on the major - # dispersion curve (or other target "pin to" - # equivalent). ymn = y_start * (1 + r_major_down_here) - if ymn > y_min: + + # if this curve's y-range is detected as **not + # being in view** after applying the + # target-major's transform, adjust the + # target-major curve's range to (log-linearly) + # include it (the extra missing range) by + # adjusting the y-mxmn to this new y-range and + # applying the inverse transform of the minor + # back on the target-major (and possibly any + # other previously-scaled-to-target/major, minor + # curves). + if ymn >= y_min: ymn = y_min r_dn_minor = (ymn - y_start) / y_start + # rescale major curve's y-max to include new + # range increase required by **this minor**. mx_ymn = y_ref_major * (1 + r_dn_minor) - - # TODO: rescale all already scaled curves to - # new increased range for this side. + mx_viz.vs.yrange = mx_ymn, mx_viz.vs.yrange[1] + + # rescale all already scaled curves to new + # increased range for this side as + # determined by ``y_min`` staying in view; + # re-set the `scaled: dict` entry to + # ensure that this minor curve will be + # entirely in view. + # TODO: re updating already-scaled minor curves + # - is there a faster way to do this by + # mutating state on some object instead? for _view in scaled: - _yref, _ymn, _ymx = scaled[_view] - new_ymn = _yref * (1 + r_dn_minor) + _viz, _yref, _ymn, _ymx, _xref = scaled[_view] + ( + _, + _, + _, + r_major_down_here, + ) = mx_viz.scalars_from_index(_xref) + + new_ymn = _yref * (1 + r_major_down_here) - # TODO: is there a faster way to do this - # by mutating state on some object - # instead? - scaled[_view] = (_yref, new_ymn, _ymx) + scaled[_view] = ( + _viz, _yref, new_ymn, _ymx, _xref) + + if debug_print: + print( + f'RESCALE {_viz.name} ymn -> {new_ymn}' + f'RESCALE MAJ ymn -> {mx_ymn}' + ) ymx = y_start * (1 + r_major_up_here) - if ymx < y_max: - # set the `scaled: dict` entry to ensure - # that this minor curve will be entirely in - # view. 
+ + # same as above but for minor being out-of-range + # on the upside. + if ymx <= y_max: ymx = y_max r_up_minor = (ymx - y_start) / y_start - - # adjust the target-major curve's range to - # (log-linearly) include this extra range by - # applying the inverse transform of the - # minor. mx_ymx = y_ref_major * (1 + r_up_minor) + mx_viz.vs.yrange = mx_viz.vs.yrange[0], mx_ymx for _view in scaled: - _yref, _ymn, _ymx = scaled[_view] - new_ymx = _yref * (1 + r_up_minor) - scaled[_view] = (_yref, _ymn, new_ymx) + _viz, _yref, _ymn, _ymx, _xref = scaled[_view] + ( + _, + _, + r_major_up_here, + _, + ) = mx_viz.scalars_from_index(_xref) + + new_ymx = _yref * (1 + r_major_up_here) + scaled[_view] = ( + _viz, _yref, _ymn, new_ymx, _xref) + + if debug_print: + print( + f'RESCALE {_viz.name} ymn -> {new_ymx}' + ) if debug_print: print( @@ -686,6 +737,12 @@ def overlay_viewlists( f'dn: {r_major_down_here}\n' f'up: {r_major_up_here}\n' ) + + # register all overlays for a final pass where we + # apply all pinned-curve y-range transform scalings. + scaled[view] = (viz, y_start, ymn, ymx, xref) + + # target/dispersion MAJOR case else: if debug_print: print( @@ -693,6 +750,7 @@ def overlay_viewlists( f'dn: {r_dn_mn}\n' f'up: {r_up_mx}\n' ) + # target/major curve's mxmn may have been # reset by minor overlay steps above. ymn = mx_ymn @@ -705,56 +763,53 @@ def overlay_viewlists( case 'loglin_ref_to_first': ymn = dnt.apply_rng(y_start) ymx = upt.apply_rng(y_start) + view._set_yrange(yrange=(ymn, ymx)) # Do not pin curves by log-linearizing their y-ranges, # instead allow each curve to fully scale to the # time-series in view's min and max y-values. case 'mxmn': - ymn = y_min - ymx = y_max + view._set_yrange(yrange=(y_min, y_max)) case _: raise RuntimeError( f'overlay_technique is invalid `{overlay_technique}' ) - scaled[view] = (y_start, ymn, ymx) - - for ( - view, - (yref, ymn, ymx) - - ) in scaled.items(): + if scaled: + for ( + view, + (viz, yref, ymn, ymx, xref) + ) in scaled.items(): - # NOTE XXX: we have to set each curve's range once (and - # ONLY ONCE) here since we're doing this entire routine - # inside of a single render cycle (and apparently calling - # `ViewBox.setYRange()` multiple times within one only takes - # the first call as serious...) XD - view._set_yrange( - yrange=(ymn, ymx), - ) - profiler(f'{viz.name}@{chart_name} log-SCALE minor') + # NOTE XXX: we have to set each curve's range once (and + # ONLY ONCE) here since we're doing this entire routine + # inside of a single render cycle (and apparently calling + # `ViewBox.setYRange()` multiple times within one only takes + # the first call as serious...) 
XD + view._set_yrange(yrange=(ymn, ymx)) + profiler(f'{viz.name}@{chart_name} log-SCALE minor') - if debug_print: - print( - '------------------------------\n' - f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' - f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' - f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' - f'disp: {disp}\n' - f'xref for MINOR: {xref}\n' - f'y_start: {y_start}\n' - f'y min: {y_min}\n' - f'y max: {y_max}\n' - f'T scaled ymn: {ymn}\n' - f'T scaled ymx: {ymx}\n' - '------------------------------\n' - ) + if debug_print: + print( + '------------------------------\n' + f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' + f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' + f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' + f'SIGMA MAJOR C: {mx_viz.name} -> {mx_disp}\n' + # f'disp: {disp}\n' + f'xref for MINOR: {xref}\n' + f'y_start: {y_start}\n' + f'y min: {y_min}\n' + f'y max: {y_max}\n' + f'T scaled ymn: {ymn}\n' + f'T scaled ymx: {ymx}\n' + '------------------------------\n' + ) - # vrs = major_sigma_viz.plot.vb.viewRange() - # if vrs[1][0] > major_mn: - # breakpoint() + # finally, scale major curve to possibly re-scaled/modified + # values + mx_view._set_yrange(yrange=(mx_ymn, mx_ymx)) if debug_print: print( From 8c392fda6032ab9a78c7dd6e5fffab6b6a051450 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 11:48:49 -0500 Subject: [PATCH 106/136] Drop a bunch of commented/uneeded cruft --- piker/ui/view_mode.py | 61 +++++-------------------------------------- 1 file changed, 6 insertions(+), 55 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index a08dbe4179..e6d1276947 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -19,9 +19,6 @@ ''' from __future__ import annotations -from math import ( - isinf, -) from typing import ( Any, Literal, @@ -74,32 +71,13 @@ class OverlayT(Struct): # that pin point causing the original range to have to increase. y_val: float | None = None - def apply_rng( + def apply_r( self, y_ref: float, # reference value for dispersion metric ) -> float: return y_ref * (1 + self.rng) - def scalars_from_index( - self, - xref: float, - ) -> tuple[float, float]: - pass - - # y_ref: float, # reference value for dispersion metric - # mn: float, # min y in target log-lin range - # mx: float, # max y in target log-lin range - # offset: float, # y-offset to start log-scaling from - - # ) -> tuple[float, float]: - # r_up = (mx - y_ref) / y_ref - # r_down = (mn - y_ref) / y_ref - # ymn = offset * (1 + r_down) - # ymx = offset * (1 + r_up) - - # return ymn, ymx - def intersect_from_longer( start_t_first: float, @@ -362,12 +340,6 @@ def overlay_viewlists( key = 'open' if viz.is_ohlc else viz.name start_t = row_start['time'] - # TODO: call `Viz.disp_from_range()` here! - # disp = viz.disp_from_range(yref=y_ref) - # if disp is None: - # print(f'{viz.name}: WTF NO DISP') - # continue - # returns scalars r_up = (ymx - y_ref) / y_ref r_down = (ymn - y_ref) / y_ref @@ -419,7 +391,7 @@ def overlay_viewlists( if intersect: longer_in_view, _t, i = intersect - scaled_mn = dnt.apply_rng(y_ref) + scaled_mn = dnt.apply_r(y_ref) if scaled_mn > ymn: # after major curve scaling we detected # the minor curve is still out of range @@ -479,7 +451,7 @@ def overlay_viewlists( # the minor curve is still out of range # so we need to adjust the major's range # to include the new composed range. 
- scaled_mx = upt.apply_rng(y_ref) + scaled_mx = upt.apply_r(y_ref) if scaled_mx < ymx: y_maj_ref = longer_in_view[key] new_major_ymx = y_maj_ref * (1 + r_up) @@ -498,22 +470,18 @@ def overlay_viewlists( profiler(msg) print(msg) - disp = r_up - r_down - # register curves by a "full" dispersion metric for # later sort order in the overlay (technique # ) application loop below. + disp = r_up - r_down overlay_table[disp] = ( viz.plot.vb, viz, - y_ref, ymn, ymx, - read_slc, in_view, - r_up, r_down, ) @@ -568,12 +536,6 @@ def overlay_viewlists( if debug_print: print(msg) - # if a minor curves scaling brings it "outside" the range of - # the major curve (in major curve co-domain terms) then we - # need to rescale the major to also include this range. The - # below placeholder denotes when this occurs. - # group_mxmn: None | tuple[float, float] = None - r_up_mx: float r_dn_mn: float mx_disp = max(overlay_table) @@ -589,7 +551,6 @@ def overlay_viewlists( r_up_mx, r_dn_mn, ) = mx_entry - mx_disp = r_up_mx - r_dn_mn scaled: dict[ float, @@ -614,15 +575,6 @@ def overlay_viewlists( key = 'open' if viz.is_ohlc else viz.name - if ( - isinf(ymx) - or isinf(ymn) - ): - log.warning( - f'BAD ymx/ymn: {(ymn, ymx)}' - ) - continue - xref = minor_in_view[0]['time'] match overlay_technique: @@ -761,8 +713,8 @@ def overlay_viewlists( # reference point for returns vs. every other curve in # view. case 'loglin_ref_to_first': - ymn = dnt.apply_rng(y_start) - ymx = upt.apply_rng(y_start) + ymn = dnt.apply_r(y_start) + ymx = upt.apply_r(y_start) view._set_yrange(yrange=(ymn, ymx)) # Do not pin curves by log-linearizing their y-ranges, @@ -797,7 +749,6 @@ def overlay_viewlists( f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' f'SIGMA MAJOR C: {mx_viz.name} -> {mx_disp}\n' - # f'disp: {disp}\n' f'xref for MINOR: {xref}\n' f'y_start: {y_start}\n' f'y min: {y_min}\n' From 993bb47138c58d3342d47d9c7a7ec594e5eb9f9a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 11:54:30 -0500 Subject: [PATCH 107/136] Drop passing overlay method from viewbox to view-mode handler --- piker/ui/_interaction.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 2200dae076..9d8442042b 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -929,8 +929,6 @@ def interact_graphics_cycle( tuple[float, float], ] | None = None, - overlay_technique: str = 'loglin_to_first', - ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', @@ -968,5 +966,4 @@ def interact_graphics_cycle( do_overlay_scaling=do_overlay_scaling, do_linked_charts=do_linked_charts, yrange_kwargs=yrange_kwargs, - overlay_technique=overlay_technique, ) From 75798630684664ec90e3ad86fff4a188fd14c646 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 14:32:03 -0500 Subject: [PATCH 108/136] Skip overlay handling when `N < 2` are detected --- piker/ui/view_mode.py | 64 +++++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index e6d1276947..cc0fa44cce 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -201,6 +201,8 @@ def overlay_viewlists( chart: ChartPlotWidget for chart_name, chart in plots.items(): + overlay_viz_items = chart._vizs.items() + # Common `PlotItem` maxmin table; presumes that some path # graphics (and thus their backing data sets) are in the # same co-domain and view 
box (since they were added
@@ -218,29 +220,11 @@
         # and scale minor charts onto the major chart: the chart
         # with the most dispersion in the set.
 
-        # collect certain flows which have graphics objects **in separate
-        # plots/viewboxes** into groups and do a common calc to
-        # determine auto-ranging input for `._set_yrange()`.
-        # this is primarily used for our so called "log-linearized
-        # multi-plot" overlay technique.
-        overlay_table: dict[
-            float,
-            tuple[
-                ChartView,
-                Viz,
-                float,  # y start
-                float,  # y min
-                float,  # y max
-                float,  # y median
-                slice,  # in-view array slice
-                np.ndarray,  # in-view array
-                float,  # returns up scalar
-                float,  # return down scalar
-            ],
-        ] = {}
-
         # ONLY auto-yrange the viz mapped to THIS view box
-        if not do_overlay_scaling:
+        if (
+            not do_overlay_scaling
+            or len(overlay_viz_items) < 2
+        ):
             viz = active_viz
             if debug_print:
                 print(f'ONLY ranging THIS viz: {viz.name}')
@@ -261,12 +245,6 @@
             # don't iterate overlays, just move to next chart
             continue
 
-        # create a group overlay log-linearized y-range transform to
-        # track and eventually inverse transform all overlay curves
-        # to a common target max dispersion range.
-        dnt = OverlayT()
-        upt = OverlayT()
-
         if debug_print:
             print(
                 f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n'
@@ -276,7 +254,35 @@
                 '\n'
             )
 
-        for name, viz in chart._vizs.items():
+        # create a group overlay log-linearized y-range transform to
+        # track and eventually inverse transform all overlay curves
+        # to a common target max dispersion range.
+        dnt = OverlayT()
+        upt = OverlayT()
+
+        # collect certain flows which have graphics objects **in separate
+        # plots/viewboxes** into groups and do a common calc to
+        # determine auto-ranging input for `._set_yrange()`.
+        # this is primarily used for our so called "log-linearized
+        # multi-plot" overlay technique.
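# NB: the "log-linearization" mentioned above boils down to a simple
# percent-returns transform; a minimal standalone sketch assuming
# plain float inputs (the helper name is illustrative only, it is
# not part of this patch set):

def pin_to_returns(
    y_ref: float,  # reference (intersect) datum on the target curve
    ymn: float,  # in-view y-min of the target curve
    ymx: float,  # in-view y-max of the target curve
    y_start: float,  # pinned curve's first in-view datum value
) -> tuple[float, float]:
    # up/down return scalars measured from the reference datum
    r_up = (ymx - y_ref) / y_ref
    r_dn = (ymn - y_ref) / y_ref
    # re-apply those returns at the pinned curve's start datum to
    # derive its transformed y-range
    return (
        y_start * (1 + r_dn),
        y_start * (1 + r_up),
    )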
+ overlay_table: dict[ + float, + tuple[ + ChartView, + Viz, + float, # y start + float, # y min + float, # y max + float, # y median + slice, # in-view array slice + np.ndarray, # in-view array + float, # returns up scalar + float, # return down scalar + ], + ] = {} + + # multi-curve overlay processing stage + for name, viz in overlay_viz_items: out = _maybe_calc_yrange( viz, From 94f0ef13ef424f0bca8f51066d2efd1782afe9e4 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 15:01:00 -0500 Subject: [PATCH 109/136] Repair x-label datetime labels when in array-index mode --- piker/ui/_axes.py | 13 ++++++++----- piker/ui/_cursor.py | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py index 9eda3c75f6..62214f60bf 100644 --- a/piker/ui/_axes.py +++ b/piker/ui/_axes.py @@ -1,5 +1,5 @@ # piker: trading gear for hackers -# Copyright (C) Tyler Goodlet (in stewardship for piker0) +# Copyright (C) Tyler Goodlet (in stewardship for pikers) # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by @@ -304,8 +304,9 @@ def _indexes_to_timestrs( viz = chart._vizs[chart.name] shm = viz.shm array = shm.array - times = array['time'] - i_0, i_l = times[0], times[-1] + ifield = viz.index_field + index = array[ifield] + i_0, i_l = index[0], index[-1] # edge cases if ( @@ -317,11 +318,13 @@ def _indexes_to_timestrs( (indexes[0] > i_0 and indexes[-1] > i_l) ): + # print(f"x-label indexes empty edge case: {indexes}") return [] - if viz.index_field == 'index': - arr_len = times.shape[0] + if ifield == 'index': + arr_len = index.shape[0] first = shm._first.value + times = array['time'] epochs = times[ list( map( diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py index c118829f53..79df305b73 100644 --- a/piker/ui/_cursor.py +++ b/piker/ui/_cursor.py @@ -1,5 +1,5 @@ # piker: trading gear for hackers -# Copyright (C) Tyler Goodlet (in stewardship for piker0) +# Copyright (C) Tyler Goodlet (in stewardship for pikers) # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by From 75807f4a96da94c26e291b78fd519ed545e7c753 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 15:09:15 -0500 Subject: [PATCH 110/136] Rename overlay technique var to `method` --- piker/ui/view_mode.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index cc0fa44cce..048177656e 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -181,11 +181,12 @@ def overlay_viewlists( tuple[float, float], ] | None = None, - overlay_technique: Literal[ + method: Literal[ 'loglin_ref_to_curve', 'loglin_ref_to_first', 'mxmn', 'solo', + ] = 'loglin_ref_to_curve', # internal debug @@ -194,7 +195,7 @@ def overlay_viewlists( ) -> None: ''' Calculate and apply y-domain (axis y-range) multi-curve overlay adjustments - a set of ``plots`` based on the requested ``overlay_technique``. + a set of ``plots`` based on the requested ``method``. 
''' chart_name: str @@ -313,7 +314,7 @@ def overlay_viewlists( # non-overlay group case if ( not viz.is_ohlc - or overlay_technique == 'solo' + or method == 'solo' ): pi.vb._set_yrange(yrange=yrange) profiler( @@ -582,7 +583,7 @@ def overlay_viewlists( key = 'open' if viz.is_ohlc else viz.name xref = minor_in_view[0]['time'] - match overlay_technique: + match method: # Pin this curve to the "major dispersion" (or other # target) curve: @@ -731,7 +732,7 @@ def overlay_viewlists( case _: raise RuntimeError( - f'overlay_technique is invalid `{overlay_technique}' + f'overlay ``method`` is invalid `{method}' ) if scaled: From 77401a94fb3ff6859e80b94dae153c6f427b5a76 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 16:02:01 -0500 Subject: [PATCH 111/136] Simplify `FlowGraphics.x_last()` logics --- piker/ui/_curve.py | 5 ++++- piker/ui/_ohlc.py | 9 +++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 013448f353..052a94a3d7 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -158,7 +158,10 @@ def x_last(self) -> float | None: drawn yet, ``None``. ''' - return self._last_line.x1() if self._last_line else None + if self._last_line: + return self._last_line.x1() + + return None class Curve(FlowGraphic): diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 344805e8a9..25ebb591b8 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -93,7 +93,7 @@ class BarItems(FlowGraphic): ''' # XXX: causes this weird jitter bug when click-drag panning # where the path curve will awkwardly flicker back and forth? - # cache_mode: int = QGraphicsItem.NoCache + cache_mode: int = QGraphicsItem.NoCache def __init__( self, @@ -113,9 +113,10 @@ def x_last(self) -> None | float: ''' if self._last_bar_lines: close_arm_line = self._last_bar_lines[-1] - return close_arm_line.x2() if close_arm_line else None - else: - return None + if close_arm_line: + return close_arm_line.x2() + + return None # Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect def boundingRect(self): From eda283f0596584b89db9ab4387b8a7bd3d2e274c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 16:02:27 -0500 Subject: [PATCH 112/136] Fix focal min calc after switching to `Viz.datums_range()`.. --- piker/ui/_interaction.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 9d8442042b..6de25aa8a2 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -509,7 +509,7 @@ def wheelEvent( # don't zoom more then the min points setting viz = chart.get_viz(chart.name) - _, vl, lbar, rbar, vr, _ = viz.datums_range() + _, vl, lbar, rbar, vr, r = viz.datums_range() # TODO: max/min zoom limits incorporating time step size. 
# rl = vr - vl @@ -556,7 +556,7 @@ def wheelEvent( xl = viz.graphics.x_last() focal = min( xl, - vr, + r, ) self._resetTarget() From 75642929e32c0fc1610378abf2e997cfa12c7318 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 18:01:13 -0500 Subject: [PATCH 113/136] Move cache-reset ctx mngr to parent type: `FlowGraphics.reset_cache()` --- piker/ui/_curve.py | 16 ++++++++++------ piker/ui/_ohlc.py | 2 +- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 052a94a3d7..091bbad066 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -163,6 +163,16 @@ def x_last(self) -> float | None: return None + @cm + def reset_cache(self) -> None: + self.setCacheMode(QtWidgets.QGraphicsItem.NoCache) + try: + log.debug(f'{self._name} -> CACHE DISABLE') + yield + finally: + log.debug(f'{self._name} -> CACHE ENABLE') + self.setCacheMode(self.cache_mode) + class Curve(FlowGraphic): ''' @@ -251,12 +261,6 @@ def clear(self): self.fast_path.clear() # self.fast_path = None - @cm - def reset_cache(self) -> None: - self.setCacheMode(QtWidgets.QGraphicsItem.NoCache) - yield - self.setCacheMode(QGraphicsItem.DeviceCoordinateCache) - def boundingRect(self): ''' Compute and then cache our rect. diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 25ebb591b8..f3eb12b0e5 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -93,7 +93,7 @@ class BarItems(FlowGraphic): ''' # XXX: causes this weird jitter bug when click-drag panning # where the path curve will awkwardly flicker back and forth? - cache_mode: int = QGraphicsItem.NoCache + # cache_mode: int = QGraphicsItem.NoCache def __init__( self, From 9b960594aab410ee4bc9ece8288514eb5c0f1079 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 18:02:34 -0500 Subject: [PATCH 114/136] Add per-chart `Viz`/overlay graphics iterator method --- piker/ui/_chart.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index d4dadab00d..095d11e03f 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1355,3 +1355,6 @@ def get_viz( @property def main_viz(self) -> Viz: return self.get_viz(self.name) + + def iter_vizs(self) -> Iterator[Viz]: + return iter(self._vizs.values()) From f7dfe57090bfa16f5bc14d139f3414538ef85ada Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 28 Feb 2023 18:03:41 -0500 Subject: [PATCH 115/136] Disable coordinate caching during interaction This finally seems to mitigate all the "smearing" and "jitter" artifacts when using Qt's "coordinate cache" graphics-mode: - whenever we're in a mouse interaction (as per calls to `ChartView.start/signal_ic()`) we simply disable the caching mode (set `.NoCache` until the interaction is complete. - only do this (for now) during a pan since it doesn't seem to be an issue when zooming? - ensure disabling all `Viz.graphics` and `.ds_graphics` to be agnostic to any case where there's both a zoom and a pan simultaneously (not that it's easy to do manually XD) as well as solving the problem whenever an OHLC series is in traced-and-downsampled mode (during low zoom). Impl deatz: - rename `ChartView._ic` -> `._in_interact: trio.Event` - add `.ChartView._interact_stack: ExitStack` which we use to open. and close the `FlowGraphics.reset_cache()` mngrs from mouse handlers. - drop all the commented per-subtype overrides for `.cache_mode: int`. - write up much better doc strings for `FlattenedOHLC` and `StepCurve` including some very basic ASCII-art diagrams. 
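In effect the pattern is a stack of cache-disabling context managers
held open for the duration of the interaction; a minimal sketch
assuming only `PyQt5` + stdlib, where `no_cache()` and `curve_items`
are illustrative stand-ins for the actual impl:

    from contextlib import contextmanager, ExitStack
    from PyQt5.QtWidgets import QGraphicsItem

    @contextmanager
    def no_cache(item: QGraphicsItem):
        # drop Qt's coordinate caching while the user interacts
        item.setCacheMode(QGraphicsItem.NoCache)
        try:
            yield item
        finally:
            # restore the normal device-coordinate caching mode
            item.setCacheMode(QGraphicsItem.DeviceCoordinateCache)

    # on interaction start: disable caching on all in-view graphics
    stack = ExitStack()
    for item in curve_items:
        stack.enter_context(no_cache(item))

    # ... pan/zoom handling runs with caching disabled ...

    # on interaction end: close the stack to re-enable caching
    stack.close()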
--- piker/ui/_curve.py | 54 +++++++++++++++++++++++++++++----------- piker/ui/_display.py | 6 ++--- piker/ui/_interaction.py | 38 ++++++++++++++++++---------- piker/ui/_ohlc.py | 5 ---- 4 files changed, 67 insertions(+), 36 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 091bbad066..5442d3471c 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -163,22 +163,32 @@ def x_last(self) -> float | None: return None + # XXX: due to a variety of weird jitter bugs and "smearing" + # artifacts when click-drag panning and viewing history time series, + # we offer this ctx-mngr interface to allow temporarily disabling + # Qt's graphics caching mode; this is now currently used from + # ``ChartView.start/signal_ic()`` methods which also disable the + # rt-display loop when the user is moving around a view. @cm def reset_cache(self) -> None: - self.setCacheMode(QtWidgets.QGraphicsItem.NoCache) try: - log.debug(f'{self._name} -> CACHE DISABLE') + none = QGraphicsItem.NoCache + log.debug( + f'{self._name} -> CACHE DISABLE: {none}' + ) + self.setCacheMode(none) yield finally: - log.debug(f'{self._name} -> CACHE ENABLE') - self.setCacheMode(self.cache_mode) + mode = self.cache_mode + log.debug(f'{self._name} -> CACHE ENABLE {mode}') + self.setCacheMode(mode) class Curve(FlowGraphic): ''' A faster, simpler, append friendly version of ``pyqtgraph.PlotCurveItem`` built for highly customizable real-time - updates. + updates; a graphics object to render a simple "line" plot. This type is a much stripped down version of a ``pyqtgraph`` style "graphics object" in the sense that the internal lower level @@ -385,7 +395,6 @@ def draw_last_datum( ) -> None: # default line draw last call - # with self.reset_cache(): x = src_data[index_field] y = src_data[array_key] @@ -413,12 +422,20 @@ def draw_last_datum( # element such that the current datum in view can be shown # (via it's max / min) even when highly zoomed out. class FlattenedOHLC(Curve): + ''' + More or less the exact same as a standard line ``Curve`` above + but meant to handle a traced-and-downsampled OHLC time series. + _ + _| | _ + |_ | |_ | | + _| => |_| | + | | + |_ |_ + + The main implementation different is that ``.draw_last_datum()`` + expects an underlying OHLC array for the ``src_data`` input. - # avoids strange dragging/smearing artifacts when panning - # as well as mouse over artefacts when the vlm chart series - # is "shorter" then some overlay.. - # cache_mode: int = QGraphicsItem.NoCache - + ''' def draw_last_datum( self, path: QPainterPath, @@ -443,12 +460,19 @@ def draw_last_datum( class StepCurve(Curve): + ''' + A familiar rectangle-with-y-height-per-datum type curve: + + || + || || + || || |||| + _||_||_||_||||_ where each datum's y-value is drawn as + a nearly full rectangle, each "level" spans some x-step size. - # avoids strange dragging/smearing artifacts when panning - # as well as mouse over artefacts when the vlm chart series - # is "shorter" then some overlay.. - # cache_mode: int = QGraphicsItem.NoCache + This is most often used for vlm and option OI style curves and/or + the very popular "bar chart". 
+ ''' def declare_paintables( self, ) -> None: diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 8a4de766f8..685fcca779 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -473,7 +473,7 @@ async def graphics_update_loop( fast_chart.pause_all_feeds() continue - ic = fast_chart.view._ic + ic = fast_chart.view._in_interact if ic: fast_chart.pause_all_feeds() print(f'{fqsn} PAUSING DURING INTERACTION') @@ -756,8 +756,8 @@ def graphics_update_cycle( mx = max(mx, lmx) if ( - main_vb._ic is None - or not main_vb._ic.is_set() + main_vb._in_interact is None + or not main_vb._in_interact.is_set() ): # print(f'SETTING Y-mnmx -> {main_viz.name}: {(mn, mx)}') this_vb.interact_graphics_cycle( diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 6de25aa8a2..15e5b2ffa6 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -19,7 +19,10 @@ ''' from __future__ import annotations -from contextlib import asynccontextmanager +from contextlib import ( + asynccontextmanager, + ExitStack, +) import time from typing import ( Callable, @@ -405,7 +408,8 @@ def __init__( self.order_mode: bool = False self.setFocusPolicy(QtCore.Qt.StrongFocus) - self._ic = None + self._in_interact: trio.Event | None = None + self._interact_stack: ExitStack = ExitStack() # TODO: probably just assign this whenever a new `PlotItem` is # allocated since they're 1to1 with views.. @@ -420,10 +424,20 @@ def start_ic( to any interested task waiters. ''' - if self._ic is None: + if self._in_interact is None: + chart = self.chart try: - self.chart.pause_all_feeds() - self._ic = trio.Event() + chart.pause_all_feeds() + self._in_interact = trio.Event() + for viz in chart.iter_vizs(): + self._interact_stack.enter_context( + viz.graphics.reset_cache(), + ) + dsg = viz.ds_graphics + if dsg: + self._interact_stack.enter_context( + dsg.reset_cache(), + ) except RuntimeError: pass @@ -437,10 +451,11 @@ def signal_ic( to any waiters. ''' - if self._ic: + if self._in_interact: try: - self._ic.set() - self._ic = None + self._in_interact.set() + self._in_interact = None + self._interact_stack.close() self.chart.resume_all_feeds() except RuntimeError: pass @@ -667,9 +682,6 @@ def mouseDragEvent( self.start_ic() except RuntimeError: pass - # if self._ic is None: - # self.chart.pause_all_feeds() - # self._ic = trio.Event() if axis == 1: self.chart._static_yrange = 'axis' @@ -693,8 +705,8 @@ def mouseDragEvent( if ev.isFinish(): self.signal_ic() - # self._ic.set() - # self._ic = None + # self._in_interact.set() + # self._in_interact = None # self.chart.resume_all_feeds() # # XXX: WHY diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index f3eb12b0e5..33d7bbdaa8 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -28,7 +28,6 @@ QLineF, QRectF, ) -from PyQt5.QtWidgets import QGraphicsItem from PyQt5.QtGui import QPainterPath from ._curve import FlowGraphic @@ -91,10 +90,6 @@ class BarItems(FlowGraphic): "Price range" bars graphics rendered from a OHLC sampled sequence. ''' - # XXX: causes this weird jitter bug when click-drag panning - # where the path curve will awkwardly flicker back and forth? 
-    # cache_mode: int = QGraphicsItem.NoCache
-
     def __init__(
         self,
         *args,

From eea850450a338fec24436401d5c41fcb764666fc Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Wed, 1 Mar 2023 17:35:07 -0500
Subject: [PATCH 116/136] Handle yrange not set on viewbox for vlm fsp plot
---
 piker/ui/_display.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index 685fcca779..dbde38f770 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -853,7 +853,12 @@ def graphics_update_cycle(
         vlm_vizs = vlm_chart._vizs
         main_vlm_viz = vlm_vizs['volume']
         main_vlm_vb = main_vlm_viz.plot.vb
-        (_, vlm_ymx) = vlm_yrange = main_vlm_vb._yrange
+
+        # TODO: we should probably read this
+        # from the `Viz.vs: ViewState`!
+        vlm_yr = main_vlm_vb._yrange
+        if vlm_yr:
+            (_, vlm_ymx) = vlm_yrange = vlm_yr
 
         # always update y-label
         ds.vlm_sticky.update_from_data(
@@ -892,6 +897,7 @@ def graphics_update_cycle(
 
         if (
             mx_vlm_in_view
+            and vlm_yr
             and mx_vlm_in_view != vlm_ymx
         ):
             # in this case we want to scale all overlays in the

From 9c8bd9b8ced64e96f0d87a42c1cd72f989afa1bc Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Thu, 2 Mar 2023 12:55:26 -0500
Subject: [PATCH 117/136] Expand mxmn view y-margins back to 0.06
---
 piker/ui/_interaction.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py
index 15e5b2ffa6..588d84bec9 100644
--- a/piker/ui/_interaction.py
+++ b/piker/ui/_interaction.py
@@ -763,7 +763,7 @@ def _set_yrange(
 
         # NOTE: this value pairs (more or less) with L1 label text
         # height offset from from the bid/ask lines.
-        range_margin: float | None = 0.09,
+        range_margin: float | None = 0.06,
 
         bars_range: tuple[int, int, int, int] | None = None,
 

From 6690bd45768f61ae3a51843514eade6cd16b399c Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Sat, 4 Mar 2023 16:49:20 -0500
Subject: [PATCH 118/136] Drop remaining non-usage of
 `ChartPlotWidget.maxmin()`
---
 piker/ui/_chart.py   | 37 -------------------------------------
 piker/ui/_display.py | 10 ----------
 2 files changed, 47 deletions(-)

diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py
index 095d11e03f..fb3fc388b5 100644
--- a/piker/ui/_chart.py
+++ b/piker/ui/_chart.py
@@ -1305,43 +1305,6 @@ def leaveEvent(self, ev):  # noqa
         self.sig_mouse_leave.emit(self)
         self.scene().leaveEvent(ev)
 
-    def maxmin(
-        self,
-        name: str | None = None,
-        bars_range: tuple[
-            int, int, int, int, int, int
-        ] | None = None,
-
-    ) -> tuple[float, float]:
-        '''
-        Return the max and min y-data values "in view".
-
-        If ``bars_range`` is provided use that range.
-
-        '''
-        # TODO: here we should instead look up the ``Viz.shm.array``
-        # and read directly from shm to avoid copying to memory first
-        # and then reading it again here.
-        viz_key = name or self.name
-        viz = self._vizs.get(viz_key)
-        if viz is None:
-            log.error(f"viz {viz_key} doesn't exist in chart {self.name} !?")
-            return 0, 0
-
-        res = viz.maxmin()
-
-        if (
-            res is None
-        ):
-            mxmn = 0, 0
-            if not self._on_screen:
-                self.default_view(do_ds=False)
-                self._on_screen = True
-        else:
-            x_range, read_slc, mxmn = res
-
-        return mxmn
-
     def get_viz(
         self,
         key: str,
diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index dbde38f770..d7400e4b4c 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -21,7 +21,6 @@
 graphics update methods via our custom ``pyqtgraph`` charting api.
''' -from functools import partial import itertools from math import floor import time @@ -389,7 +388,6 @@ async def graphics_update_loop( 'fqsn': fqsn, 'godwidget': godwidget, 'quotes': {}, - # 'maxmin': maxmin, 'flume': flume, @@ -1397,10 +1395,6 @@ async def display_symbol_data( # for zoom-interaction purposes. hist_viz.draw_last(array_key=fqsn) - hist_pi.vb.maxmin = partial( - hist_chart.maxmin, - name=fqsn, - ) # TODO: we need a better API to do this.. # specially store ref to shm for lookup in display loop # since only a placeholder of `None` is entered in @@ -1426,10 +1420,6 @@ async def display_symbol_data( color=bg_chart_color, last_step_color=bg_last_bar_color, ) - rt_pi.vb.maxmin = partial( - rt_chart.maxmin, - name=fqsn, - ) # TODO: we need a better API to do this.. # specially store ref to shm for lookup in display loop From 12bee716c25f9fc15824be5854710c6ec8b945fd Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 4 Mar 2023 17:07:46 -0500 Subject: [PATCH 119/136] Add `do_min_bars: bool` flag to `Viz.default_view()` --- piker/ui/_dataviz.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index f85022f7f2..1770150755 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -1018,9 +1018,11 @@ def draw_last( def default_view( self, - bars_from_y: int = int(616 * 3/8), + min_bars_from_y: int = int(616 * 4/11), y_offset: int = 0, # in datums + do_ds: bool = True, + do_min_bars: bool = False, ) -> None: ''' @@ -1090,9 +1092,7 @@ def default_view( offset = l1_offset - if ( - rescale_to_data - ): + if rescale_to_data: offset = (offset / uppx) * new_uppx else: @@ -1130,6 +1130,12 @@ def default_view( # maintain the l->r view distance l_reset = r_reset - rl_diff + if ( + do_min_bars + and (r_reset - l_reset) < min_bars_from_y + ): + l_reset = r_reset - min_bars_from_y + # remove any custom user yrange setttings if chartw._static_yrange == 'axis': chartw._static_yrange = None @@ -1142,7 +1148,6 @@ def default_view( if do_ds: view.interact_graphics_cycle() - # view._set_yrange(viz=self) def incr_info( self, From 7e6e04b7e21104bef62cc13866d3cfa30e783376 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 5 Mar 2023 21:14:22 -0500 Subject: [PATCH 120/136] Drop remaining usage of `ChartPlotWidget.default_view()` Instead delegate directly to `Viz.default_view()` throughout charting startup and interaction handlers. Also add a `ChartPlotWidget.reset_graphics_caches()` context mngr which resets all managed graphics object's cacheing modes on enter and restores them on exit for simplified use in interaction handling code. --- piker/ui/_chart.py | 73 ++++++++++++++++++++++++++------------------ piker/ui/_display.py | 10 +++--- 2 files changed, 48 insertions(+), 35 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index fb3fc388b5..5f6c9ed1e4 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -19,6 +19,10 @@ ''' from __future__ import annotations +from contextlib import ( + contextmanager as cm, + ExitStack, +) from typing import ( Iterator, TYPE_CHECKING, @@ -257,7 +261,9 @@ async def load_symbols( # last had the xlast in view, if so then shift so it's # still in view, if the user was viewing history then # do nothing yah? - self.rt_linked.chart.default_view() + self.rt_linked.chart.main_viz.default_view( + do_min_bars=True, + ) # if a history chart instance is already up then # set the search widget as its sidepane. 
@@ -811,11 +817,17 @@ def resize_sidepanes( self.chart.sidepane.setMinimumWidth(sp_w) -# TODO: we should really drop using this type and instead just -# write our own wrapper around `PlotItem`.. +# TODO: a general rework of this widget-interface: +# - we should really drop using this type and instead just lever our +# own override of `PlotItem`.. +# - possibly rename to class -> MultiChart(pg.PlotWidget): +# where the widget is responsible for containing management +# harness for multi-Viz "view lists" and their associated mode-panes +# (fsp chain, order ctl, feed queue-ing params, actor ctl, etc). + class ChartPlotWidget(pg.PlotWidget): ''' - ``GraphicsView`` subtype containing a ``.plotItem: PlotItem`` as well + ``PlotWidget`` subtype containing a ``.plotItem: PlotItem`` as well as a `.pi_overlay: PlotItemOverlay`` which helps manage and overlay flow graphics view multiple compose view boxes. @@ -1005,32 +1017,6 @@ def marker_right_points( # ) return line_end, marker_right, r_axis_x - def default_view( - self, - bars_from_y: int = int(616 * 3/8), - y_offset: int = 0, - do_ds: bool = True, - - ) -> None: - ''' - Set the view box to the "default" startup view of the scene. - - ''' - viz = self.get_viz(self.name) - - if not viz: - log.warning(f'`Viz` for {self.name} not loaded yet?') - return - - viz.default_view( - bars_from_y, - y_offset, - do_ds, - ) - - if do_ds: - self.linked.graphics_cycle() - def increment_view( self, datums: int = 1, @@ -1321,3 +1307,30 @@ def main_viz(self) -> Viz: def iter_vizs(self) -> Iterator[Viz]: return iter(self._vizs.values()) + + @cm + def reset_graphics_caches(self) -> None: + ''' + Reset all managed ``Viz`` (flow) graphics objects + Qt cache modes (to ``NoCache`` mode) on enter and + restore on exit. + + ''' + with ExitStack() as stack: + for viz in self.iter_vizs(): + stack.enter_context( + viz.graphics.reset_cache(), + ) + + # also reset any downsampled alt-graphics objects which + # might be active. + dsg = viz.ds_graphics + if dsg: + stack.enter_context( + dsg.reset_cache(), + ) + try: + print("RESETTING ALL") + yield + finally: + stack.close() diff --git a/piker/ui/_display.py b/piker/ui/_display.py index d7400e4b4c..3b779edfe9 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -419,7 +419,7 @@ async def graphics_update_loop( ds.vlm_chart = vlm_chart ds.vlm_sticky = vlm_sticky - fast_chart.default_view() + fast_chart.main_viz.default_view() # ds.hist_vars.update({ # 'i_last_append': 0, @@ -1446,7 +1446,7 @@ async def display_symbol_data( for fqsn, flume in feed.flumes.items(): # size view to data prior to order mode init - rt_chart.default_view() + rt_chart.main_viz.default_view() rt_linked.graphics_cycle() # TODO: look into this because not sure why it was @@ -1457,7 +1457,7 @@ async def display_symbol_data( # determine if auto-range adjustements should be made. # rt_linked.subplots.pop('volume', None) - hist_chart.default_view() + hist_chart.main_viz.default_view() hist_linked.graphics_cycle() godwidget.resize_all() @@ -1500,10 +1500,10 @@ async def display_symbol_data( # default view adjuments and sidepane alignment # as final default UX touch. 
- rt_chart.default_view() + rt_chart.main_viz.default_view() await trio.sleep(0) - hist_chart.default_view() + hist_chart.main_viz.default_view() hist_viz = hist_chart.get_viz(fqsn) await trio.sleep(0) From 57d56c4791c525662da3fdffd86ac8a4a3e2e66d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 5 Mar 2023 21:22:55 -0500 Subject: [PATCH 121/136] Facepalm: set `Viz.ViewState.yrange` even on cache hits.. --- piker/ui/_dataviz.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 1770150755..dd3ec411f4 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -249,7 +249,7 @@ class ViewState(Struct): in_view: np.ndarray | None = None -class Viz(Struct): # , frozen=True): +class Viz(Struct): ''' (Data) "Visualization" compound type which wraps a real-time shm array stream with displayed graphics (curves, charts) @@ -454,6 +454,7 @@ def maxmin( f'{ixrng} -> {cached_result}' ) read_slc, mxmn = cached_result + self.vs.yrange = mxmn return ( ixrng, read_slc, @@ -1148,6 +1149,7 @@ def default_view( if do_ds: view.interact_graphics_cycle() + view.interact_graphics_cycle() def incr_info( self, From fc98d66ffc3e3575423ae426f1b9d9edb4cc5070 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 5 Mar 2023 21:23:42 -0500 Subject: [PATCH 122/136] Fix curve up-sampling on `'r'` hotkey Previously when very zoomed out and using the `'r'` hotkey the interaction handler loop wouldn't trigger a re-(up)sampling to get a more detailed curve graphic and instead the previous downsampled (under-detailed) graphic would show. Fix that by ensuring we yield back to the Qt event loop and do at least a couple render cycles with paired `.interact_graphics_cycle()` calls. Further this flips the `.start/signal_ic()` methods to use the new `.reset_graphics_caches()` ctr-mngr method. --- piker/ui/_interaction.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 588d84bec9..b4a78931be 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -154,9 +154,12 @@ async def handle_viewmode_kb_inputs( god = order_mode.godw # noqa feed = order_mode.feed # noqa chart = order_mode.chart # noqa + viz = chart.main_viz # noqa vlm_chart = chart.linked.subplots['volume'] # noqa + vlm_viz = vlm_chart.main_viz # noqa dvlm_pi = vlm_chart._vizs['dolla_vlm'].plot # noqa await tractor.breakpoint() + view.interact_graphics_cycle() # SEARCH MODE # # ctlr-/ for "lookup", "search" -> open search tree @@ -185,9 +188,13 @@ async def handle_viewmode_kb_inputs( # View modes if key == Qt.Key_R: - # TODO: set this for all subplots - # edge triggered default view activation - view.chart.default_view() + # NOTE: seems that if we don't yield a Qt render + # cycle then the m4 downsampled curves will show here + # without another reset.. 
+ view._viz.default_view() + view.interact_graphics_cycle() + await trio.sleep(0) + view.interact_graphics_cycle() if len(fast_key_seq) > 1: # begin matches against sequences @@ -427,17 +434,12 @@ def start_ic( if self._in_interact is None: chart = self.chart try: - chart.pause_all_feeds() self._in_interact = trio.Event() - for viz in chart.iter_vizs(): - self._interact_stack.enter_context( - viz.graphics.reset_cache(), - ) - dsg = viz.ds_graphics - if dsg: - self._interact_stack.enter_context( - dsg.reset_cache(), - ) + + chart.pause_all_feeds() + self._interact_stack.enter_context( + chart.reset_graphics_caches() + ) except RuntimeError: pass @@ -453,10 +455,11 @@ def signal_ic( ''' if self._in_interact: try: - self._in_interact.set() - self._in_interact = None self._interact_stack.close() self.chart.resume_all_feeds() + + self._in_interact.set() + self._in_interact = None except RuntimeError: pass @@ -940,7 +943,7 @@ def interact_graphics_cycle( str, tuple[float, float], ] | None = None, - + ): profiler = Profiler( msg=f'ChartView.interact_graphics_cycle() for {self.name}', From 05aee4a3111889baee4b1383da4192daf66cc145 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 6 Mar 2023 10:37:26 -0500 Subject: [PATCH 123/136] Tweak debug printing to display y-mxmn per viz --- piker/ui/view_mode.py | 45 ++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py index 048177656e..4b1cdac725 100644 --- a/piker/ui/view_mode.py +++ b/piker/ui/view_mode.py @@ -250,7 +250,7 @@ def overlay_viewlists( print( f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n' + - '#'*100 + '#'*66 + '\n' ) @@ -350,12 +350,14 @@ def overlay_viewlists( # returns scalars r_up = (ymx - y_ref) / y_ref r_down = (ymn - y_ref) / y_ref + disp = r_up - r_down msg = ( - f'### {viz.name}@{chart_name} ###\n' + f'=> {viz.name}@{chart_name}\n' f'y_ref: {y_ref}\n' f'down disp: {r_down}\n' f'up disp: {r_up}\n' + f'full disp: {disp}\n' ) profiler(msg) if debug_print: @@ -376,7 +378,7 @@ def overlay_viewlists( dnt.start_t = in_view[0]['time'] dnt.y_val = ymn - msg = f'NEW DOWN: {viz.name}@{chart_name} r:{r_down}\n' + msg = f'NEW DOWN: {viz.name}@{chart_name} r: {r_down}' profiler(msg) if debug_print: print(msg) @@ -437,7 +439,7 @@ def overlay_viewlists( upt.in_view = in_view upt.start_t = in_view[0]['time'] upt.y_val = ymx - msg = f'NEW UP: {viz.name}@{chart_name} r:{r_up}\n' + msg = f'NEW UP: {viz.name}@{chart_name} r: {r_up}' profiler(msg) if debug_print: print(msg) @@ -480,7 +482,6 @@ def overlay_viewlists( # register curves by a "full" dispersion metric for # later sort order in the overlay (technique # ) application loop below. - disp = r_up - r_down overlay_table[disp] = ( viz.plot.vb, viz, @@ -528,19 +529,21 @@ def overlay_viewlists( elif ( mxmns_by_common_pi - # and not major_sigma_viz and not overlay_table ): # move to next chart in linked set since # no overlay transforming is needed. 
continue - msg = ( - f'`Viz` curve first pass complete\n' - f'overlay_table: {overlay_table.keys()}\n' - ) - profiler(msg) + profiler('`Viz` curve first pass complete\n') + if debug_print: + # print overlay table in descending dispersion order + msg = 'overlays by disp:\n' + for disp in reversed(overlay_table): + entry = overlay_table[disp] + msg += f'{entry[1].name}: {disp}\n' + print(msg) r_up_mx: float @@ -612,6 +615,7 @@ def overlay_viewlists( ) = mx_viz.scalars_from_index(xref) ymn = y_start * (1 + r_major_down_here) + ymx = y_start * (1 + r_major_up_here) # if this curve's y-range is detected as **not # being in view** after applying the @@ -661,8 +665,6 @@ def overlay_viewlists( f'RESCALE MAJ ymn -> {mx_ymn}' ) - ymx = y_start * (1 + r_major_up_here) - # same as above but for minor being out-of-range # on the upside. if ymx <= y_max: @@ -689,18 +691,20 @@ def overlay_viewlists( f'RESCALE {_viz.name} ymn -> {new_ymx}' ) + # register all overlays for a final pass where we + # apply all pinned-curve y-range transform scalings. + scaled[view] = (viz, y_start, ymn, ymx, xref) + if debug_print: print( f'Minor SCALARS {viz.name}:\n' f'xref: {xref}\n' f'dn: {r_major_down_here}\n' f'up: {r_major_up_here}\n' + f'ymn: {ymn}\n' + f'ymx: {ymx}\n' ) - # register all overlays for a final pass where we - # apply all pinned-curve y-range transform scalings. - scaled[view] = (viz, y_start, ymn, ymx, xref) - # target/dispersion MAJOR case else: if debug_print: @@ -708,6 +712,8 @@ def overlay_viewlists( f'MAJOR SCALARS {viz.name}:\n' f'dn: {r_dn_mn}\n' f'up: {r_up_mx}\n' + f'mx_ymn: {mx_ymn}\n' + f'mx_ymx: {mx_ymx}\n' ) # target/major curve's mxmn may have been @@ -763,6 +769,9 @@ def overlay_viewlists( f'T scaled ymn: {ymn}\n' f'T scaled ymx: {ymx}\n' '------------------------------\n' + f'Viz[{viz.name}]:\n' + f' .yrange = {viz.vs.yrange}\n' + f' .xrange = {viz.vs.xrange}\n' ) # finally, scale major curve to possibly re-scaled/modified @@ -773,7 +782,7 @@ def overlay_viewlists( print( f'END UX GRAPHICS CYCLE: @{chart_name}\n' + - '#'*100 + '#'*66 + '\n' ) From 4bb580ae607573d4b75910db02eb6ab5daafe11d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 6 Mar 2023 18:30:58 -0500 Subject: [PATCH 124/136] Don't `@lru_cache` on `Viz.i_from_t()`, since view state.. --- piker/ui/_dataviz.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index dd3ec411f4..2d1b6eed8e 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -1372,18 +1372,30 @@ def disp_from_range( case 'both': return r_up, r_down - @lru_cache(maxsize=6116) + # @lru_cache(maxsize=6116) def i_from_t( self, t: float, - ) -> int: - return slice_from_time( + return_y: bool = False, + + ) -> int | tuple[int, float]: + + istart = slice_from_time( self.vs.in_view, start_t=t, stop_t=t, step=self.index_step(), ).start + if not return_y: + return istart + + vs = self.vs + arr = vs.in_view + key = 'open' if self.is_ohlc else self.name + yref = arr[istart][key] + return istart, yref + def scalars_from_index( self, xref: float | None = None, From 51f3733487666a4954ad6caadf44945ddfb6cfb3 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 6 Mar 2023 19:03:04 -0500 Subject: [PATCH 125/136] Handle "target-is-shorter-then-pinned" case When the target pinning curve (by default, the dispersion major) is shorter then the pinned curve, we need to make sure we find still find the x-intersect for computing returns scalars! 
Use `Viz.i_from_t()` to accomplish this as well and augment that
method with a `return_y: bool` to allow the caller to also retrieve the
equivalent y-value at the requested input time `t: float` for
convenience.

Also tweak a few more internals around the 'loglin_ref_to_curve'
method:
- only solve / adjust for the above case when the major's xref is
  detected as being "earlier" in time than the current minor's.
- pop the major viz entry from the overlay table ahead of time to
  avoid a needless iteration and simplify the transform calc phase
  loop to avoid handling that needless cycle B)
- add much better "organized" debug printing with more clear headers
  around which "phase"/loop the message pertains as well as more
  explicit details in terms of x and y-range values on each cycle of
  each loop.
---
 piker/ui/view_mode.py | 366 +++++++++++++++++++++++++-----------------
 1 file changed, 218 insertions(+), 148 deletions(-)

diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
index 4b1cdac725..3e15ffface 100644
--- a/piker/ui/view_mode.py
+++ b/piker/ui/view_mode.py
@@ -26,6 +26,7 @@
 )
 
 import numpy as np
+import pendulum
 import pyqtgraph as pg
 
 from ..data.types import Struct
@@ -247,10 +248,11 @@ def overlay_viewlists(
             continue
 
         if debug_print:
+            divstr = '#'*46
             print(
                 f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n'
                 +
-                '#'*66
+                divstr
                 +
                 '\n'
             )
@@ -353,11 +355,16 @@ def overlay_viewlists(
             disp = r_up - r_down
 
             msg = (
-                f'=> {viz.name}@{chart_name}\n'
+                f'Viz[{viz.name}][{key}]: @{chart_name}\n'
+                f'  .yrange = {viz.vs.yrange}\n'
+                f'  .xrange = {viz.vs.xrange}\n\n'
+                f'start_t: {start_t}\n'
                 f'y_ref: {y_ref}\n'
-                f'down disp: {r_down}\n'
-                f'up disp: {r_up}\n'
-                f'full disp: {disp}\n'
+                f'ymn: {ymn}\n'
+                f'ymx: {ymx}\n'
+                f'r_up disp: {r_up}\n'
+                f'r_down: {r_down}\n'
+                f'(full) disp: {disp}\n'
             )
             profiler(msg)
             if debug_print:
                 print(msg)
@@ -378,10 +385,7 @@ def overlay_viewlists(
                 dnt.start_t = in_view[0]['time']
                 dnt.y_val = ymn
 
-                msg = f'NEW DOWN: {viz.name}@{chart_name} r:{r_down}\n'
-                profiler(msg)
-                if debug_print:
-                    print(msg)
+                profiler(f'NEW DOWN: {viz.name}@{chart_name} r: {r_down}')
             else:
                 # minor in the down swing range so check that if
                 # we apply the current rng to the minor that it
@@ -439,10 +443,7 @@ def overlay_viewlists(
                 upt.in_view = in_view
                 upt.start_t = in_view[0]['time']
                 upt.y_val = ymx
-                msg = f'NEW UP: {viz.name}@{chart_name} r:{r_up}\n'
-                profiler(msg)
-                if debug_print:
-                    print(msg)
+                profiler(f'NEW UP: {viz.name}@{chart_name} r: {r_up}')
 
             else:
                 intersect = intersect_from_longer(
@@ -535,21 +536,32 @@ def overlay_viewlists(
             # no overlay transforming is needed.
             continue
 
-        profiler('`Viz` curve first pass complete\n')
+        profiler('`Viz` curve (first) scan phase complete\n')
+
+        r_up_mx: float
+        r_dn_mn: float
+        mx_disp = max(overlay_table)
 
         if debug_print:
             # print overlay table in descending dispersion order
-            msg = 'overlays by disp:\n'
-            for disp in reversed(overlay_table):
-                entry = overlay_table[disp]
-                msg += f'{entry[1].name}: {disp}\n'
+            msg = 'overlays in dispersion order:\n'
+            for i, disp in enumerate(reversed(overlay_table)):
+                entry = overlay_table[disp]
+                msg += f'  [{i}] {disp}: {entry[1].name}\n'
 
-            print(msg)
+            print(
+                'TRANSFORM PHASE' + '-'*100 + '\n\n'
+                +
+                msg
+            )
 
+        if method == 'loglin_ref_to_curve':
+            mx_entry = overlay_table.pop(mx_disp)
+        else:
+            # TODO: for pin to first-in-view we need to not pop this from the
+ mx_entry = overlay_table[mx_disp] - r_up_mx: float - r_dn_mn: float - mx_disp = max(overlay_table) - mx_entry = overlay_table[mx_disp] ( mx_view, # viewbox mx_viz, # viz @@ -557,19 +569,28 @@ def overlay_viewlists( mx_ymn, mx_ymx, _, # read_slc - _, # in_view array + mx_in_view, # in_view array r_up_mx, r_dn_mn, ) = mx_entry - + mx_time = mx_in_view['time'] + mx_xref = mx_time[0] + + # conduct "log-linearized multi-plot" range transform + # calculations for curves detected as overlays in the previous + # loop: + # -> iterate all curves Ci in dispersion-measure sorted order + # going from smallest swing to largest via the + # ``overlay_table: dict``, + # -> match on overlay ``method: str`` provided by caller, + # -> calc y-ranges from each curve's time series and store in + # a final table ``scaled: dict`` for final application in the + # scaling loop; the final phase. scaled: dict[ float, tuple[Viz, float, float, float, float] ] = {} - # conduct "log-linearized multi-plot" scalings for all groups - # -> iterate all curves Ci in dispersion-measure sorted order - # going from smallest swing to largest. for full_disp in reversed(overlay_table): ( view, @@ -601,125 +622,160 @@ def overlay_viewlists( # scaling to all curves, including the major-target, # which were previously scaled before. case 'loglin_ref_to_curve': - if viz is not mx_viz: - # calculate y-range scalars from the earliest - # "intersect" datum with the target-major - # (dispersion) curve so as to "pin" the curves - # in the y-domain at that spot. + # calculate y-range scalars from the earliest + # "intersect" datum with the target-major + # (dispersion) curve so as to "pin" the curves + # in the y-domain at that spot. + # NOTE: there are 2 cases for un-matched support + # in x-domain (where one series is shorter then the + # other): + # => major is longer then minor: + # - need to scale the minor *from* the first + # supported datum in both series. + # + # => major is shorter then minor: + # - need to scale the minor *from* the first + # supported datum in both series (the + # intersect x-value) but using the + # intersecting point from the minor **not** + # its first value in view! + yref = y_start + + if mx_xref > xref: ( - i_start, - y_ref_major, - r_major_up_here, - r_major_down_here, - ) = mx_viz.scalars_from_index(xref) - - ymn = y_start * (1 + r_major_down_here) - ymx = y_start * (1 + r_major_up_here) - - # if this curve's y-range is detected as **not - # being in view** after applying the - # target-major's transform, adjust the - # target-major curve's range to (log-linearly) - # include it (the extra missing range) by - # adjusting the y-mxmn to this new y-range and - # applying the inverse transform of the minor - # back on the target-major (and possibly any - # other previously-scaled-to-target/major, minor - # curves). - if ymn >= y_min: - ymn = y_min - r_dn_minor = (ymn - y_start) / y_start - - # rescale major curve's y-max to include new - # range increase required by **this minor**. - mx_ymn = y_ref_major * (1 + r_dn_minor) - mx_viz.vs.yrange = mx_ymn, mx_viz.vs.yrange[1] - - # rescale all already scaled curves to new - # increased range for this side as - # determined by ``y_min`` staying in view; - # re-set the `scaled: dict` entry to - # ensure that this minor curve will be - # entirely in view. - # TODO: re updating already-scaled minor curves - # - is there a faster way to do this by - # mutating state on some object instead? 
- for _view in scaled: - _viz, _yref, _ymn, _ymx, _xref = scaled[_view] - ( - _, - _, - _, - r_major_down_here, - ) = mx_viz.scalars_from_index(_xref) - - new_ymn = _yref * (1 + r_major_down_here) - - scaled[_view] = ( - _viz, _yref, new_ymn, _ymx, _xref) - - if debug_print: - print( - f'RESCALE {_viz.name} ymn -> {new_ymn}' - f'RESCALE MAJ ymn -> {mx_ymn}' - ) - - # same as above but for minor being out-of-range - # on the upside. - if ymx <= y_max: - ymx = y_max - r_up_minor = (ymx - y_start) / y_start - mx_ymx = y_ref_major * (1 + r_up_minor) - mx_viz.vs.yrange = mx_viz.vs.yrange[0], mx_ymx - - for _view in scaled: - _viz, _yref, _ymn, _ymx, _xref = scaled[_view] - ( - _, - _, - r_major_up_here, - _, - ) = mx_viz.scalars_from_index(_xref) - - new_ymx = _yref * (1 + r_major_up_here) - scaled[_view] = ( - _viz, _yref, _ymn, new_ymx, _xref) - - if debug_print: - print( - f'RESCALE {_viz.name} ymn -> {new_ymx}' - ) - - # register all overlays for a final pass where we - # apply all pinned-curve y-range transform scalings. - scaled[view] = (viz, y_start, ymn, ymx, xref) + xref_pin, + yref, + ) = viz.i_from_t( + mx_xref, + return_y=True, + ) + xref_pin_dt = pendulum.from_timestamp(xref_pin) + xref = mx_xref if debug_print: print( - f'Minor SCALARS {viz.name}:\n' + 'MAJOR SHORTER!!!\n' f'xref: {xref}\n' - f'dn: {r_major_down_here}\n' - f'up: {r_major_up_here}\n' - f'ymn: {ymn}\n' - f'ymx: {ymx}\n' + f'xref_pin: {xref_pin}\n' + f'xref_pin-dt: {xref_pin_dt}\n' + f'yref@xref_pin: {yref}\n' + ) + + ( + i_start, + y_ref_major, + r_up_from_major_at_xref, + r_down_from_major_at_xref, + ) = mx_viz.scalars_from_index(xref) + + ymn = yref * (1 + r_down_from_major_at_xref) + ymx = yref * (1 + r_up_from_major_at_xref) + + # if this curve's y-range is detected as **not + # being in view** after applying the + # target-major's transform, adjust the + # target-major curve's range to (log-linearly) + # include it (the extra missing range) by + # adjusting the y-mxmn to this new y-range and + # applying the inverse transform of the minor + # back on the target-major (and possibly any + # other previously-scaled-to-target/major, minor + # curves). + if ymn >= y_min: + ymn = y_min + r_dn_minor = (ymn - yref) / yref + + # rescale major curve's y-max to include new + # range increase required by **this minor**. + mx_ymn = y_ref_major * (1 + r_dn_minor) + mx_viz.vs.yrange = mx_ymn, mx_viz.vs.yrange[1] + + if debug_print: + print( + f'RESCALE {viz.name} ymn -> {y_min}' + f'RESCALE MAJ ymn -> {mx_ymn}' ) + # rescale all already scaled curves to new + # increased range for this side as + # determined by ``y_min`` staying in view; + # re-set the `scaled: dict` entry to + # ensure that this minor curve will be + # entirely in view. + # TODO: re updating already-scaled minor curves + # - is there a faster way to do this by + # mutating state on some object instead? + for _view in scaled: + _viz, _yref, _ymn, _ymx, _xref = scaled[_view] + ( + _, + _, + _, + r_down_from_out_of_range, + ) = mx_viz.scalars_from_index(_xref) + + new_ymn = _yref * (1 + r_down_from_out_of_range) + + scaled[_view] = ( + _viz, _yref, new_ymn, _ymx, _xref) + + if debug_print: + print( + f'RESCALE {_viz.name} ymn -> {new_ymn}' + f'RESCALE MAJ ymn -> {mx_ymn}' + ) + + # same as above but for minor being out-of-range + # on the upside. 
+ if ymx <= y_max: + ymx = y_max + r_up_minor = (ymx - yref) / yref + mx_ymx = y_ref_major * (1 + r_up_minor) + mx_viz.vs.yrange = mx_viz.vs.yrange[0], mx_ymx - # target/dispersion MAJOR case - else: if debug_print: print( - f'MAJOR SCALARS {viz.name}:\n' - f'dn: {r_dn_mn}\n' - f'up: {r_up_mx}\n' - f'mx_ymn: {mx_ymn}\n' - f'mx_ymx: {mx_ymx}\n' + f'RESCALE {viz.name} ymn -> {y_max}' + f'RESCALE MAJ ymx -> {mx_ymx}' ) - # target/major curve's mxmn may have been - # reset by minor overlay steps above. - ymn = mx_ymn - ymx = mx_ymx + for _view in scaled: + _viz, _yref, _ymn, _ymx, _xref = scaled[_view] + ( + _, + _, + r_up_from_out_of_range, + _, + ) = mx_viz.scalars_from_index(_xref) + + new_ymx = _yref * (1 + r_up_from_out_of_range) + scaled[_view] = ( + _viz, _yref, _ymn, new_ymx, _xref) + + if debug_print: + print( + f'RESCALE {_viz.name} ymn -> {new_ymx}' + ) + + # register all overlays for a final pass where we + # apply all pinned-curve y-range transform scalings. + scaled[view] = (viz, yref, ymn, ymx, xref) + + if debug_print: + print( + f'Viz[{viz.name}]: @ {chart_name}\n' + f' .yrange = {viz.vs.yrange}\n' + f' .xrange = {viz.vs.xrange}\n\n' + f'xref: {xref}\n' + f'xref-dt: {pendulum.from_timestamp(xref)}\n' + f'y_min: {y_min}\n' + f'y_max: {y_max}\n' + f'RESCALING\n' + f'r dn: {r_down_from_major_at_xref}\n' + f'r up: {r_up_from_major_at_xref}\n' + f'ymn: {ymn}\n' + f'ymx: {ymx}\n' + ) # Pin all curves by their first datum in view to all # others such that each curve's earliest datum provides the @@ -742,6 +798,22 @@ def overlay_viewlists( ) if scaled: + if debug_print: + print( + 'SCALING PHASE' + '-'*100 + '\n\n' + '_________MAJOR INFO___________\n' + f'SIGMA MAJOR C: {mx_viz.name} -> {mx_disp}\n' + f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' + f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' + f'xref: {mx_xref}\n' + f'xref-dt: {pendulum.from_timestamp(mx_xref)}\n' + f'dn: {r_dn_mn}\n' + f'up: {r_up_mx}\n' + f'mx_ymn: {mx_ymn}\n' + f'mx_ymx: {mx_ymx}\n' + '------------------------------' + ) + for ( view, (viz, yref, ymn, ymx, xref) @@ -757,32 +829,30 @@ def overlay_viewlists( if debug_print: print( - '------------------------------\n' - f'LOGLIN SCALE CYCLE: {viz.name}@{chart_name}\n' - f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n' - f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n' - f'SIGMA MAJOR C: {mx_viz.name} -> {mx_disp}\n' - f'xref for MINOR: {xref}\n' + '_________MINOR INFO___________\n' + f'Viz[{viz.name}]: @ {chart_name}\n' + f' .yrange = {viz.vs.yrange}\n' + f' .xrange = {viz.vs.xrange}\n\n' + f'xref: {xref}\n' + f'xref-dt: {pendulum.from_timestamp(xref)}\n' f'y_start: {y_start}\n' f'y min: {y_min}\n' f'y max: {y_max}\n' f'T scaled ymn: {ymn}\n' - f'T scaled ymx: {ymx}\n' - '------------------------------\n' - f'Viz[{viz.name}]:\n' - f' .yrange = {viz.vs.yrange}\n' - f' .xrange = {viz.vs.xrange}\n' + f'T scaled ymx: {ymx}\n\n' + '--------------------------------\n' ) - # finally, scale major curve to possibly re-scaled/modified - # values + # finally, scale the major target/dispersion curve to + # the (possibly re-scaled/modified) values were set in + # transform phase loop. 
             mx_view._set_yrange(yrange=(mx_ymn, mx_ymx))
 
         if debug_print:
             print(
                 f'END UX GRAPHICS CYCLE: @{chart_name}\n'
                 +
-                '#'*66
+                divstr
                 +
                 '\n'
             )

From 712f1a47a0f9b7c892e1c24c31bd1d9aab502d75 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 14:36:19 -0500
Subject: [PATCH 126/136] Require `step: float` input to `slice_from_time()`

There's been way too many issues when trying to calculate this
dynamically from the input array, so just expect the caller to know
what it's doing and don't bother with ever hitting the error case of
calculating an incorrect value internally.
---
 piker/data/_pathops.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py
index d8c15511d6..48a11f4022 100644
--- a/piker/data/_pathops.py
+++ b/piker/data/_pathops.py
@@ -295,7 +295,7 @@ def slice_from_time(
     arr: np.ndarray,
     start_t: float,
     stop_t: float,
-    step: int | None = None,
+    step: float,  # sampler period step-diff
 
 ) -> slice:
     '''
@@ -324,12 +324,6 @@ def slice_from_time(
     # end of the input array.
     read_i_max = arr.shape[0]
 
-    # TODO: require this is always passed in?
-    if step is None:
-        step = round(t_last - times[-2])
-        if step == 0:
-            step = 1
-
     # compute (presumed) uniform-time-step index offsets
    i_start_t = floor(start_t)
     read_i_start = floor(((i_start_t - t_first) // step)) - 1
@@ -412,7 +406,7 @@ def slice_from_time(
         times[read_i_start:],
         # times,
         i_stop_t,
-        side='left',
+        side='right',
     )
 
     if (

From 32926747c64ed836e2a84cdf45b215a683444d9d Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 15:05:42 -0500
Subject: [PATCH 127/136] Always pass `step` to `slice_from_time()` in the `Viz`

As per the change to `slice_from_time()` this ensures the `Viz` always
passes its self-calculated time indexing step size to the time slicing
routine(s).

Further, this contains a slight impl tweak to `.scalars_from_index()`
to slice the actual view range from `xref` to `Viz.ViewState.xrange[1]`
and then read the corresponding `yref` from the first entry in that
array; this should be no slower in theory and makes way for further
opportunities to cache the x-read-range in `ViewState` later.
---
 piker/ui/_dataviz.py | 42 +++++++++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 9 deletions(-)

diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
index 2d1b6eed8e..2672cab04d 100644
--- a/piker/ui/_dataviz.py
+++ b/piker/ui/_dataviz.py
@@ -239,6 +239,8 @@ class ViewState(Struct):
         float | int
     ] | None = None
 
+    # TODO: cache the (ixl, ixr) read_slc-into-.array style slice index?
+
     # (ymn, ymx) "output" min and max in viewed y-codomain
     yrange: tuple[
         float | int,
@@ -655,6 +657,7 @@ def read(
                 array,
                 start_t=lbar,
                 stop_t=rbar,
+                step=self.index_step(),
             )
 
             # TODO: maybe we should return this from the slicer call
@@ -1400,8 +1403,23 @@ def scalars_from_index(
         self,
         xref: float | None = None,
 
-    ) -> tuple[int, float, float, float]:
+    ) -> tuple[
+        int,
+        float,
+        float,
+        float,
+    ]:
+        '''
+        Calculate and deliver the log-returns scalars specifically
+        according to y-data supported on this ``Viz``'s underlying
+        x-domain data range from ``xref`` -> ``.vs.xrange[1]``.
+
+        The main use case for this method (currently) is to generate
+        scalars which will allow calculating the required y-range for
+        some "pinned" curve to be aligned *from* the ``xref`` time
+        stamped datum *to* the curve rendered by THIS viz.
+
+        '''
         vs = self.vs
         arr = vs.in_view
 
         # TODO: can we rather just sub the last time in
         # .vs.xrange input for caching?
        # read_slc_start = self.i_from_t(xref)
 
-        slc = slice_from_time(
+        read_slc = slice_from_time(
             arr=self.vs.in_view,
             start_t=xref,
-            stop_t=xref,
+            stop_t=vs.xrange[1],
+            step=self.index_step(),
         )
-        read_slc_start = slc.start
 
         key = 'open' if self.is_ohlc else self.name
-        yref = arr[read_slc_start][key]
+
+        # NOTE: old code, it's no faster right?
+        # read_slc_start = read_slc.start
+        # yref = arr[read_slc_start][key]
+
+        read = arr[read_slc][key]
+        yref = read[0]
         ymn, ymx = self.vs.yrange
         # print(
-        #     f'INTERSECT xref: {read_slc_start}\n'
-        #     f'ymn, ymx: {(ymn, ymx)}\n'
+        #     f'Viz[{self.name}].scalars_from_index(xref={xref})\n'
+        #     f'read_slc: {read_slc}\n'
+        #     f'ymnmx: {(ymn, ymx)}\n'
         # )
         return (
-            read_slc_start,
+            read_slc.start,
             yref,
             (ymx - yref) / yref,
             (ymn - yref) / yref,

From 8d1c713a5a215eb6e4b3517a7bff17c8f6040ef2 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 15:18:34 -0500
Subject: [PATCH 128/136] Always pass step to `slice_from_time()` in view mode

Again, as per the signature change, never expect implicit time step
calcs from overlay processing/machinery code.

Also, extend the debug printing (yet again) to include better details
around "rescale-due-to-minor-range-out-of-view" cases and a detailed
msg for the transform/scaling calculation (inputs/outputs),
particularly for the cases when one of the curves has lesser support.
---
 piker/ui/view_mode.py | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
index 3e15ffface..f8992a0ad2 100644
--- a/piker/ui/view_mode.py
+++ b/piker/ui/view_mode.py
@@ -86,6 +86,7 @@ def intersect_from_longer(
     start_t_second: float,
     in_view_second: np.ndarray,
+    step: float,
 
 ) -> np.ndarray:
 
@@ -114,6 +115,7 @@ def intersect_from_longer(
         arr=longer,
         start_t=find_t,
         stop_t=find_t,
+        step=step,
     )
     return (
         longer[slc.start],
@@ -228,15 +230,13 @@ def overlay_viewlists(
         or len(overlay_viz_items) < 2
     ):
         viz = active_viz
-        if debug_print:
-            print(f'ONLY ranging THIS viz: {viz.name}')
-
         out = _maybe_calc_yrange(
             viz,
             yrange_kwargs,
             profiler,
             chart_name,
         )
+
         if out is None:
             continue
 
@@ -244,6 +244,9 @@ def overlay_viewlists(
         viz.plot.vb._set_yrange(**yrange_kwargs)
         profiler(f'{viz.name}@{chart_name} single curve yrange')
 
+        if debug_print:
+            print(f'ONLY ranging THIS viz: {viz.name}')
+
         # don't iterate overlays, just move to next chart
         continue
 
@@ -362,7 +365,7 @@ def overlay_viewlists(
                 f'y_ref: {y_ref}\n'
                 f'ymn: {ymn}\n'
                 f'ymx: {ymx}\n'
-                f'r_up disp: {r_up}\n'
+                f'r_up: {r_up}\n'
                 f'r_down: {r_down}\n'
                 f'(full) disp: {disp}\n'
             )
@@ -398,6 +401,7 @@ def overlay_viewlists(
                         dnt.in_view,
                         start_t,
                         in_view,
+                        viz.index_step(),
                     )
                     profiler(f'{viz.name}@{chart_name} intersect by t')
 
@@ -451,6 +455,7 @@ def overlay_viewlists(
                         upt.in_view,
                         start_t,
                         in_view,
+                        viz.index_step(),
                     )
                     profiler(f'{viz.name}@{chart_name} intersect by t')
 
@@ -669,6 +674,19 @@ def overlay_viewlists(
                         r_down_from_major_at_xref,
                     ) = mx_viz.scalars_from_index(xref)
 
+                    if debug_print:
+                        print(
+                            'MAJOR PIN SCALING\n'
+                            f'mx_xref: {mx_xref}\n'
+                            f'major i_start: {i_start}\n'
+                            f'y_ref_major: {y_ref_major}\n'
+                            f'r_up_from_major_at_xref {r_up_from_major_at_xref}\n'
+                            f'r_down_from_major_at_xref: {r_down_from_major_at_xref}\n'
+                            f'-----to minor-----\n'
+                            f'xref: {xref}\n'
+                            f'y_start: {y_start}\n'
+                            f'yref: {yref}\n'
+                        )
                     ymn = yref * (1 + r_down_from_major_at_xref)
                     ymx = yref * (1 + r_up_from_major_at_xref)
 
@@ -693,8 +711,8 @@ def overlay_viewlists(
 
                         if debug_print:
                             print(
-                                f'RESCALE {viz.name} ymn -> {y_min}'
-                                f'RESCALE MAJ ymn -> {mx_ymn}'
+                                f'RESCALE {mx_viz.name} DUE TO {viz.name} ymn -> {y_min}\n'
+                                f'-> MAJ ymn (w r_down: {r_dn_minor}) -> {mx_ymn}\n\n'
                             )
                     # rescale all already scaled curves to new
                     # increased range for this side as
@@ -735,8 +753,8 @@ def overlay_viewlists(
 
                         if debug_print:
                             print(
-                                f'RESCALE {viz.name} ymn -> {y_max}'
-                                f'RESCALE MAJ ymx -> {mx_ymx}'
+                                f'RESCALE {mx_viz.name} DUE TO {viz.name} ymx -> {y_max}\n'
+                                f'-> MAJ ymx (r_up: {r_up_minor} -> {mx_ymx}\n\n'
                             )

From 5958acebe19199ab30ffc2f285bf70d92c7bf895 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 15:35:07 -0500
Subject: [PATCH 129/136] Add (commented) draft 1min OHLC time index logging

For the purposes of eventually trying to resolve last-step indexing
synchronization (intermittent but still existing) issue(s) that can
happen due to races during history frame query and shm writing during
startup. In fact, here we drop all `hist_viz` info queries from the
main display loop for now, anticipating that this code will either be
removed or improved later.
---
 piker/ui/_display.py | 46 +++++++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 18 deletions(-)

diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index 3b779edfe9..1ff9c4dc17 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -32,6 +32,7 @@
 import tractor
 import trio
 import pyqtgraph as pg
+# import pendulum
 
 from msgspec import field
 
@@ -187,8 +188,6 @@ class DisplayState(Struct):
             'i_last': 0,
             'i_last_append': 0,
             'last_mx_vlm': 0,
-            # 'last_mx': 0,
-            # 'last_mn': 0,
         }
     )
     hist_vars: dict[str, Any] = field(
@@ -196,8 +195,6 @@ class DisplayState(Struct):
             'i_last': 0,
             'i_last_append': 0,
             'last_mx_vlm': 0,
-            # 'last_mx': 0,
-            # 'last_mn': 0,
         }
     )
 
@@ -773,20 +770,33 @@ def graphics_update_cycle(
             )
             profiler('main vb y-autorange')
 
-            # SLOW CHART y-auto-range resize case
+            # SLOW CHART y-auto-range resize case
+            # (NOTE: this is still inside the y-range
+            # guard block above!)
-            (
-                _,
-                hist_liv,
-                _,
-                _,
-                _,
-                _,
-                _,
-            ) = hist_viz.incr_info(
-                ds=ds,
-                is_1m=True,
-            )
-            profiler('hist `Viz.incr_info()`')
+            # (
+            #     _,
+            #     hist_liv,
+            #     _,
+            #     _,
+            #     _,
+            #     _,
+            #     _,
+            # ) = hist_viz.incr_info(
+            #     ds=ds,
+            #     is_1m=True,
+            # )
+
+            # if hist_liv:
+            #     times = hist_viz.shm.array['time']
+            #     last_t = times[-1]
+            #     dt = pendulum.from_timestamp(last_t)
+            #     log.info(
+            #         f'{hist_viz.name} TIMESTEP:'
+            #         f'epoch: {last_t}\n'
+            #         f'datetime: {dt}\n'
+            #     )
+
+            # profiler('hist `Viz.incr_info()`')
 
             # hist_chart = ds.hist_chart
             # if (

From 8a87e5f3905c3bc83d6593482286268e7c703389 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 15:41:38 -0500
Subject: [PATCH 130/136] Remove leftover debug print in cache reset meth

---
 piker/ui/_chart.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py
index 5f6c9ed1e4..7811278bab 100644
--- a/piker/ui/_chart.py
+++ b/piker/ui/_chart.py
@@ -1330,7 +1330,6 @@ def reset_graphics_caches(self) -> None:
                     dsg.reset_cache(),
                 )
             try:
-                print("RESETTING ALL")
                 yield
             finally:
                 stack.close()

From 12e196a6f7bee309b2d497dc1e6d839545b714fc Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 15:42:06 -0500
Subject: [PATCH 131/136] Catch `KeyError` on bcast errors which pop the sub

Not sure how I missed this (and left in the `list.remove()` handling;
did that ever work?)
after the `samplerd` impl in 5ec1a72, but this adjusts the
remove-broken-subscriber loop to catch the correct `set.remove()`
exception type on a missing (likely already removed) subscription
entry.
---
 piker/data/_sampling.py | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/piker/data/_sampling.py b/piker/data/_sampling.py
index ec29c6ae2e..84dce08e2d 100644
--- a/piker/data/_sampling.py
+++ b/piker/data/_sampling.py
@@ -87,7 +87,6 @@ class Sampler:
     # holds all the ``tractor.Context`` remote subscriptions for
     # a particular sample period increment event: all subscribers are
     # notified on a step.
-    # subscribers: dict[int, list[tractor.MsgStream]] = {}
     subscribers: defaultdict[
         float,
         list[
@@ -240,8 +239,11 @@ async def broadcast(
         subscribers for a given sample period.
 
         '''
+        pair: list[float, set]
         pair = self.subscribers[period_s]
 
+        last_ts: float
+        subs: set
         last_ts, subs = pair
 
         task = trio.lowlevel.current_task()
@@ -281,7 +283,7 @@ async def broadcast(
             for stream in borked:
                 try:
                     subs.remove(stream)
-                except ValueError:
+                except KeyError:
                     log.warning(
                         f'{stream._ctx.chan.uid} sub already removed!?'
                     )
@@ -429,7 +431,7 @@ async def maybe_open_samplerd(
     loglevel: str | None = None,
     **kwargs,
 
-) -> tractor._portal.Portal:  # noqa
+) -> tractor.Portal:  # noqa
     '''
     Client-side helper to maybe startup the ``samplerd``
     service under the ``pikerd`` tree.
@@ -619,6 +621,14 @@ async def sample_and_broadcast(
 
             fqsn = f'{broker_symbol}.{brokername}'
             lags: int = 0
 
+            # TODO: speed up this loop in an AOT compiled lang (like
+            # rust or nim or zig) and/or instead of doing a fan out to
+            # TCP sockets here, we add a shm-style tick queue which
+            # readers can pull from instead of placing the burden of
+            # broadcast solely on this `brokerd` actor. see issues:
+            # - https://github.com/pikers/piker/issues/98
+            # - https://github.com/pikers/piker/issues/107
+
             for (stream, tick_throttle) in subs.copy():
                 try:
                     with trio.move_on_after(0.2) as cs:
@@ -748,9 +758,6 @@ def frame_ticks(
             ticks_by_type[ttype].append(tick)
 
 
-# TODO: a less naive throttler, here's some snippets:
-# token bucket by njs:
-# https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
 async def uniform_rate_send(
 
     rate: float,
@@ -760,8 +767,22 @@ async def uniform_rate_send(
     task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
 
 ) -> None:
+    '''
+    Throttle a real-time (presumably tick event) stream to a uniform
+    transmission rate, normally for the purposes of throttling a data
+    flow being consumed by a graphics rendering actor which itself is
+    limited by a fixed maximum display rate.
+
+    Though this function isn't documented (nor was intentionally
+    written to be) a token-bucket style algo, it effectively operates
+    as one (we think?).
 
-    # try not to error-out on overruns of the subscribed (chart) client
+    TODO: a less naive throttler, here's some snippets:
+    token bucket by njs:
+    https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
+
+    '''
+    # try not to error-out on overruns of the subscribed client
     stream._ctx._backpressure = True
 
     # TODO: compute the approx overhead latency per cycle

From 32339cb41a1473048acbf1d8313b4827de0ea575 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 7 Mar 2023 20:40:21 -0500
Subject: [PATCH 132/136] Always show a minimum number of bars during startup

This is particularly more "good looking" when we boot with a pair that
doesn't have historical 1s OHLC and thus the fast chart is empty from
the outset.
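
As a rough standalone sketch of the x-range clamp idea (hypothetical
numbers, not the actual chart internals): pinning the left view edge
at least `min_bars * step` behind the right edge guarantees that many
datums stay in view:

    step: float = 60.0  # 1m OHLC sampling period (assumed)
    min_bars: int = 6  # preset minimum datums to keep in view
    r_reset: float = 1678000000.0  # rightmost datum's epoch stamp
    l_reset: float = r_reset - (min_bars * step)
    assert (r_reset - l_reset) / step >= min_bars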
In this case it's a lot nicer to be already zoomed to a comfortable
preset number of "datums in view" even when the history isn't yet
filled in.

Adjusts the chart display `Viz.default_view()` startup to explicitly
ensure this happens via the `do_min_bars=True` flag B)
---
 piker/ui/_dataviz.py | 26 ++++++++++++++++++++------
 piker/ui/_display.py | 20 +++++++++++++++-----
 2 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
index 2672cab04d..d70deb345c 100644
--- a/piker/ui/_dataviz.py
+++ b/piker/ui/_dataviz.py
@@ -1082,12 +1082,10 @@ def default_view(
         data_diff = last_datum - first_datum
         rl_diff = vr - vl
         rescale_to_data: bool = False
-        # new_uppx: float = 1
 
         if rl_diff > data_diff:
             rescale_to_data = True
             rl_diff = data_diff
-            new_uppx: float = data_diff / self.px_width()
 
         # orient by offset from the y-axis including
         # space to compensate for the L1 labels.
@@ -1097,14 +1095,28 @@ def default_view(
             offset = l1_offset
 
             if rescale_to_data:
+                new_uppx: float = data_diff / self.px_width()
                 offset = (offset / uppx) * new_uppx
 
         else:
             offset = (y_offset * step) + uppx*step
 
+        # NOTE: if we are in the midst of start-up and a bunch of
+        # widgets are spawning/rendering concurrently, it's likely the
+        # label size above `l1_offset` won't have yet fully rendered.
+        # Here we try to compensate for that and ensure at least a
+        # static bar gap between the last datum and the y-axis.
+        if (
+            do_min_bars
+            and offset <= (6 * step)
+        ):
+            offset = 6 * step
+
         # align right side of view to the rightmost datum + the selected
         # offset from above.
-        r_reset = (self.graphics.x_last() or last_datum) + offset
+        r_reset = (
+            self.graphics.x_last() or last_datum
+        ) + offset
 
         # no data is in view so check for the only 2 sane cases:
         # - entire view is LEFT of data
@@ -1129,7 +1141,6 @@ def default_view(
             else:
                 log.warning(f'Unknown view state {vl} -> {vr}')
                 return
-
         else:
             # maintain the l->r view distance
             l_reset = r_reset - rl_diff
@@ -1138,7 +1149,11 @@ def default_view(
             do_min_bars
             and (r_reset - l_reset) < min_bars_from_y
         ):
-            l_reset = r_reset - min_bars_from_y
+            l_reset = (
+                (r_reset + offset)
+                -
+                min_bars_from_y * step
+            )
 
         # remove any custom user yrange setttings
         if chartw._static_yrange == 'axis':
@@ -1152,7 +1167,6 @@ def default_view(
 
         if do_ds:
             view.interact_graphics_cycle()
-            view.interact_graphics_cycle()
 
     def incr_info(
         self,
diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index 1ff9c4dc17..3da3380948 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -416,7 +416,9 @@ async def graphics_update_loop(
         ds.vlm_chart = vlm_chart
         ds.vlm_sticky = vlm_sticky
 
-    fast_chart.main_viz.default_view()
+    fast_chart.main_viz.default_view(
+        do_min_bars=True,
+    )
 
     # ds.hist_vars.update({
     #     'i_last_append': 0,
@@ -1456,7 +1458,9 @@ async def display_symbol_data(
 
     for fqsn, flume in feed.flumes.items():
         # size view to data prior to order mode init
-        rt_chart.main_viz.default_view()
+        rt_chart.main_viz.default_view(
+            do_min_bars=True,
+        )
         rt_linked.graphics_cycle()
 
         # TODO: look into this because not sure why it was
@@ -1467,7 +1471,9 @@ async def display_symbol_data(
         # determine if auto-range adjustements should be made.
         # rt_linked.subplots.pop('volume', None)
 
-        hist_chart.main_viz.default_view()
+        hist_chart.main_viz.default_view(
+            do_min_bars=True,
+        )
         hist_linked.graphics_cycle()
 
         godwidget.resize_all()
@@ -1510,10 +1516,14 @@ async def display_symbol_data(
 
         # default view adjuments and sidepane alignment
         # as final default UX touch.
-        rt_chart.main_viz.default_view()
+        rt_chart.main_viz.default_view(
+            do_min_bars=True,
+        )
         await trio.sleep(0)
 
-        hist_chart.main_viz.default_view()
+        hist_chart.main_viz.default_view(
+            do_min_bars=True,
+        )
         hist_viz = hist_chart.get_viz(fqsn)
         await trio.sleep(0)

From 3066b1541e46364447712a1f319c2d02d91470f5 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Thu, 9 Mar 2023 16:27:22 -0500
Subject: [PATCH 133/136] Handle (shorter supported) minor-curve not-in-view

Solve this by always scaling the y-range for the major/target curve
*before* the final overlay scaling loop; this implicitly always solves
the case where the major series is the only one in view.

Tidy up debug print formatting and add some loop-end demarcation
comment lines.
---
 piker/ui/_dataviz.py  |  5 ++++-
 piker/ui/view_mode.py | 45 ++++++++++++++++++++++++++++---------------
 2 files changed, 34 insertions(+), 16 deletions(-)

diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
index d70deb345c..3c686619ba 100644
--- a/piker/ui/_dataviz.py
+++ b/piker/ui/_dataviz.py
@@ -1422,7 +1422,7 @@ def scalars_from_index(
         float,
         float,
         float,
-    ]:
+    ] | None:
         '''
         Calculate and deliver the log-returns scalars specifically
         according to y-data supported on this ``Viz``'s underlying
@@ -1454,6 +1454,9 @@ def scalars_from_index(
         #     yref = arr[read_slc_start][key]
 
         read = arr[read_slc][key]
+        if not read.size:
+            return None
+
         yref = read[0]
         ymn, ymx = self.vs.yrange
         # print(
diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
index f8992a0ad2..5b6b22cc7f 100644
--- a/piker/ui/view_mode.py
+++ b/piker/ui/view_mode.py
@@ -337,8 +337,9 @@ def overlay_viewlists(
 
             # determine start datum in view
             in_view = viz.vs.in_view
-            if not in_view.size:
-                log.warning(f'{viz.name} not in view?')
+            if in_view.size < 2:
+                if debug_print:
+                    print(f'{viz.name} not in view?')
                 continue
 
             row_start = in_view[0]
@@ -501,6 +502,8 @@ def overlay_viewlists(
             )
             profiler(f'{viz.name}@{chart_name} yrange scan complete')
 
+        # __ END OF scan phase (loop) __
+
         # NOTE: if no there were no overlay charts
         # detected/collected (could be either no group detected or
         # chart with a single symbol, thus a single viz/overlay)
@@ -610,10 +613,8 @@ def overlay_viewlists(
         ) = overlay_table[full_disp]
 
         key = 'open' if viz.is_ohlc else viz.name
-        xref = minor_in_view[0]['time']
 
         match method:
-
             # Pin this curve to the "major dispersion" (or other
             # target) curve:
             #
@@ -667,16 +668,18 @@ def overlay_viewlists(
                             f'yref@xref_pin: {yref}\n'
                         )
 
+                    mx_scalars = mx_viz.scalars_from_index(xref)
+                    if mx_scalars is None:
+                        continue
                     (
                         i_start,
                         y_ref_major,
                         r_up_from_major_at_xref,
                         r_down_from_major_at_xref,
-                    ) = mx_viz.scalars_from_index(xref)
+                    ) = mx_scalars
 
                     if debug_print:
                         print(
@@ -680,8 +685,10 @@ def overlay_viewlists(
                             f'mx_xref: {mx_xref}\n'
                             f'major i_start: {i_start}\n'
                             f'y_ref_major: {y_ref_major}\n'
-                            f'r_up_from_major_at_xref {r_up_from_major_at_xref}\n'
-                            f'r_down_from_major_at_xref: {r_down_from_major_at_xref}\n'
+                            f'r_up_from_major_at_xref '
+                            f'{r_up_from_major_at_xref}\n'
+                            f'r_down_from_major_at_xref: '
+                            f'{r_down_from_major_at_xref}\n'
                             f'-----to minor-----\n'
                             f'xref: {xref}\n'
                             f'y_start: {y_start}\n'
                             f'yref: {yref}\n'
@@ -711,8 +718,10 @@ def overlay_viewlists(
 
                         if debug_print:
                             print(
-                                f'RESCALE {mx_viz.name} DUE TO {viz.name} ymn -> {y_min}\n'
-                                f'-> MAJ ymn (w r_down: {r_dn_minor}) -> {mx_ymn}\n\n'
+                                f'RESCALE {mx_viz.name} DUE TO {viz.name} '
+                                f'ymn -> {y_min}\n'
+                                f'-> MAJ ymn (w r_down: {r_dn_minor}) '
+                                f'-> {mx_ymn}\n\n'
                             )
                     # rescale all already scaled curves to new
                    # increased range for this side as
@@ -753,8 +762,10 @@ def overlay_viewlists(
 
                         if debug_print:
                             print(
-                                f'RESCALE {mx_viz.name} DUE TO {viz.name} ymx -> {y_max}\n'
-                                f'-> MAJ ymx (r_up: {r_up_minor} -> {mx_ymx}\n\n'
+                                f'RESCALE {mx_viz.name} DUE TO {viz.name} '
+                                f'ymx -> {y_max}\n'
+                                f'-> MAJ ymx (r_up: {r_up_minor} '
+                                f'-> {mx_ymx}\n\n'
                             )
 
                         for _view in scaled:
@@ -815,6 +826,13 @@ def overlay_viewlists(
                         f'overlay ``method`` is invalid `{method}'
                     )
 
+        # __ END OF transform calc phase (loop) __
+
+        # finally, scale the major target/dispersion curve to
+        # the (possibly re-scaled/modified) values were set in
+        # transform phase loop.
+        mx_view._set_yrange(yrange=(mx_ymn, mx_ymx))
+
         if scaled:
             if debug_print:
                 print(
@@ -861,10 +879,7 @@ def overlay_viewlists(
                     '--------------------------------\n'
                 )
 
-            # finally, scale the major target/dispersion curve to
-            # the (possibly re-scaled/modified) values were set in
-            # transform phase loop.
-            mx_view._set_yrange(yrange=(mx_ymn, mx_ymx))
+            # __ END OF overlay scale phase (loop) __
 
         if debug_print:
             print(

From 5c697de58e1ff02dc19de8c70cbadfab447af63b Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Thu, 9 Mar 2023 16:32:50 -0500
Subject: [PATCH 134/136] Presume never handling not-in-view case for minor
 curves

---
 piker/ui/view_mode.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
index 5b6b22cc7f..42fefd62e8 100644
--- a/piker/ui/view_mode.py
+++ b/piker/ui/view_mode.py
@@ -668,16 +668,18 @@ def overlay_viewlists(
                             f'yref@xref_pin: {yref}\n'
                         )
 
-                    mx_scalars = mx_viz.scalars_from_index(xref)
-                    if mx_scalars is None:
-                        continue
+                    # if we need to handle not-in-view cases?
+                    # mx_scalars = mx_viz.scalars_from_index(xref)
+                    # if mx_scalars is None:
+                    #     continue
 
                     (
                         i_start,
                         y_ref_major,
                         r_up_from_major_at_xref,
                         r_down_from_major_at_xref,
-                    ) = mx_scalars
+                    ) = mx_viz.scalars_from_index(xref)
+                    # ) = mx_scalars

From 1aab9f1f81431f204fcc7ee8e57be5e3b3e7c5ef Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Fri, 10 Mar 2023 09:54:32 -0500
Subject: [PATCH 135/136] Actually yes, we need to handle empty in-view
 range..

---
 piker/ui/view_mode.py | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
index 42fefd62e8..ecb62557af 100644
--- a/piker/ui/view_mode.py
+++ b/piker/ui/view_mode.py
@@ -668,18 +668,17 @@ def overlay_viewlists(
                             f'yref@xref_pin: {yref}\n'
                         )
 
-                    # if we need to handle not-in-view cases?
-                    # mx_scalars = mx_viz.scalars_from_index(xref)
-                    # if mx_scalars is None:
-                    #     continue
-
+                    # XXX: we need to handle not-in-view cases?
+                    # still not sure why or when tf this happens..
+                    mx_scalars = mx_viz.scalars_from_index(xref)
+                    if mx_scalars is None:
+                        continue
                     (
                         i_start,
                         y_ref_major,
                         r_up_from_major_at_xref,
                         r_down_from_major_at_xref,
-                    ) = mx_viz.scalars_from_index(xref)
-                    # ) = mx_scalars
+                    ) = mx_scalars

From 889e92079611e2ff0d1a18793af081b401646474 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Mon, 13 Mar 2023 12:18:54 -0400
Subject: [PATCH 136/136] Short-circuit rendering on no 1d-data; avoid m4
 layer crash

---
 piker/ui/_render.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/piker/ui/_render.py b/piker/ui/_render.py
index 69bd37c40c..fb41b696b8 100644
--- a/piker/ui/_render.py
+++ b/piker/ui/_render.py
@@ -192,6 +192,10 @@ def render(
 
         ) = fmt_out
 
+        if not x_1d.size:
+            log.warning(f'{array_key} has no `.size`?')
+            return
+
         # redraw conditions
         if (
             prepend_length > 0
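
As a closing aside on the throttling patch above: a minimal
token-bucket style limiter sketch in plain sync python (a hypothetical
standalone helper, not the actual `trio`-based `uniform_rate_send()`
impl):

    import time

    class TokenBucket:
        '''
        Allow bursts of up to `capacity` sends while replenishing
        `rate` tokens per second over time.

        '''
        def __init__(self, rate: float, capacity: float):
            self.rate = rate  # tokens replenished per second
            self.capacity = capacity  # max burst size
            self.tokens = capacity
            self.last = time.monotonic()

        def try_send(self) -> bool:
            now = time.monotonic()
            # replenish proportionally to elapsed time, capped at
            # the burst capacity.
            self.tokens = min(
                self.capacity,
                self.tokens + (now - self.last) * self.rate,
            )
            self.last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False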