[WIP] Template for InferenceAPI #11

Open
wants to merge 1 commit into base: main
10 changes: 6 additions & 4 deletions setup.py
@@ -96,8 +96,10 @@ def _prepare_extras():
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
],
entry_points="""
[console_scripts]
torchserve-dashboard=torchserve_dashboard.cli:main
""",
    entry_points={
        'console_scripts': ['torchserve-management=torchserve_dashboard.cli:manage',
                            'torchserve-inference=torchserve_dashboard.cli:ui']
        # tentative: torchserve-dashboard may spin up both independently.
        # definitely want them on different ports (should have explicit port assignments too, to avoid confusion)
    }
)
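Since both new commands forward their arguments to streamlit.cli.main_run, the port concern noted in the comment above can probably be handled at launch time rather than in setup.py: assuming the configurator_options decorator exposes streamlit's server.port option as a CLI flag, something like torchserve-management --server.port 8501 and torchserve-inference --server.port 8502 would keep the two UIs apart. The port numbers here are arbitrary examples, not part of this PR.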
Empty file.
22 changes: 22 additions & 0 deletions torchserve_dashboard/api/inference_api.py
@@ -0,0 +1,22 @@
import logging
from typing import Callable, Optional

import httpx
from httpx import Response

logger = logging.getLogger(__name__)


class InferenceAPI:
    def __init__(self, address: str, error_callback: Optional[Callable] = None) -> None:
        self.address = address.rstrip("/")
        self.error_callback = error_callback

    # get available models
    # determine type of model (NOT currently possible, we don't know which endpoint maps to what type)
    # auto generate python client based on model spec? (use library)
    # or define explicitly like below (not sustainable)
    def image_classify(self, endpoint: str, image: bytes) -> Response:
        # put the image bytes in the request body and POST to the inference endpoint
        res = httpx.post(self.address + endpoint, data=image)
        if res.status_code != 200:
            logger.error("Inference request to %s failed with status %s", endpoint, res.status_code)
            if self.error_callback:
                self.error_callback(res)
        return res
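A minimal usage sketch for the stub above, assuming a TorchServe instance with its inference API on the default port 8080 and a hypothetical model named resnet18 already registered (the file name and model name are placeholders, not part of this PR):

from torchserve_dashboard.api.inference_api import InferenceAPI

api = InferenceAPI("http://127.0.0.1:8080")
with open("kitten.jpg", "rb") as f:
    response = api.image_classify("/predictions/resnet18", f.read())  # POST to TorchServe's predictions endpoint
print(response.json())  # e.g. a class-name -> probability mapping for an image classifier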
File renamed without changes.
15 changes: 13 additions & 2 deletions torchserve_dashboard/cli.py
@@ -10,7 +10,18 @@
@configurator_options
@click.argument("args", nargs=-1)
@click.pass_context
def main(ctx: click.Context, args: Any, **kwargs: Any):
def manage(ctx: click.Context, args: Any, **kwargs: Any):
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'dash.py')
filename = os.path.join(dirname, 'ui/management_dash.py')
ctx.forward(streamlit.cli.main_run, target=filename, args=args, **kwargs)


@click.command(context_settings=dict(ignore_unknown_options=True,
allow_extra_args=True))
@configurator_options
@click.argument("args", nargs=-1)
@click.pass_context
def ui(ctx: click.Context, args: Any, **kwargs: Any):
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'ui/inference_dash.py')
ctx.forward(streamlit.cli.main_run, target=filename, args=args, **kwargs)
Empty file.
File renamed without changes
16 changes: 16 additions & 0 deletions torchserve_dashboard/ui/inference_dash.py
@@ -0,0 +1,16 @@
import streamlit as st

#def ImageClassifier Component (may auto generate based on spec)


#def ImageSegmentation Component


#def TextClassifier Component

#...

# list available endpoints
# show component based on model type (NOT currently possible, we don't know which endpoint maps to what type)
# call endpoint api with inputs

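One possible shape for the first of these components, sketched against the InferenceAPI stub above (the default address, endpoint, and widget layout are assumptions, not part of this commit):

import streamlit as st

from torchserve_dashboard.api.inference_api import InferenceAPI

st.title("TorchServe Inference")

address = st.sidebar.text_input("Inference API address", "http://127.0.0.1:8080")
endpoint = st.sidebar.text_input("Prediction endpoint", "/predictions/my_model")
uploaded = st.file_uploader("Image", type=["jpg", "jpeg", "png"])

if uploaded is not None and st.button("Classify"):
    api = InferenceAPI(address)
    res = api.image_classify(endpoint, uploaded.read())  # send the raw image bytes to TorchServe
    st.json(res.json())  # display the class/probability response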
@@ -4,7 +4,8 @@
import streamlit as st
from httpx import Response

from torchserve_dashboard.api import ManagementAPI, LocalTS
from torchserve_dashboard.api.management_api import ManagementAPI, LocalTS
from torchserve_dashboard import _PACKAGE_ROOT
from pathlib import Path

st.set_page_config(
@@ -48,7 +49,7 @@ def check_args(args):
metrics_location = args.metrics_location
if not os.path.exists(config_path):
st.write(f"Can't find config file at {config_path}. Using default config instead")
config_path = os.path.join(os.path.dirname(__file__), "default.torchserve.properties")
config_path = os.path.join(_PACKAGE_ROOT, "default.torchserve.properties")
if os.path.exists(config_path):
config = open(config_path, "r").readlines()
for c in config:
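Moving the dashboards into a ui/ subfolder means os.path.dirname(__file__) no longer points at the directory that ships default.torchserve.properties, hence the switch to _PACKAGE_ROOT. A plausible definition in torchserve_dashboard/__init__.py (an assumption; the actual definition is not shown in this diff):

import os

_PACKAGE_ROOT = os.path.dirname(__file__)  # directory of the installed torchserve_dashboard package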