generated from bananaml/serverless-template
app.py
# from transformers import pipeline
from sentence_transformers import SentenceTransformer
import torch
# Init runs once on server startup.
# Load your model to GPU as a global variable here using the variable name "model".
def init():
    global model
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # model = pipeline('fill-mask', model='bert-base-uncased', device=device)
    model = SentenceTransformer('multi-qa-mpnet-base-dot-v1', device=device)
# Inference runs for every server call.
# Reference your preloaded global model variable here.
def inference(model_inputs: dict) -> dict:
    global model

    # Parse out your arguments
    texts = model_inputs.get('text', None)
    if texts is None:
        return {'message': "No text provided"}

    # Run the model
    embeddings = model.encode(texts)

    # Return the results as a JSON-serializable dictionary
    return {'embeddings': embeddings.tolist()}
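
A minimal local smoke test might look like the sketch below. It assumes this file is importable as app; the file name sanity_check.py, the example sentences, and the shape check are illustrative, not part of the template.

# sanity_check.py -- hypothetical local smoke test for app.py (assumption: app.py is importable as "app")
import app

app.init()  # loads the SentenceTransformer model once, as the server would on startup

# 'text' may be a single string or a list of strings; model.encode accepts both
payload = {'text': ['How do I bake bread?', 'What is the capital of France?']}
result = app.inference(payload)

# multi-qa-mpnet-base-dot-v1 produces 768-dimensional embeddings
print(len(result['embeddings']), len(result['embeddings'][0]))  # -> 2 768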