Text Generation
Transformers
PyTorch
English
gptj
Inference Endpoints
adamluc committed on
Commit
d8b63ce
·
1 Parent(s): 0bfe74a

Delete handler.py

Browse files
Files changed (1) hide show
  1. handler.py +0 -27
handler.py DELETED
@@ -1,27 +0,0 @@
# Dependencies for the custom Inference Endpoints handler.
import torch
from typing import Dict, List, Any
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Pick the device for the transformers pipeline: CUDA device index 0 when a
# GPU is available, otherwise -1, which the pipeline API treats as CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = -1
class EndpointHandler:
    """Custom Inference Endpoints handler wrapping a text-generation pipeline."""

    def __init__(self, path: str = ""):
        """Load the tokenizer and model from *path* and build the pipeline.

        Args:
            path: Directory (or hub id) containing the model artifacts;
                Inference Endpoints passes the repository path here.
        """
        tokenizer = AutoTokenizer.from_pretrained(path)
        # low_cpu_mem_usage avoids materializing an extra full copy of the
        # weights in RAM while loading.
        model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True)
        # Create the inference pipeline on the module-level `device`.
        self.pipeline = pipeline(
            "text-generation", model=model, tokenizer=tokenizer, device=device
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Run generation for one request payload.

        Args:
            data: Request body. Expected keys: "inputs" (the prompt; if
                absent, the whole payload is used as the prompt) and,
                optionally, "parameters" (kwargs forwarded to the pipeline).

        Returns:
            The pipeline output — for text-generation, a list of dicts each
            carrying a "generated_text" entry. (The original annotation
            ``List[List[Dict[str, float]]]`` did not match this; ``data`` was
            also annotated ``Any`` despite requiring dict ``.pop`` access.)
        """
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)

        # Forward user-supplied generation kwargs only when present, so the
        # pipeline's own defaults apply otherwise.
        if parameters is not None:
            prediction = self.pipeline(inputs, **parameters)
        else:
            prediction = self.pipeline(inputs)
        return prediction