philschmid (HF staff) committed
Commit 9327b57
1 Parent(s): ab41b94

added custom handler for sharded loading

Files changed (3)
  1. README.md +9 -1
  2. handler.py +27 -0
  3. requirements.txt +1 -0
README.md CHANGED
@@ -1 +1,9 @@
- # Shareded fp16 copy of [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B)
+ ---
+ tags:
+ - endpoints-template
+ library_name: generic
+ ---
+
+ # Sharded fp16 copy of [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B)
+
+ > This is a fork of [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) with sharded fp16 weights, implementing a custom `handler.py` as an example of how to use `gpt-j` with [inference-endpoints](https://hf.co/inference-endpoints).
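The `handler.py` added below implements the Inference Endpoints contract: a JSON body with `inputs` and optional `parameters`. A minimal request sketch against a deployed endpoint; the endpoint URL and token are placeholders, not part of this commit:

```python
import requests

# Placeholders -- replace with your own endpoint URL and HF token.
ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "hf_..."

payload = {
    "inputs": "My name is Philipp and I",
    # optional generation kwargs, forwarded to the pipeline by the handler
    "parameters": {"max_new_tokens": 20, "temperature": 0.7},
}

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
    json=payload,
)
print(response.json())  # e.g. [{"generated_text": "My name is Philipp and I ..."}]
```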
handler.py ADDED
@@ -0,0 +1,27 @@
+ import torch
+ from typing import Dict, List, Any
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+
+ # use the first GPU if one is available, otherwise fall back to CPU
+ device = 0 if torch.cuda.is_available() else -1
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load tokenizer and sharded weights (low_cpu_mem_usage requires `accelerate`)
+         tokenizer = AutoTokenizer.from_pretrained(path)
+         model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True)
+         # create inference pipeline
+         self.pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         # forward the inputs, passing any generation parameters as kwargs
+         if parameters is not None:
+             prediction = self.pipeline(inputs, **parameters)
+         else:
+             prediction = self.pipeline(inputs)
+         # the pipeline returns a list of {"generated_text": ...} dicts
+         return prediction
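Before deploying, the handler can be smoke-tested locally. A minimal sketch, assuming this repository has been cloned to `./gpt-j-6B-fp16-sharded` (the path is illustrative) and that enough memory is available for GPT-J in fp16 (roughly 12 GB of weights):

```python
from handler import EndpointHandler

# path is an assumption: point it at a local clone of this repository
handler = EndpointHandler(path="./gpt-j-6B-fp16-sharded")

payload = {
    "inputs": "My name is Philipp and I",
    "parameters": {"max_new_tokens": 20},
}
print(handler(payload))  # [{"generated_text": "..."}]
```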
requirements.txt ADDED
@@ -0,0 +1 @@
+ accelerate
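`accelerate` is required by the `low_cpu_mem_usage=True` flag used in `handler.py`: it lets `from_pretrained` load the sharded checkpoint shard by shard instead of first allocating the full model in CPU memory.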