import torch
import transformers
from torch import cuda
from transformers import BitsAndBytesConfig, StoppingCriteria, StoppingCriteriaList
from typing import Dict, List, Any

class PreTrainedPipeline:
    def __init__(self, path=""):
        # fall back to the hosted checkpoint when no local path is given
        path = path or "oleksandrfluxon/mpt-7b-instruct-evaluate"
        print("===> path", path)

        device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
        print("===> device", device)

        model = transformers.AutoModelForCausalLM.from_pretrained(
            path,
            trust_remote_code=True,
            quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # 8-bit loading requires the `bitsandbytes` library
            max_seq_len=8192,
            init_device=device
        )
        model.eval()
        # no explicit model.to(device): 8-bit weights are already placed on the GPU at load time
        print(f"===> Model loaded on {device}")

        # the instruct checkpoint shares the base MPT-7B tokenizer
        tokenizer = transformers.AutoTokenizer.from_pretrained("mosaicml/mpt-7b")

        # token id sequences that mark the start of a new chat turn;
        # generation should halt as soon as one of them is produced
        stop_token_ids = [
            tokenizer.convert_tokens_to_ids(x) for x in [
                ['Human', ':'], ['AI', ':']
            ]
        ]
        stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
        print("===> stop_token_ids", stop_token_ids)

        # define custom stopping criteria object
        class StopOnTokens(StoppingCriteria):
            def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
                for stop_ids in stop_token_ids:
                    if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                        return True
                return False

        stopping_criteria = StoppingCriteriaList([StopOnTokens()])

        self.pipeline = transformers.pipeline(
            model=model, tokenizer=tokenizer,
            return_full_text=True,  # langchain expects the full text
            task='text-generation',
            # generation parameters are passed here as well
            stopping_criteria=stopping_criteria,  # without this the model rambles during chat
            do_sample=True,  # sampling must be enabled for temperature/top_p/top_k to take effect
            temperature=0.1,  # 'randomness' of outputs: 0.0 is the min, 1.0 the max
            top_p=0.15,  # sample from the top tokens whose cumulative probability reaches 15%
            top_k=0,  # 0 disables top-k filtering, so sampling relies on top_p alone
            max_new_tokens=1000,  # max number of tokens to generate in the output
            repetition_penalty=1.1  # without this the output begins repeating
        )
        
        print("===> init finished")

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
       data args:
            inputs (:obj: `str`)
            parameters (:obj: `str`)
      Return:
            A :obj:`str`: todo
        """
        # get inputs
        inputs = data.pop("inputs",data)
        parameters = data.pop("parameters", {})
        date = data.pop("date", None)
        print("===> inputs", inputs)
        print("===> parameters", parameters)

        result = self.pipeline(inputs, **parameters)
        print("===> result", result)

        return result
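

# --- Usage sketch (illustrative, not part of the original handler) ---
# A minimal local smoke test, assuming a CUDA GPU with enough memory for the
# 8-bit MPT-7B weights; the prompt follows the Human:/AI: turn markers that
# the stopping criteria above watch for.
if __name__ == "__main__":
    handler = PreTrainedPipeline()
    result = handler({
        "inputs": "Human: Explain what a tokenizer does in one sentence.\nAI:",
        "parameters": {"max_new_tokens": 64}
    })
    print(result)  # e.g. [{'generated_text': '...'}]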