File size: 2,727 Bytes
7e192c1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################

# NOTE(review): banner says RWKV-4-World but MODEL_FILE below is an RWKV-5 checkpoint — confirm intent.
print('\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n')

import os, re
import json

# These environment flags are read by the rwkv package at import time,
# so they must be assigned BEFORE the `from rwkv...` imports below.
os.environ['RWKV_JIT_ON'] = '0' #### set these before import RWKV
os.environ["RWKV_CUDA_ON"] = '0' #### set to '1' to compile CUDA kernel (10x faster) - requires c++ compiler & cuda libraries

from rwkv.model import RWKV #### pip install rwkv --upgrade
from rwkv.utils import PIPELINE, PIPELINE_ARGS

# Path to the model weights (extension-less, as expected by rwkv); adjust to your local copy.
MODEL_FILE = '../../RWKV-5-World-3B-v2-20231113-ctx4096'

# Load the model on GPU in bfloat16 — requires a CUDA-capable device with this strategy string.
model = RWKV(model=MODEL_FILE, strategy='cuda bf16')
pipeline = PIPELINE(model, "rwkv_vocab_v20230424") #### vocab for rwkv-4-world models


def my_qa_generator(ctx, length):
    """Generate up to *length* tokens of continuation for prompt *ctx*.

    Samples with low top_p (near-greedy) plus a decaying repetition
    penalty; stops at token 0 (<|endoftext|>) or at a double newline.
    Only flushes decoded text once it is valid UTF-8 and does not end
    with a newline, so multi-byte tokens are never split mid-character.
    Returns the stripped generated string.
    """
    tokens = []          # all sampled token ids so far
    flushed = 0          # how many of `tokens` have been decoded into `text`
    text = ''
    penalty = {}         # token id -> repetition-penalty weight
    state = None
    token = None

    for step in range(length):
        # First step feeds the whole prompt; later steps feed the last token only.
        if step == 0:
            out, state = pipeline.model.forward(pipeline.encode(ctx), state)
        else:
            out, state = pipeline.model.forward([token], state)

        # Penalize previously seen tokens (stronger than usual because top_p is low).
        for t in penalty:
            out[t] -= 0.4 + penalty[t] * 0.4

        token = pipeline.sample_logits(out, temperature=1.0, top_p=0.2)
        if token == 0:
            # <|endoftext|>
            break
        tokens.append(token)

        # Decay all penalties, then bump the one for the token just emitted.
        for t in penalty:
            penalty[t] *= 0.996
        penalty[token] = 1 + penalty.get(token, 0)

        # Decode only the not-yet-flushed tail of the token stream.
        tmp = pipeline.decode(tokens[flushed:])
        if ('\ufffd' not in tmp) and (not tmp.endswith('\n')):
            # Valid UTF-8 and no trailing newline: safe to flush.
            text += tmp
            flushed = step + 1
        elif '\n\n' in tmp:
            # Paragraph break ends the answer.
            text += tmp.rstrip()
            break

    return text.strip()


def bench():
    """Score the model on the heval_v1 prefix-match benchmark.

    Loads ``heval_v1.json`` (a list of ``{'question': str, 'answer': [str, ...]}``
    records), generates a short continuation for each question, and counts the
    question as correct when the continuation starts with ANY accepted answer.
    Prints a running tally per hit and the final percentage score.
    """
    # `with` ensures the file handle is closed (original leaked it).
    with open('heval_v1.json', 'r', encoding='utf-8') as f:
        data = json.load(f)

    yes = 0
    for i, q in enumerate(data):
        question = q['question']
        ctx = my_qa_generator(question, 6)
        for ans in q['answer']:
            if ctx.startswith(ans):
                yes += 1
                print(i, yes, len(data), yes / (i + 1))
                # Count each question at most once, even if several accepted
                # answers are prefixes of the output (the original set an
                # unused `flag` for this purpose but never acted on it).
                break

    print('Score : ', yes / len(data) * 100)


# Run the benchmark only when executed as a script, so importing this
# module for its helpers does not trigger model inference.
if __name__ == "__main__":
    bench()