Daemontatox committed on
Commit
d5e703b
·
verified ·
1 Parent(s): 1de0ba5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os, copy
2
  os.environ["RWKV_V7_ON"] = '1'
3
  os.environ["RWKV_JIT_ON"] = '1'
@@ -19,7 +20,7 @@ gpu_h = nvmlDeviceGetHandleByIndex(0)
19
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
20
 
21
  ctx_limit = 4000
22
- gen_limit = 1500
23
 
24
  ########################## text rwkv ################################################################
25
  from rwkv.utils import PIPELINE, PIPELINE_ARGS
@@ -32,7 +33,7 @@ pipeline_v6 = PIPELINE(model_v6, "rwkv_vocab_v20230424")
32
  args = model_v6.args
33
 
34
  penalty_decay = 0.996
35
-
36
  def generate_prompt(instruction, input=""):
37
  instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
38
  input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
 
1
+ import spaces
2
  import os, copy
3
  os.environ["RWKV_V7_ON"] = '1'
4
  os.environ["RWKV_JIT_ON"] = '1'
 
20
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
21
 
22
  ctx_limit = 4000
23
+ gen_limit = 32000
24
 
25
  ########################## text rwkv ################################################################
26
  from rwkv.utils import PIPELINE, PIPELINE_ARGS
 
33
  args = model_v6.args
34
 
35
  penalty_decay = 0.996
36
+ @spaces.GPU()
37
  def generate_prompt(instruction, input=""):
38
  instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
39
  input = input.strip().replace('\r\n','\n').replace('\n\n','\n')