Joash2024 committed
Commit
628f881
1 Parent(s): f371603

feat: configure for ZeroGPU with A100

Files changed (3):
  1. README.md +5 -4
  2. app.py +4 -1
  3. requirements.txt +1 -0
README.md CHANGED
@@ -4,18 +4,19 @@ emoji: 🧮
 colorFrom: blue
 colorTo: green
 sdk: gradio
-sdk_version: 4.8.0
+sdk_version: 4.44.1
 app_file: app.py
 pinned: false
 hardware:
-  accelerator: a100
-  gpu: true
+  zerogpu: true
+  memory: 16
 python_packages:
   - "torch>=2.0.0"
   - "transformers>=4.30.0"
   - "accelerate>=0.20.0"
   - "peft==0.5.0"
   - "numpy>=1.21.0"
+  - "spaces>=0.1.0"
 ---
 
 # Mathematics Derivative Solver V2
@@ -29,7 +30,7 @@ This Space demonstrates our fine-tuned math model for solving derivatives. We us
 
 - Step-by-step derivative solutions
 - LaTeX notation support
-- A100 GPU acceleration
+- A100 GPU acceleration via ZeroGPU
 - Float16 precision for efficient inference
 
 ## Supported Functions
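The metadata above advertises float16 inference with a fine-tuned adapter (note the peft pin). As a minimal sketch of what that loading path can look like with these packages (the adapter repo id below is a placeholder; the commit does not show it):

# Sketch only: base model in float16 with a PEFT adapter attached,
# matching the README's "Float16 precision" bullet.
# ADAPTER_MODEL is hypothetical; this commit does not name the adapter repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
ADAPTER_MODEL = "Joash2024/derivative-adapter"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, ADAPTER_MODEL)
model.eval()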
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
+import spaces
 
 # Model configurations
 BASE_MODEL = "HuggingFaceTB/SmolLM2-1.7B-Instruct"  # Base model
@@ -36,9 +37,10 @@ def format_prompt(function: str) -> str:
 Function: {function}
 The derivative of this function is:"""
 
+@spaces.GPU
 def generate_derivative(function: str, max_length: int = 100) -> str:
     """Generate derivative for a given function"""
-    # Format the prompt
+    # Format prompt
     prompt = format_prompt(function)
 
     # Tokenize
@@ -61,6 +63,7 @@ def generate_derivative(function: str, max_length: int = 100) -> str:
 
     return derivative
 
+@spaces.GPU
 def solve_derivative(function: str) -> str:
     """Solve derivative and format output"""
     if not function:
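The pattern introduced here is the standard ZeroGPU flow: the process starts on CPU, and spaces.GPU attaches a GPU only while a decorated function runs. A self-contained sketch of that flow under the same model setup (generation arguments are illustrative):

# ZeroGPU sketch: outside a ZeroGPU Space the decorator is designed to be a
# no-op, so the body is ordinary PyTorch/Transformers code.
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE_MODEL = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16)

@spaces.GPU  # a GPU is attached only for the duration of this call
def generate(prompt: str, max_new_tokens: int = 100) -> str:
    model.to("cuda")  # move (or keep) the weights on the attached GPU
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output[0], skip_special_tokens=True)

Decorating both generate_derivative and solve_derivative, as the diff does, lets either entry point request a GPU; strictly, only the code path that actually touches CUDA needs the decorator.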
requirements.txt CHANGED
@@ -4,3 +4,4 @@ accelerate>=0.20.0
 peft==0.5.0
 gradio==4.44.1
 numpy>=1.21.0
+spaces>=0.1.0
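The new spaces dependency provides the spaces.GPU decorator imported in app.py; the loose >=0.1.0 pin simply lets the Space resolve to the current release of the package.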