Update README.md
Browse files
README.md
CHANGED
@@ -11,6 +11,125 @@ tags:
|
|
11 |
- trl
|
12 |
---
|
13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
# Uploaded model
|
15 |
|
16 |
- **Developed by:** patched-codes
|
|
|
11 |
- trl
|
12 |
---
|
13 |
|
14 |
+
# How to use
|
15 |
+
|
16 |
+
- We use Unsloth for faster inference and load the adapter:
|
17 |
+
|
18 |
+
```python
|
19 |
+
from unsloth import FastLanguageModel
|
20 |
+
max_seq_length = 8192
|
21 |
+
dtype = None
|
22 |
+
load_in_4bit = True
|
23 |
+
model, tokenizer = FastLanguageModel.from_pretrained(
|
24 |
+
model_name = "patched-codes/Llama-3.2-1B-FastApply",
|
25 |
+
max_seq_length = max_seq_length,
|
26 |
+
dtype = dtype,
|
27 |
+
load_in_4bit = load_in_4bit,
|
28 |
+
)
|
29 |
+
FastLanguageModel.for_inference(model) # Enable native 2x faster inference
|
30 |
+
```
|
31 |
+
|
32 |
+
- The model takes the original code and an update snippet as input, and generates the final updated code:
|
33 |
+
|
34 |
+
```python
|
35 |
+
original_code = """import React from 'react';
|
36 |
+
import { Loader } from 'lucide-react';
|
37 |
+
|
38 |
+
interface ButtonProps {
|
39 |
+
text: string;
|
40 |
+
onClick?: () => void;
|
41 |
+
loading?: boolean;
|
42 |
+
disabled?: boolean;
|
43 |
+
icon?: React.ReactNode;
|
44 |
+
}
|
45 |
+
|
46 |
+
const Button: React.FC<ButtonProps> = ({
|
47 |
+
text,
|
48 |
+
onClick,
|
49 |
+
loading = false,
|
50 |
+
disabled = false,
|
51 |
+
icon
|
52 |
+
}) => (
|
53 |
+
<button
|
54 |
+
className="bg-blue-500 text-white p-2 rounded flex items-center gap-2"
|
55 |
+
onClick={onClick}
|
56 |
+
disabled={disabled || loading}
|
57 |
+
>
|
58 |
+
{loading ? <Loader className="animate-spin" /> : icon}
|
59 |
+
{text}
|
60 |
+
</button>
|
61 |
+
);
|
62 |
+
|
63 |
+
export default Button;
|
64 |
+
"""
|
65 |
+
|
66 |
+
update_snippet = """interface ButtonProps {
|
67 |
+
variant?: 'primary' | 'secondary' | 'danger';
|
68 |
+
size?: 'small' | 'medium' | 'large';
|
69 |
+
// ... other props
|
70 |
+
}
|
71 |
+
|
72 |
+
const Button: React.FC<ButtonProps> = ({
|
73 |
+
variant = 'primary',
|
74 |
+
size = 'medium',
|
75 |
+
// ... other props
|
76 |
+
}) => (
|
77 |
+
<button
|
78 |
+
className={`flex items-center gap-2 rounded ${
|
79 |
+
size === 'small' ? 'p-1 text-sm' :
|
80 |
+
size === 'large' ? 'p-3 text-lg' :
|
81 |
+
'p-2 text-md'
|
82 |
+
} ${
|
83 |
+
variant === 'primary' ? 'bg-blue-500 text-white' :
|
84 |
+
variant === 'secondary' ? 'bg-gray-500 text-white' :
|
85 |
+
'bg-red-500 text-white'
|
86 |
+
}`}
|
87 |
+
// ... other attributes
|
88 |
+
>
|
89 |
+
// ... existing code ...
|
90 |
+
</button>
|
91 |
+
);
|
92 |
+
"""
|
93 |
+
```
|
94 |
+
|
95 |
+
- Prepare your input following the prompt structure:
|
96 |
+
|
97 |
+
```python
|
98 |
+
input_text = f"""
|
99 |
+
Merge all changes from the <update> snippet into the <code> below.
|
100 |
+
- Preserve the code's structure, order, comments, and indentation exactly.
|
101 |
+
- Output only the updated code, enclosed within <updated-code> and </updated-code> tags.
|
102 |
+
- Do not include any additional text, explanations, placeholders, ellipses, or code fences.
|
103 |
+
|
104 |
+
<code>{original_code}</code>
|
105 |
+
|
106 |
+
<update>{update_snippet}</update>
|
107 |
+
|
108 |
+
Provide the complete updated code.
|
109 |
+
"""
|
110 |
+
|
111 |
+
messages = [
|
112 |
+
{"role": "system", "content": "You are a coding assistant that helps merge code updates, ensuring every modification is fully integrated."},
|
113 |
+
{"role": "user", "content": input_text.strip()},
|
114 |
+
]
|
115 |
+
|
116 |
+
inputs = tokenizer.apply_chat_template(
|
117 |
+
messages,
|
118 |
+
tokenize = True,
|
119 |
+
add_generation_prompt = True, # Must add for generation
|
120 |
+
return_tensors = "pt",
|
121 |
+
).to("cuda")
|
122 |
+
|
123 |
+
from transformers import TextStreamer
|
124 |
+
text_streamer = TextStreamer(tokenizer, skip_prompt = True)
|
125 |
+
output = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 8192,
|
126 |
+
use_cache = True, temperature = 1.5, min_p = 0.1)
|
127 |
+
|
128 |
+
response = tokenizer.decode(output[0][len(inputs[0]):])
|
129 |
+
|
130 |
+
updated_code = response.split("<updated-code>")[1].split("</updated-code>")[0]
|
131 |
+
```
|
132 |
+
|
133 |
# Uploaded model
|
134 |
|
135 |
- **Developed by:** patched-codes
|