import gradio as gr
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, DataCollatorForSeq2Seq

# Load the dataset
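# Each line of dataset.jsonl is expected to be a JSON object with "input" and "output" fields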
dataset = load_dataset("json", data_files="dataset.jsonl")

# Load the pre-trained model and tokenizer
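# Note: CodeGen-2B-multi is a ~2B-parameter model, so full fine-tuning typically needs a large GPU (or reduced precision)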
model_name = "Salesforce/codegen-2B-multi"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
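# CodeGen's tokenizer has no pad token by default, so reuse the EOS token for padding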
tokenizer.pad_token = tokenizer.eos_token

# Tokenize the dataset.
# CodeGen is a decoder-only (causal) model, so each prompt and its target are concatenated
# into a single sequence and the labels mirror the input_ids; padding is left to the
# data collator below so batches are padded dynamically.
def tokenize_function(examples):
    texts = [
        inp + "\n" + out + tokenizer.eos_token
        for inp, out in zip(examples["input"], examples["output"])
    ]
    tokenized = tokenizer(
        texts,
        truncation=True,  # Truncate sequences longer than max_length
        max_length=512,   # Adjust max length if needed
    )
    tokenized["labels"] = [ids.copy() for ids in tokenized["input_ids"]]
    return tokenized

tokenized_dataset = dataset.map(tokenize_function, batched=True)

# Sanity check: print the tokenized length of each training example
for i, example in enumerate(tokenized_dataset["train"]):
    print(f"Example {i}: {len(example['input_ids'])} tokens")

# Define training arguments
training_args = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=1,  # Smaller batch size
    gradient_accumulation_steps=8,  # Accumulate gradients to simulate larger batch size
    num_train_epochs=3,
    logging_dir="./logs",
    logging_strategy="steps",
    save_strategy="epoch",
    eval_strategy="epoch",
    learning_rate=5e-5,
    overwrite_output_dir=True,
)

data_collator = DataCollatorForSeq2Seq(
    tokenizer,
    model=model,
    padding=True,  # Dynamic padding; labels are padded with -100 so padded positions are ignored by the loss
    return_tensors="pt"
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["train"],  # No separate validation split; the training set is reused for evaluation
    data_collator=data_collator,  # Use dynamic padding
)

# Train the model
trainer.train()

# Save the fine-tuned model
trainer.save_model("./fine_tuned_model")
tokenizer.save_pretrained("./fine_tuned_model")

# Load the fine-tuned model for inference
fine_tuned_model = AutoModelForCausalLM.from_pretrained("./fine_tuned_model")
fine_tuned_tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_model")

# Define a Gradio interface for testing the model
def generate_cypress_code(prompt):
    inputs = fine_tuned_tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask along with input_ids and set pad_token_id to avoid generation warnings
    outputs = fine_tuned_model.generate(**inputs, max_length=150, num_return_sequences=1,
                                        pad_token_id=fine_tuned_tokenizer.eos_token_id)
    return fine_tuned_tokenizer.decode(outputs[0], skip_special_tokens=True)

# Launch the Gradio interface
interface = gr.Interface(
    fn=generate_cypress_code,
    inputs="text",
    outputs="text",
    title="Cypress Test Generator",
    description="Enter a description of the test you want to generate Cypress code for.",
)
interface.launch()
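# launch() serves the app locally; pass share=True for a temporary public link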