from vllm import LLM, SamplingParams

# Initialize the LLM with CPU-only mode
llm = LLM(model="Tann-dev/sex-chat-dirty-girlfriend", device="cpu")

# Set up sampling parameters
sampling_params = SamplingParams(temperature=0.7, max_tokens=50)

# Define a prompt to generate text
prompt = "Hello, how can I help you?"

# Generate text from the model
output = llm.generate([prompt], sampling_params=sampling_params)

# Print the generated text (each RequestOutput stores its completions in .outputs)
print(f"Generated text: {output[0].outputs[0].text}")