"""FastAPI service exposing a text-generation endpoint backed by a
causal LM fine-tuned for Canada-immigration FAQ answers."""

from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the tokenizer and model once at import time so every request reuses
# the same pipeline instead of re-loading the weights per call.
tokenizer = AutoTokenizer.from_pretrained(
    "mayanklad/faq-canada-immigration-tokenizer"
)
model = AutoModelForCausalLM.from_pretrained("mayanklad/faq-canada-immigration")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

app = FastAPI()


@app.get("/generate")
def generate(text: str) -> dict:
    """Generate a continuation of *text* with the FAQ model.

    Parameters
    ----------
    text : str
        The prompt, supplied as the ``text`` query parameter.

    Returns
    -------
    dict
        JSON payload with the generated text under the key ``"output"``.

    Notes
    -----
    The model is ``mayanklad/faq-canada-immigration``; see
    https://huggingface.co/mayanklad/faq-canada-immigration
    """
    output = pipe(text)
    # pipeline("text-generation") returns a list of candidate dicts;
    # take the first candidate's generated text.
    return {"output": output[0]["generated_text"]}