AurelioAguirre committed
Commit 5d274cb · 1 parent: c8519eb

running dummy App

Files changed (2)
  1. main/_app.py +149 -0
  2. main/app.py +13 -142
main/_app.py ADDED
@@ -0,0 +1,149 @@
+ import yaml
+ import sys
+ from fastapi import FastAPI
+ from fastapi.middleware.cors import CORSMiddleware
+ import uvicorn
+ from .api import LLMApi
+ from .routes import router, init_router
+ from .utils.logging import setup_logger
+ from huggingface_hub import login
+ from pathlib import Path
+ from dotenv import load_dotenv
+ import os
+
+ def validate_hf():
+     """
+     Validate Hugging Face authentication.
+     Checks for .env file, loads environment variables, and attempts HF login if token exists.
+     """
+     logger = setup_logger(config, "hf_validation")
+
+     # Check for .env file
+     env_path = Path('.env')
+     if env_path.exists():
+         logger.info("Found .env file, loading environment variables")
+         load_dotenv()
+     else:
+         logger.warning("No .env file found. Fine if you're on Huggingface, but you need one to run locally on your PC.")
+
+     # Check for HF token
+     hf_token = os.getenv('HF_TOKEN')
+     if not hf_token:
+         logger.error("No HF_TOKEN found in environment variables")
+         return False
+
+     try:
+         # Attempt login
+         login(token=hf_token)
+         logger.info("Successfully authenticated with Hugging Face")
+         return True
+     except Exception as e:
+         logger.error(f"Failed to authenticate with Hugging Face: {str(e)}")
+         return False
+
+ def load_config():
+     """Load configuration from yaml file"""
+     with open("main/config.yaml", "r") as f:
+         return yaml.safe_load(f)
+
+ def create_app():
+     config = load_config()
+     logger = setup_logger(config, "main")
+     logger.info("Starting LLM API server")
+
+     app = FastAPI(
+         title="LLM API",
+         description="API for Large Language Model operations",
+         version=config["api"]["version"]
+     )
+
+     # Add CORS middleware
+     app.add_middleware(
+         CORSMiddleware,
+         allow_origins=config["api"]["cors"]["origins"],
+         allow_credentials=config["api"]["cors"]["credentials"],
+         allow_methods=["*"],
+         allow_headers=["*"],
+     )
+
+     # Initialize routes with config
+     init_router(config)
+
+     app.include_router(router, prefix=f"{config['api']['prefix']}/{config['api']['version']}")
+
+     logger.info("FastAPI application created successfully")
+     return app
+
+ def test_locally():
+     """Run local tests for development and debugging"""
+     config = load_config()
+     logger = setup_logger(config, "test")
+     logger.info("Starting local tests")
+
+     api = LLMApi(config)
+     model_name = config["model"]["defaults"]["model_name"]
+
+     logger.info(f"Testing with model: {model_name}")
+
+     # Test download
+     logger.info("Testing model download...")
+     api.download_model(model_name)
+     logger.info("Download complete")
+
+     # Test initialization
+     logger.info("Initializing model...")
+     api.initialize_model(model_name)
+     logger.info("Model initialized")
+
+     # Test embedding
+     test_text = "Dette er en test av embeddings generering fra en teknisk tekst om HMS rutiner på arbeidsplassen."
+     logger.info("Testing embedding generation...")
+     embedding = api.generate_embedding(test_text)
+     logger.info(f"Generated embedding of length: {len(embedding)}")
+     logger.info(f"First few values: {embedding[:5]}")
+
+     # Test generation
+     test_prompts = [
+         "Tell me what happens in a nuclear reactor.",
+     ]
+
+     # Test regular generation
+     logger.info("Testing regular generation:")
+     for prompt in test_prompts:
+         logger.info(f"Prompt: {prompt}")
+         response = api.generate_response(
+             prompt=prompt,
+             system_message="You are a helpful assistant."
+         )
+         logger.info(f"Response: {response}")
+
+     # Test streaming generation
+     logger.info("Testing streaming generation:")
+     logger.info(f"Prompt: {test_prompts[0]}")
+     for chunk in api.generate_stream(
+         prompt=test_prompts[0],
+         system_message="You are a helpful assistant."
+     ):
+         print(chunk, end="", flush=True)
+     print("\n")
+
+     logger.info("Local tests completed")
+
+ app = create_app()
+
+ if __name__ == "__main__":
+     config = load_config()
+     #validate_hf()
+     if len(sys.argv) > 1 and sys.argv[1] == "test":
+         test_locally()
+     else:
+         uvicorn.run(
+             "main.app:app",
+             host=config["server"]["host"],
+             port=config["server"]["port"],
+             log_level="trace",
+             reload=True,
+             workers=1,
+             access_log=False,
+             use_colors=True
+         )
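
main/_app.py loads its settings from main/config.yaml via load_config(), and per its __main__ block passing "test" as the first command-line argument runs test_locally() instead of starting the uvicorn server. As a rough guide only, the sketch below shows the structure that yaml.safe_load would have to return for the code above to work: the key names are taken from the code, every value is an illustrative placeholder, and any keys read inside setup_logger or LLMApi are omitted.

# Sketch only: structure main/_app.py expects from main/config.yaml.
# Key names come from the code above; all values are placeholders.
expected_config = {
    "api": {
        "version": "v1",           # FastAPI version string, also part of the route prefix
        "prefix": "/api",          # router is mounted at f"{prefix}/{version}"
        "cors": {
            "origins": ["*"],      # passed to allow_origins
            "credentials": True,   # passed to allow_credentials
        },
    },
    "server": {
        "host": "0.0.0.0",         # uvicorn bind address
        "port": 7680,              # uvicorn port
    },
    "model": {
        "defaults": {
            "model_name": "org/model-name",  # hypothetical; used by test_locally()
        },
    },
}
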
main/app.py CHANGED
@@ -1,149 +1,20 @@
- import yaml
- import sys
  from fastapi import FastAPI
- from fastapi.middleware.cors import CORSMiddleware
  import uvicorn
- from .api import LLMApi
- from .routes import router, init_router
- from .utils.logging import setup_logger
- from huggingface_hub import login
- from pathlib import Path
- from dotenv import load_dotenv
- import os

- def validate_hf():
-     """
-     Validate Hugging Face authentication.
-     Checks for .env file, loads environment variables, and attempts HF login if token exists.
-     """
-     logger = setup_logger(config, "hf_validation")
+ app = FastAPI()

-     # Check for .env file
-     env_path = Path('.env')
-     if env_path.exists():
-         logger.info("Found .env file, loading environment variables")
-         load_dotenv()
-     else:
-         logger.warning("No .env file found. Fine if you're on Huggingface, but you need one to run locally on your PC.")
+ @app.get("/")
+ async def root():
+     return {"message": "Server is running"}

-     # Check for HF token
-     hf_token = os.getenv('HF_TOKEN')
-     if not hf_token:
-         logger.error("No HF_TOKEN found in environment variables")
-         return False
-
-     try:
-         # Attempt login
-         login(token=hf_token)
-         logger.info("Successfully authenticated with Hugging Face")
-         return True
-     except Exception as e:
-         logger.error(f"Failed to authenticate with Hugging Face: {str(e)}")
-         return False
-
- def load_config():
-     """Load configuration from yaml file"""
-     with open("main/config.yaml", "r") as f:
-         return yaml.safe_load(f)
-
- def create_app():
-     config = load_config()
-     logger = setup_logger(config, "main")
-     logger.info("Starting LLM API server")
-
-     app = FastAPI(
-         title="LLM API",
-         description="API for Large Language Model operations",
-         version=config["api"]["version"]
-     )
-
-     # Add CORS middleware
-     app.add_middleware(
-         CORSMiddleware,
-         allow_origins=config["api"]["cors"]["origins"],
-         allow_credentials=config["api"]["cors"]["credentials"],
-         allow_methods=["*"],
-         allow_headers=["*"],
-     )
-
-     # Initialize routes with config
-     init_router(config)
-
-     app.include_router(router, prefix=f"{config['api']['prefix']}/{config['api']['version']}")
-
-     logger.info("FastAPI application created successfully")
-     return app
-
- def test_locally():
-     """Run local tests for development and debugging"""
-     config = load_config()
-     logger = setup_logger(config, "test")
-     logger.info("Starting local tests")
-
-     api = LLMApi(config)
-     model_name = config["model"]["defaults"]["model_name"]
-
-     logger.info(f"Testing with model: {model_name}")
-
-     # Test download
-     logger.info("Testing model download...")
-     api.download_model(model_name)
-     logger.info("Download complete")
-
-     # Test initialization
-     logger.info("Initializing model...")
-     api.initialize_model(model_name)
-     logger.info("Model initialized")
-
-     # Test embedding
-     test_text = "Dette er en test av embeddings generering fra en teknisk tekst om HMS rutiner på arbeidsplassen."
-     logger.info("Testing embedding generation...")
-     embedding = api.generate_embedding(test_text)
-     logger.info(f"Generated embedding of length: {len(embedding)}")
-     logger.info(f"First few values: {embedding[:5]}")
-
-     # Test generation
-     test_prompts = [
-         "Tell me what happens in a nuclear reactor.",
-     ]
-
-     # Test regular generation
-     logger.info("Testing regular generation:")
-     for prompt in test_prompts:
-         logger.info(f"Prompt: {prompt}")
-         response = api.generate_response(
-             prompt=prompt,
-             system_message="You are a helpful assistant."
-         )
-         logger.info(f"Response: {response}")
-
-     # Test streaming generation
-     logger.info("Testing streaming generation:")
-     logger.info(f"Prompt: {test_prompts[0]}")
-     for chunk in api.generate_stream(
-         prompt=test_prompts[0],
-         system_message="You are a helpful assistant."
-     ):
-         print(chunk, end="", flush=True)
-     print("\n")
-
-     logger.info("Local tests completed")
-
- app = create_app()
+ @app.get("/health")
+ async def health_check():
+     return {"status": "healthy"}

  if __name__ == "__main__":
-     config = load_config()
-     #validate_hf()
-     if len(sys.argv) > 1 and sys.argv[1] == "test":
-         test_locally()
-     else:
-         uvicorn.run(
-             "main.app:app",
-             host=config["server"]["host"],
-             port=config["server"]["port"],
-             log_level="trace",
-             reload=True,
-             workers=1,
-             access_log=False,
-             use_colors=True
-         )
+     uvicorn.run(
+         "main.app:app",
+         host="0.0.0.0",
+         port=7680,
+         workers=1
+     )
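
With the dummy app from this commit running (for example via the __main__ block above, which serves on 0.0.0.0:7680), the two endpoints can be exercised with a minimal client sketch like the one below; the base URL assumes the host and port hard-coded in main/app.py.

# Quick check of the dummy endpoints, assuming the server above is running locally.
import json
import urllib.request

BASE_URL = "http://localhost:7680"  # port hard-coded in main/app.py

for path in ("/", "/health"):
    with urllib.request.urlopen(f"{BASE_URL}{path}") as resp:
        print(path, json.load(resp))

# Expected output, matching the handlers in main/app.py:
#   / {'message': 'Server is running'}
#   /health {'status': 'healthy'}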