Commit f9ce179 · amaye15 committed · 1 parent: 01e7bff

Feat - Deployment

Files changed:
- .gitignore                       +4   -0
- Dockerfile                       +70  -0
- README.md                        +3   -0
- api/main.py                      +288 -0
- api/model/saved_tuned_model.pkl  +3   -0
- requirements.txt                 +6   -0
.gitignore (ADDED)
@@ -0,0 +1,4 @@
+*env*
+*DS_Store*
+*.log
+*__pycache__*
Dockerfile (ADDED)
@@ -0,0 +1,70 @@
+# Stage 1: Build stage
+FROM python:3.12-slim as builder
+
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1 \
+    PATH="/root/.local/bin:$PATH"
+
+# Install system dependencies (curl and ca-certificates for uv installer)
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    curl \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install uv using the official installer
+RUN curl -sSfL https://astral.sh/uv/install.sh | sh
+
+# Verify uv is installed and available
+RUN uv --version
+
+# Create a non-root user
+RUN useradd -m -u 1000 user
+
+# Set the working directory
+WORKDIR /app
+
+# Create a virtual environment
+RUN uv venv /opt/venv
+
+# Update PATH to include the virtual environment's bin directory
+ENV PATH="/opt/venv/bin:$PATH"
+
+# Copy only the requirements file first to leverage Docker cache
+COPY --chown=user ./requirements.txt /app/requirements.txt
+
+# Install dependencies into the virtual environment using uv
+RUN uv pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application code
+COPY --chown=user . /app
+
+# Stage 2: Runtime stage
+FROM python:3.12-slim
+
+# Create a non-root user
+RUN useradd -m -u 1000 user
+USER user
+
+# Set environment variables
+ENV PATH="/opt/venv/bin:$PATH" \
+    PYTHONUNBUFFERED=1
+
+# Set the working directory
+WORKDIR /app
+
+# Copy the virtual environment from the builder stage
+COPY --from=builder --chown=user /opt/venv /opt/venv
+
+# Copy only the necessary files from the builder stage
+COPY --from=builder --chown=user /app /app
+
+# Expose the port the app runs on
+EXPOSE 7860
+
+# Health check to ensure the application is running
+HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
+    CMD curl -f http://localhost:7860/health || exit 1
+
+# Command to run the application with hot reloading
+CMD ["uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "7860", "--reload"]
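Note: a minimal local smoke test for the built image, assuming it was started with the port published (for example, docker run -p 7860:7860 <image>); the base URL below is a placeholder, not part of the commit. It probes the same /health endpoint the HEALTHCHECK instruction relies on.

import json
import urllib.request

def check_health(base_url: str = "http://localhost:7860") -> dict:
    # Probe the /health endpoint exposed by api/main.py (assumed reachable locally).
    with urllib.request.urlopen(f"{base_url}/health", timeout=5) as response:
        return json.load(response)

if __name__ == "__main__":
    print(check_health())  # expected: {'status': 'healthy'}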
README.md (CHANGED)
@@ -6,6 +6,9 @@ colorTo: green
 sdk: docker
 pinned: false
 license: mit
+python_version: 3.11
+app_port: 7860
+app_file: api/main.py
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
api/main.py (ADDED)
@@ -0,0 +1,288 @@
+# from fastapi import FastAPI, HTTPException, status, Depends
+# from fastapi.responses import RedirectResponse
+# from pydantic import BaseModel, conlist
+# import pandas as pd
+# from pycaret.classification import load_model, predict_model
+# import logging
+# from typing import Optional
+# import numpy as np
+# import os
+
+# # Constants
+# MODEL_PATH = "./api/model/saved_tuned_model"  # os.getenv("MODEL_PATH", "saved_tuned_model")  # Load model path from environment variable
+# EMBEDDING_DIMENSION = 1024  # Update this to match your model's expected input dimension
+# LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")  # Logging level from environment variable
+
+# # Configure logging
+# logging.basicConfig(level=LOG_LEVEL)
+# logger = logging.getLogger(__name__)
+
+
+# # Load the saved model
+# def load_tuned_model(model_path: str):
+#     """Load the pre-trained model from the specified path."""
+#     try:
+#         logger.info(f"Loading model from {model_path}...")
+#         model = load_model(model_path)
+#         logger.info("Model loaded successfully.")
+#         return model
+#     except Exception as e:
+#         logger.error(f"Failed to load the model: {str(e)}")
+#         raise RuntimeError(f"Model loading failed: {str(e)}")
+
+
+# tuned_model = load_tuned_model(MODEL_PATH)
+
+
+# # Define the input data model using Pydantic
+# class EmbeddingRequest(BaseModel):
+#     embedding: conlist(
+#         float, min_length=EMBEDDING_DIMENSION, max_length=EMBEDDING_DIMENSION
+#     )
+
+
+# # Define the response model
+# class PredictionResponse(BaseModel):
+#     predicted_label: int
+#     predicted_score: float
+
+
+# # Initialize FastAPI app
+# app = FastAPI(
+#     title="Embedding Prediction API",
+#     description="API for predicting labels and scores from embeddings using a pre-trained model.",
+#     version="1.0.0",
+# )
+
+
+# # Dependency for model access
+# def get_model():
+#     """Dependency to provide the loaded model to endpoints."""
+#     return tuned_model
+
+
+# # Define the prediction endpoint
+# @app.post("/predict", response_model=PredictionResponse)
+# async def predict(
+#     request: EmbeddingRequest,
+#     model=Depends(get_model),
+# ):
+#     """
+#     Predicts the label and score for a given embedding.
+
+#     Args:
+#         request (EmbeddingRequest): A request containing the embedding as a list of floats.
+#         model: The pre-trained model injected via dependency.
+
+#     Returns:
+#         PredictionResponse: A response containing the predicted label and score.
+#     """
+#     try:
+#         logger.info("Received prediction request.")
+
+#         # Convert the input embedding to a DataFrame
+#         input_data = pd.DataFrame(
+#             [request.embedding],
+#             columns=[f"embedding_{i}" for i in range(EMBEDDING_DIMENSION)],
+#         )
+
+#         # Make a prediction using the loaded model
+#         logger.info("Making prediction...")
+#         prediction = predict_model(model, data=input_data)
+
+#         # Extract the predicted label and score
+#         predicted_label = prediction["prediction_label"].iloc[0]
+#         predicted_score = prediction["prediction_score"].iloc[0]
+
+#         logger.info(
+#             f"Prediction successful: label={predicted_label}, score={predicted_score}"
+#         )
+
+#         return PredictionResponse(
+#             predicted_label=int(predicted_label),
+#             predicted_score=float(predicted_score),
+#         )
+
+#     except Exception as e:
+#         logger.error(f"Prediction failed: {str(e)}")
+#         raise HTTPException(
+#             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+#             detail=f"An error occurred during prediction: {str(e)}",
+#         )
+
+
+# # Health check endpoint
+# @app.get("/health", status_code=status.HTTP_200_OK)
+# async def health_check():
+#     """Health check endpoint to verify the API is running."""
+#     return {"status": "healthy"}
+
+
+# # Run the FastAPI app
+# if __name__ == "__main__":
+#     import uvicorn
+
+#     uvicorn.run(app, host="0.0.0.0", port=8000)
+
+
+from fastapi import FastAPI, HTTPException, status, Depends
+from fastapi.responses import RedirectResponse
+from pydantic import BaseModel, conlist, ValidationError
+from pydantic_settings import BaseSettings
+import pandas as pd
+from pycaret.classification import load_model, predict_model
+import logging
+from typing import Optional, List
+import numpy as np
+import os
+
+# Configure structured logging
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+# Define settings using Pydantic BaseSettings
+class Settings(BaseSettings):
+    model_path: str = "./api/model/saved_tuned_model"
+    embedding_dimension: int = 1024
+    log_level: str = "INFO"
+
+    class Config:
+        env_file = ".env"
+        env_file_encoding = "utf-8"
+
+
+settings = Settings()
+
+
+# Load the saved model
+def load_tuned_model(model_path: str):
+    """Load the pre-trained model from the specified path."""
+    try:
+        logger.info(f"Loading model from {model_path}...")
+        model = load_model(model_path)
+        logger.info("Model loaded successfully.")
+        return model
+    except Exception as e:
+        logger.error(f"Failed to load the model: {str(e)}")
+        raise RuntimeError(f"Model loading failed: {str(e)}")
+
+
+tuned_model = load_tuned_model(settings.model_path)
+
+
+# Define the input data model using Pydantic
+class EmbeddingRequest(BaseModel):
+    embedding: conlist(
+        float,
+        min_length=settings.embedding_dimension,
+        max_length=settings.embedding_dimension,
+    )
+
+
+# Define the response model
+class PredictionResponse(BaseModel):
+    predicted_label: int
+    predicted_score: float
+
+
+# Initialize FastAPI app
+app = FastAPI(
+    title="Embedding Prediction API",
+    description="API for predicting labels and scores from embeddings using a pre-trained model.",
+    version="1.0.0",
+)
+
+
+# Dependency for model access
+def get_model():
+    """Dependency to provide the loaded model to endpoints."""
+    return tuned_model
+
+
+# Define the prediction endpoint
+@app.post("/predict", response_model=PredictionResponse)
+async def predict(
+    request: EmbeddingRequest,
+    model=Depends(get_model),
+):
+    """
+    Predicts the label and score for a given embedding.
+
+    Args:
+        request (EmbeddingRequest): A request containing the embedding as a list of floats.
+        model: The pre-trained model injected via dependency.
+
+    Returns:
+        PredictionResponse: A response containing the predicted label and score.
+    """
+    try:
+        logger.info("Received prediction request.")
+
+        # Convert the input embedding to a DataFrame
+        input_data = pd.DataFrame(
+            [request.embedding],
+            columns=[f"embedding_{i}" for i in range(settings.embedding_dimension)],
+        )
+
+        # Make a prediction using the loaded model
+        logger.info("Making prediction...")
+        prediction = predict_model(model, data=input_data)
+
+        # Validate the prediction output
+        if (
+            "prediction_label" not in prediction.columns
+            or "prediction_score" not in prediction.columns
+        ):
+            raise ValueError("Model prediction output is missing required columns.")
+
+        # Extract the predicted label and score
+        predicted_label = prediction["prediction_label"].iloc[0]
+        predicted_score = prediction["prediction_score"].iloc[0]
+
+        if predicted_label == 3:
+            predicted_label = 4
+
+        logger.info(
+            f"Prediction successful: label={predicted_label}, score={predicted_score}"
+        )
+
+        return PredictionResponse(
+            predicted_label=int(predicted_label),
+            predicted_score=float(predicted_score),
+        )
+
+    except ValidationError as e:
+        logger.error(f"Validation error: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=f"Invalid input data: {str(e)}",
+        )
+    except ValueError as e:
+        logger.error(f"Value error: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Model output validation failed: {str(e)}",
+        )
+    except Exception as e:
+        logger.error(f"Prediction failed: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during prediction: {str(e)}",
+        )
+
+
+# Health check endpoint
+@app.get("/health", status_code=status.HTTP_200_OK)
+async def health_check():
+    """Health check endpoint to verify the API is running."""
+    return {"status": "healthy"}
+
+
+# Run the FastAPI app
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(app, host="0.0.0.0", port=8000)
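Note: a minimal client sketch for the new POST /predict endpoint, assuming the API is reachable on port 7860; the base URL and the zero-filled 1024-dimensional embedding are placeholder values, not part of the commit.

import json
import urllib.request

BASE_URL = "http://localhost:7860"   # placeholder; use the Space URL once deployed
EMBEDDING_DIMENSION = 1024           # must match settings.embedding_dimension

# Zero-filled placeholder embedding; a real caller would send a genuine 1024-dim vector.
payload = json.dumps({"embedding": [0.0] * EMBEDDING_DIMENSION}).encode("utf-8")
request = urllib.request.Request(
    f"{BASE_URL}/predict",
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(request, timeout=30) as response:
    print(json.load(response))  # e.g. {"predicted_label": ..., "predicted_score": ...}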
api/model/saved_tuned_model.pkl (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ebc64beeeb89ba17af4b68dd13e322ae5a411f858f4816435f8db96c7d46e71
+size 4142287
requirements.txt (ADDED)
@@ -0,0 +1,6 @@
+pycaret==3.3.2
+fastapi==0.115.7
+uvicorn==0.34.0
+xgboost==2.1.3
+dill==0.3.9
+pydantic-settings==2.7.1