Initial commit

(This view is limited to 50 files because it contains too many changes.)
- .DS_Store +0 -0
- .env.example +9 -0
- .env.spaces +18 -0
- .gitattributes +8 -6
- .gitignore +57 -0
- README.md +35 -13
- app.py +36 -0
- app/.DS_Store +0 -0
- app/config/execution.json +34 -0
- app/config/manifest.json +11 -0
- app/config/properties.json +6 -0
- app/config/state.json +8 -0
- app/core/.DS_Store +0 -0
- app/core/__init__.py +0 -0
- app/core/image_to_3d.py +522 -0
- app/core/pipeline.py +344 -0
- app/core/remote.py +106 -0
- app/core/stub.py +137 -0
- app/core/text_to_image.py +259 -0
- app/data/.DS_Store +0 -0
- app/llm/.DS_Store +0 -0
- app/llm/__init__.py +0 -0
- app/llm/client.py +105 -0
- app/llm/model.py +237 -0
- app/llm/service.py +213 -0
- app/main.py +113 -0
- app/ontology_dc8f06af066e4a7880a5938933236037/__init__.py +0 -0
- app/ontology_dc8f06af066e4a7880a5938933236037/config.py +25 -0
- app/ontology_dc8f06af066e4a7880a5938933236037/input.py +28 -0
- app/ontology_dc8f06af066e4a7880a5938933236037/output.py +25 -0
- app/tools/__init__.py +0 -0
- app/tools/blob_viewer.py +216 -0
- app/ui/app.py +407 -0
- onto/.DS_Store +0 -0
- onto/dc8f06af066e4a7880a5938933236037/.DS_Store +0 -0
- onto/dc8f06af066e4a7880a5938933236037/connection/ConfigClass.json +10 -0
- onto/dc8f06af066e4a7880a5938933236037/connection/InputClass.json +10 -0
- onto/dc8f06af066e4a7880a5938933236037/connection/OutputClass.json +10 -0
- onto/dc8f06af066e4a7880a5938933236037/defaults/ConfigClass.json +15 -0
- onto/dc8f06af066e4a7880a5938933236037/defaults/InputClass.json +15 -0
- onto/dc8f06af066e4a7880a5938933236037/defaults/OutputClass.json +15 -0
- onto/dc8f06af066e4a7880a5938933236037/encoding/ConfigClass.json +10 -0
- onto/dc8f06af066e4a7880a5938933236037/encoding/InputClass.json +10 -0
- onto/dc8f06af066e4a7880a5938933236037/encoding/OutputClass.json +10 -0
- onto/dc8f06af066e4a7880a5938933236037/instruction/ConfigClass.json +14 -0
- onto/dc8f06af066e4a7880a5938933236037/instruction/InputClass.json +14 -0
- onto/dc8f06af066e4a7880a5938933236037/instruction/OutputClass.json +14 -0
- onto/dc8f06af066e4a7880a5938933236037/meta.json +42 -0
- onto/dc8f06af066e4a7880a5938933236037/naming/ConfigClass.json +12 -0
- onto/dc8f06af066e4a7880a5938933236037/naming/InputClass.json +12 -0
.DS_Store
ADDED
Binary file (8.2 kB)
.env.example
ADDED
@@ -0,0 +1,9 @@
# Hugging Face token for downloading models
HF_TOKEN=your_huggingface_token_here

# Model configuration
MODEL_ID=meta-llama/Llama-3.2-3B-Instruct

# Application IDs
TEXT_TO_IMAGE_APP_ID=c25dcd829d134ea98f5ae4dd311d13bc.node3.openfabric.network
IMAGE_TO_3D_APP_ID=f0b5f319156c4819b9827000b17e511a.node3.openfabric.network
.env.spaces
ADDED
@@ -0,0 +1,18 @@
# Environment variables for Hugging Face Spaces deployment
HTTPX_VERIFY=0
UI_PORT=7860
HF_SPACES=1

# Openfabric App IDs
TEXT_TO_IMAGE_APP_ID=c25dcd829d134ea98f5ae4dd311d13bc.node3.openfabric.network
IMAGE_TO_3D_APP_ID=f0b5f319156c4819b9827000b17e511a.node3.openfabric.network

# LLM Configuration for Spaces - use a hosted model instead of local files
MODEL_ID=meta-llama/Llama-3.2-3B-Instruct

# Data Directories (Spaces-friendly paths)
IMAGE_OUTPUT_DIR=/tmp/data/images
MODEL_OUTPUT_DIR=/tmp/data/models

# Log level
LOG_LEVEL=INFO
.gitattributes
CHANGED
@@ -1,6 +1,13 @@
+*.glb filter=lfs diff=lfs merge=lfs -text
+*.gltf filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.gguf filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+app/llm/models/** filter=lfs diff=lfs merge=lfs -text
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
@@ -13,16 +20,13 @@
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tar filter=lfs diff=lfs merge=lfs -text
@@ -34,5 +38,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 *.png filter=lfs diff=lfs merge=lfs -text
-*.glb filter=lfs diff=lfs merge=lfs -text
-*.gltf filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,57 @@

# Data directories with large files
app/data/images/*.png
app/data/images/*.jpg
app/data/images/*.json
app/data/models/*.glb
app/data/models/*.gltf
app/data/models/*.json
app/data/downloads/*

# Exclude LLM models directory (contains large 12.8GB model)
app/llm/models/**

# Keep directory structure
!app/data/images/.gitkeep
!app/data/models/.gitkeep
!app/data/downloads/.gitkeep
!app/llm/models/.gitkeep

# Python artifacts
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Environments
.env
.venv
env/
venv/
ENV/

# VS Code settings
.vscode/

# Jupyter Notebook
.ipynb_checkpoints

# Logs
*.log
*.out
README.md
CHANGED
@@ -1,13 +1,35 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
+# AI Creative Studio
+
+An AI-powered creative application that combines text-to-image and image-to-3D model generation.
+
+## Features
+
+- Text-to-Image generation using Openfabric AI services
+- Image-to-3D model conversion
+- Interactive UI built with Gradio 4.44
+- Support for local LLM integration for prompt enhancement
+
+## Setup and Usage
+
+### Local Development
+
+1. Clone this repository
+2. Copy `.env.example` to `.env` and configure your environment variables
+3. Install dependencies: `pip install -r requirements.txt`
+4. Run the UI: `python app/ui/app.py`
+
+### Hugging Face Spaces Deployment
+
+This project is configured to run on Hugging Face Spaces. The main entry point for Spaces is the root `app.py` file.
+
+## Environment Variables
+
+Key environment variables:
+
+- `TEXT_TO_IMAGE_APP_ID`: Openfabric service ID for text-to-image generation
+- `IMAGE_TO_3D_APP_ID`: Openfabric service ID for image-to-3D conversion
+- `HF_TOKEN`: Hugging Face token for accessing models (optional in Spaces if using public models)
+
+## License
+
+This project is licensed under the MIT License.
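Editor's note: the modules added below compose into a single flow. The following is a minimal usage sketch, not part of this commit; it assumes the two Openfabric app IDs from `.env.example` are reachable and that the repository root is on the import path, and it exercises only names defined in the files below (`Stub`, `CreativePipeline`, `PipelineResult`).

# Hypothetical driver script illustrating how Stub and CreativePipeline fit together.
import os

from app.core.stub import Stub
from app.core.pipeline import CreativePipeline

app_ids = [
    os.environ["TEXT_TO_IMAGE_APP_ID"],  # text-to-image service
    os.environ["IMAGE_TO_3D_APP_ID"],    # image-to-3D service
]

stub = Stub(app_ids)               # fetches manifests/schemas, opens connections
pipeline = CreativePipeline(stub)  # wires the LLM client and both generators

result = pipeline.create("a glowing dragon standing on a cliff at sunset")
if result.success:
    print("image:", result.image_path)
    print("3D model:", result.model_path)
else:
    print("pipeline failed:", result.to_dict())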
app.py
ADDED
@@ -0,0 +1,36 @@
#!/usr/bin/env python
"""
AI Creative Studio - Hugging Face Spaces Entry Point
"""

import os
import sys
import logging
from pathlib import Path

# Configure logging
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("huggingface-spaces")

# Make sure app directory is in the path
app_dir = Path(__file__).parent / "app"
sys.path.append(str(app_dir))

# Set environment variables for Spaces
os.environ["HTTPX_VERIFY"] = "0"
os.environ["UI_PORT"] = "7860"  # Standard Spaces port
os.environ["HF_SPACES"] = "1"  # Flag to indicate we're running in Spaces

# Import UI module
try:
    from app.ui.app import main as ui_main
    logger.info("Successfully imported UI module")
except ImportError as e:
    logger.error(f"Failed to import UI module: {str(e)}")
    raise

if __name__ == "__main__":
    logger.info("Starting AI Creative Studio on Hugging Face Spaces")
    ui_main()
app/.DS_Store
ADDED
Binary file (8.2 kB)
app/config/execution.json
ADDED
@@ -0,0 +1,34 @@
{
  "input_class" : {
    "package" : "ontology_dc8f06af066e4a7880a5938933236037.input",
    "class" : "InputClass"
  },
  "input_schema" : {
    "package" : "ontology_dc8f06af066e4a7880a5938933236037.input",
    "class" : "InputClassSchema"
  },
  "output_class" : {
    "package" : "ontology_dc8f06af066e4a7880a5938933236037.output",
    "class" : "OutputClass"
  },
  "output_schema" : {
    "package" : "ontology_dc8f06af066e4a7880a5938933236037.output",
    "class" : "OutputClassSchema"
  },
  "config_class" : {
    "package" : "ontology_dc8f06af066e4a7880a5938933236037.config",
    "class" : "ConfigClass"
  },
  "config_schema" : {
    "package" : "ontology_dc8f06af066e4a7880a5938933236037.config",
    "class" : "ConfigClassSchema"
  },
  "main_callback" : {
    "package" : "main",
    "function" : "execute"
  },
  "config_callback" : {
    "package" : "main",
    "function" : "config"
  }
}
app/config/manifest.json
ADDED
@@ -0,0 +1,11 @@
{
  "name": "ai-test",
  "version": "1.0",
  "description": "An application for demonstrating your skills in AI",
  "organization": "",
  "sdk": "0.2.9",
  "overview": "",
  "input": "dc8f06af066e4a7880a5938933236037/b6acb533a2128948785b689c87119355/1.0",
  "output": "dc8f06af066e4a7880a5938933236037/154ac0a492d67f103915099d81a49f03/1.0",
  "config": "dc8f06af066e4a7880a5938933236037/b0fe7844500e8e21c39605e13d5e64e4/1.0"
}
app/config/properties.json
ADDED
@@ -0,0 +1,6 @@
[
  {
    "key": "key",
    "value": "value"
  }
]
app/config/state.json
ADDED
@@ -0,0 +1,8 @@
{
  "super-user": {
    "app_ids": [
      "c25dcd829d134ea98f5ae4dd311d13bc.node3.openfabric.network",
      "f0b5f319156c4819b9827000b17e511a.node3.openfabric.network"
    ]
  }
}
app/core/.DS_Store
ADDED
Binary file (6.15 kB)
app/core/__init__.py
ADDED
File without changes
app/core/image_to_3d.py
ADDED
@@ -0,0 +1,522 @@
import os
import logging
import base64
from typing import Dict, Optional, Any, Tuple
import json
from pathlib import Path
import time
import uuid
import random
import requests
from dotenv import load_dotenv

from .stub import Stub

load_dotenv()


logger = logging.getLogger(__name__)


class ImageTo3DGenerator:
    """
    Handles the image-to-3D generation using Openfabric's API.
    """

    def __init__(self, stub: Stub, app_id: str = None):
        """
        Initialize the image-to-3D generator.

        Args:
            stub: Stub instance for communicating with Openfabric
            app_id: The app ID for the image-to-3D service (default: from env var)
        """
        self.stub = stub
        self.app_id = app_id or os.environ.get("IMAGE_TO_3D_APP_ID")

        # Maximum time to wait for job completion (in seconds)
        self.max_wait_time = 300  # 5 minutes

        # Polling interval for checking job status (in seconds)
        self.polling_interval = 5  # Check every 5 seconds

        # Use default output directory if MODEL_OUTPUT_DIR is not set
        model_output_dir = os.environ.get("MODEL_OUTPUT_DIR")
        if model_output_dir is None:
            # Default to app/data/models
            self.output_dir = Path(__file__).parent.parent / "data" / "models"
            logger.warning(
                f"MODEL_OUTPUT_DIR not set, using default: {self.output_dir}"
            )
        else:
            self.output_dir = Path(model_output_dir)

        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Cache the schema and manifest - don't raise exceptions to allow fallback mode
        try:
            self.input_schema = self.stub.schema(self.app_id, "input")
            self.output_schema = self.stub.schema(self.app_id, "output")
            self.manifest = self.stub.manifest(self.app_id)
            logger.info(
                f"Successfully loaded schema and manifest for image-to-3D app: {self.app_id}"
            )
        except Exception as e:
            logger.warning(f"Failed to load schema/manifest for image-to-3D app: {e}")

    def generate(
        self, image_path: str, params: Optional[Dict[str, Any]] = None
    ) -> Tuple[str, str]:
        """
        Generate a 3D model from an image.

        Args:
            image_path: Path to the source image file
            params: Additional parameters for 3D generation

        Returns:
            Tuple of (model_path, metadata_path)
        """
        # Read the image and convert to base64
        try:
            with open(image_path, "rb") as img_file:
                image_data = base64.b64encode(img_file.read()).decode("utf-8")
        except Exception as e:
            logger.error(f"Failed to read image at {image_path}: {e}")
            raise

        # Prepare the request based on the input schema
        request_data = self._prepare_request(image_data, params)

        # Log the request
        logger.info(f"Sending image-to-3D request for image: {image_path}")

        # Send the request to Openfabric
        rid = None
        try:
            start_time = time.time()

            # Make the API call - this will return immediately with a request ID
            response = self.stub.call(self.app_id, request_data)

            # Extract the request ID from logs
            rid = self._extract_rid_from_logs()
            if not rid:
                raise ValueError("Failed to extract request ID from logs")

            logger.info(f"Submitted image-to-3D job with request ID: {rid}")

            # Poll for job completion
            qid, result = self._poll_for_completion(rid)

            generation_time = time.time() - start_time
            logger.info(f"Image-to-3D generation completed in {generation_time:.2f}s")

            if not result:
                raise ValueError("Failed to get result data after job completion")

            # Process and save the result
            return self._process_result(result, image_path)

        except Exception as e:
            logger.error(f"Failed to generate 3D model: {e}")
            raise

    def _extract_rid_from_logs(self) -> str:
        """
        Extract the request ID (rid) from logs.
        The stub logs the rid when it creates a request in the format "Created rid{rid}"

        Returns:
            Request ID string or None if not found
        """
        import re

        # Try to scan the last few log lines for the request ID pattern
        log_handler = next(
            (
                h
                for h in logging.getLogger("root").handlers
                if isinstance(h, logging.StreamHandler)
            ),
            None,
        )

        if hasattr(log_handler, "stream") and hasattr(log_handler.stream, "getvalue"):
            # This is for StringIO in testing environments
            log_content = log_handler.stream.getvalue()
            matches = re.findall(r"Created rid([a-f0-9]+)", log_content)
            if matches:
                return matches[-1]  # Return the most recent match

        # Alternative approach: look for the most recently created request
        try:
            queue_url = f"https://{self.app_id}/queue/list"
            response = requests.get(queue_url)
            if response.status_code == 200:
                job_list = response.json()
                if job_list and isinstance(job_list, list) and len(job_list) > 0:
                    # Sort by creation time (newest first) and get the first one
                    sorted_jobs = sorted(
                        job_list, key=lambda x: x.get("created_at", ""), reverse=True
                    )
                    return sorted_jobs[0].get("rid")
        except Exception as e:
            logger.warning(f"Failed to get request ID from queue: {e}")

        return None

    def _poll_for_completion(self, rid: str) -> Tuple[str, Dict[str, Any]]:
        """
        Poll the queue list endpoint until the job is complete.

        Args:
            rid: Request ID to check

        Returns:
            Tuple of (queue_id, result_data)
        """
        start_time = time.time()
        qid = None
        result = None

        logger.info(f"Waiting for job completion (rid: {rid})...")

        while (time.time() - start_time) < self.max_wait_time:
            try:
                # Get the queue list
                queue_url = f"https://{self.app_id}/queue/list"
                response = requests.get(queue_url)

                if response.status_code != 200:
                    logger.error(f"Failed to get queue list: {response.status_code}")
                    time.sleep(self.polling_interval)
                    continue

                # Parse the response and find our job
                job_list = response.json()
                if not isinstance(job_list, list):
                    logger.error(f"Unexpected queue list format: {type(job_list)}")
                    time.sleep(self.polling_interval)
                    continue

                # Find our job by request ID
                our_job = next((job for job in job_list if job.get("rid") == rid), None)

                if not our_job:
                    logger.warning(f"Job with rid {rid} not found in queue")
                    time.sleep(self.polling_interval)
                    continue

                # Get queue ID if we don't have it yet
                if not qid:
                    qid = our_job.get("qid")
                    logger.info(f"Found job with qid: {qid}")

                # Check if job is finished
                if our_job.get("finished") and our_job.get("status") == "COMPLETED":
                    logger.info(f"Job completed successfully")

                    # Get the detailed result
                    result_url = f"https://{self.app_id}/queue/get?qid={qid}"
                    result_response = requests.get(result_url)

                    if result_response.status_code == 200:
                        result = result_response.json()
                        logger.info(f"Got result data: {result}")
                        return qid, result
                    else:
                        logger.error(
                            f"Failed to get result data: {result_response.status_code}"
                        )

                elif our_job.get("finished") and our_job.get("status") != "COMPLETED":
                    # Job failed
                    status = our_job.get("status")
                    messages = our_job.get("messages", [])
                    error_msgs = [
                        m.get("content") for m in messages if m.get("type") == "ERROR"
                    ]

                    error_msg = f"Job failed with status: {status}"
                    if error_msgs:
                        error_msg += f", errors: {'; '.join(error_msgs)}"

                    logger.error(error_msg)
                    raise ValueError(error_msg)

                # Job is still running
                status = our_job.get("status")
                progress = (
                    our_job.get("bars", {}).get("default", {}).get("percent", "0")
                )
                logger.info(f"Job status: {status}, progress: {progress}%")

            except Exception as e:
                logger.error(f"Error polling for job completion: {e}")

            # Wait before checking again
            time.sleep(self.polling_interval)

        # If we get here, we timed out
        raise TimeoutError(
            f"Timed out waiting for job completion after {self.max_wait_time} seconds"
        )

    def _prepare_request(
        self, image_data: str, params: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Prepare the request payload based on the app's input schema.

        Args:
            image_data: Base64-encoded image data
            params: Additional parameters for 3D generation

        Returns:
            Dict containing the properly formatted request payload
        """
        # Default parameters for image-to-3D transformation
        default_params = {
            "model_type": "textured",  # Options might include: textured, mesh, point_cloud
            "quality": "standard",  # Options might include: draft, standard, high
            "format": "glb",  # Output format: glb, obj, etc.
        }

        # Override defaults with provided params
        request_params = {**default_params, **(params or {})}

        # Create request based on the actual input schema
        # The schema specifies 'input_image' as the required field, not 'image'
        request = {"input_image": image_data, **request_params}

        return request

    def _process_result(
        self, result: Dict[str, Any], image_path: str
    ) -> Tuple[str, str]:
        """
        Process the result from the image-to-3D app.

        Args:
            result: The API response
            image_path: Path to the source image

        Returns:
            Tuple of (model_path, metadata_path)
        """
        # If result is None, raise an error - don't use mock data
        if result is None:
            raise ValueError("No result received from image-to-3D generation service")

        try:
            model_format = "glb"  # Default format for 3D models
            has_video_preview = False
            video_data = None
            model_base64 = None

            # Process Openfabric blob response format (most common case)
            # This will have 'generated_object' as a data_blob ID (without the base64 data)
            if "generated_object" in result:
                # Extract the model data or model blob reference
                model_data = result.get("generated_object")

                # Check if this is a blob reference (data URI format or plain string)
                if isinstance(model_data, str):
                    if "/" in model_data or model_data.startswith("data_"):
                        # This is a blob ID reference
                        blob_id = model_data
                        logger.info(f"3D model generation result ID: {blob_id}")

                        # Prepare the blob URL and try to download the actual model data
                        try:
                            # Construct URL for the blob
                            resource_url = (
                                f"https://{self.app_id}/resource?reid={blob_id}"
                            )
                            logger.info(
                                f"Fetching 3D model from blob URL: {resource_url}"
                            )

                            response = requests.get(resource_url)

                            if response.status_code == 200:
                                # We have the actual model data
                                model_binary = response.content
                                model_base64 = base64.b64encode(model_binary).decode(
                                    "utf-8"
                                )
                                logger.info(
                                    f"Successfully fetched 3D model from blob store"
                                )

                                # Set format based on content-type if available
                                content_type = response.headers.get("Content-Type", "")
                                if "gltf-binary" in content_type:
                                    model_format = "glb"
                                elif (
                                    "gltf+json" in content_type
                                    or "json" in content_type
                                ):
                                    model_format = "gltf"
                            else:
                                logger.error(
                                    f"Failed to fetch blob: {response.status_code} - {response.text}"
                                )
                                raise ValueError(
                                    f"Failed to fetch blob data: {response.status_code}"
                                )
                        except Exception as blob_error:
                            logger.error(f"Error accessing blob store: {blob_error}")
                            raise ValueError(
                                f"Failed to fetch 3D model from blob store: {blob_error}"
                            )
                    elif "," in model_data and "base64" in model_data:
                        # Extract base64 data if in data URI format
                        model_base64 = model_data.split(",", 1)[1]
                    else:
                        # Use as-is for plain base64 data
                        model_base64 = model_data
                else:
                    # If model_data is not a string, this is an unexpected format
                    raise ValueError(
                        f"Unexpected generated_object type: {type(model_data)}"
                    )

                # Also handle video preview if available
                video_data = result.get("video_object")
                has_video_preview = video_data is not None and video_data != ""

            # Handle result blob reference format (alternative format)
            elif "result" in result:
                blob_id = result.get("result")
                logger.info(f"3D model generation result ID: {blob_id}")

                # Try to fetch the actual model data from the blob store
                try:
                    # Construct URL for the blob
                    resource_url = f"https://{self.app_id}/resource?reid={blob_id}"
                    logger.info(f"Fetching 3D model from blob URL: {resource_url}")

                    response = requests.get(resource_url)

                    if response.status_code == 200:
                        # We have the actual model data
                        model_binary = response.content
                        model_base64 = base64.b64encode(model_binary).decode("utf-8")
                        logger.info(f"Successfully fetched 3D model from blob store")

                        # Set format based on content-type if available
                        content_type = response.headers.get("Content-Type", "")
                        if "gltf-binary" in content_type:
                            model_format = "glb"
                        elif "gltf+json" in content_type or "json" in content_type:
                            model_format = "gltf"
                    else:
                        logger.error(
                            f"Failed to fetch blob: {response.status_code} - {response.text}"
                        )
                        raise ValueError(
                            f"Failed to fetch blob data: {response.status_code}"
                        )
                except Exception as blob_error:
                    logger.error(f"Error accessing blob store: {blob_error}")
                    raise ValueError(
                        f"Failed to fetch 3D model from blob store: {blob_error}"
                    )

            # Handle direct model data format (which has 'model' field)
            elif "model" in result:
                model_data = result.get("model")
                model_format = result.get("format", "glb")

                if isinstance(model_data, str):
                    if "," in model_data:
                        # Extract base64 data if in data URI format
                        model_base64 = model_data.split(",", 1)[1]
                    else:
                        model_base64 = model_data
                else:
                    # Use as-is for binary data
                    model_base64 = model_data
                has_video_preview = False
            else:
                raise KeyError(
                    f"Could not identify response format. Keys: {list(result.keys())}"
                )

            if not model_base64:
                raise ValueError("No model data found in the result")

            # Generate a unique ID for this model and timestamp
            model_id = str(uuid.uuid4())
            timestamp = int(time.time())

            # NEW: Extract the base filename from the source image to use for the model
            source_image_filename = Path(image_path).name
            base_name = source_image_filename.rsplit(".", 1)[0]  # Remove extension

            # If base name doesn't already include timestamp, add it
            if not any(c.isdigit() for c in base_name):
                base_name = f"{base_name}_{timestamp}"

            # Append "_3d" to clearly indicate this is a 3D model derived from the image
            base_name = f"{base_name}_3d"

            # Create filenames based on the image name pattern
            model_filename = f"{base_name}.{model_format}"
            metadata_filename = f"{base_name}.json"

            # Create paths for model and metadata
            model_path = self.output_dir / model_filename
            metadata_path = self.output_dir / metadata_filename

            # Create path for video preview if available
            video_path = None
            if has_video_preview and video_data:
                video_filename = f"{base_name}_preview.mp4"
                video_path = self.output_dir / video_filename
                try:
                    # Extract video base64 data
                    video_base64 = video_data
                    if isinstance(video_data, str) and "," in video_data:
                        video_base64 = video_data.split(",", 1)[1]

                    # Save the video preview
                    with open(video_path, "wb") as video_file:
                        video_file.write(base64.b64decode(video_base64))
                    logger.info(f"Video preview saved to {video_path}")
                except Exception as video_error:
                    logger.error(f"Failed to save video preview: {video_error}")
                    video_path = None

            # Save the model file
            with open(model_path, "wb") as model_file:
                model_file.write(base64.b64decode(model_base64))

            # Save metadata linking image to 3D model
            metadata = {
                "id": model_id,
                "timestamp": timestamp,
                "source_image": image_path,
                "source_image_filename": source_image_filename,
                "file_path": str(model_path),
                "format": model_format,
                "type": "3d_model",
                "has_video_preview": has_video_preview,
                "video_path": str(video_path) if video_path else None,
                "result_id": result.get("result", result.get("generated_object", "")),
                "parameters": result.get("parameters", {}),
            }

            with open(metadata_path, "w") as meta_file:
                json.dump(metadata, meta_file)

            logger.info(f"3D model saved to {model_path}")
            logger.info(f"Metadata saved to {metadata_path}")

            return str(model_path), str(metadata_path)

        except Exception as e:
            logger.error(f"Failed to process 3D model result: {e}")
            raise
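Editor's note: ImageTo3DGenerator is synchronous from the caller's point of view; generate() submits the job, recovers the request ID, polls /queue/list until the job reports COMPLETED, downloads the blob, and writes the model plus a JSON metadata sidecar. A minimal standalone sketch (not part of this commit; the image path is illustrative):

from app.core.stub import Stub
from app.core.image_to_3d import ImageTo3DGenerator

stub = Stub(["f0b5f319156c4819b9827000b17e511a.node3.openfabric.network"])
generator = ImageTo3DGenerator(stub)

# Blocks for up to max_wait_time (300 s), polling every polling_interval (5 s).
model_path, metadata_path = generator.generate("app/data/images/example.png")
print(model_path)     # e.g. a .glb under the configured models directory
print(metadata_path)  # matching .json sidecar describing the result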
app/core/pipeline.py
ADDED
@@ -0,0 +1,344 @@
import logging
import os
import sys
import json
import requests
from typing import Dict, Any, Optional, Tuple
from pathlib import Path
import importlib
import importlib.util

# Use relative imports for modules in the same package
from .text_to_image import TextToImageGenerator
from .image_to_3d import ImageTo3DGenerator
from .stub import Stub

# Remove the sys.path.append since we'll use relative imports instead
sys.path.append(
    str(Path(__file__).parent.parent)
)  # Add parent directory to path for imports
from llm.client import LLMClient

logger = logging.getLogger(__name__)


class PipelineResult:
    """Data class to store the results of a creation pipeline run"""

    def __init__(
        self,
        success: bool = False,
        original_prompt: str = None,
        expanded_prompt: str = None,
        image_path: str = None,
        image_metadata_path: str = None,
        model_path: str = None,
        model_metadata_path: str = None,
    ):
        self.success = success
        self.original_prompt = original_prompt
        self.expanded_prompt = expanded_prompt
        self.image_path = image_path
        self.image_metadata_path = image_metadata_path
        self.model_path = model_path
        self.model_metadata_path = model_metadata_path

    def to_dict(self) -> Dict[str, Any]:
        """Convert result to dictionary representation"""
        return {
            "success": self.success,
            "original_prompt": self.original_prompt,
            "expanded_prompt": self.expanded_prompt,
            "image_path": self.image_path,
            "image_metadata_path": self.image_metadata_path,
            "model_path": self.model_path,
            "model_metadata_path": self.model_metadata_path,
        }


class CreativePipeline:
    """
    Orchestrates the end-to-end creative pipeline from prompt to 3D model.

    Flow:
    1. Take user prompt
    2. Enhance with local LLM
    3. Generate image from enhanced prompt
    4. Create 3D model from image
    5. Return comprehensive results
    """

    def __init__(self, stub: Stub):
        """
        Initialize the creative pipeline components.

        Args:
            stub: Stub instance for communicating with Openfabric apps
        """
        self.stub = stub

        # Initialize LLM client
        llm_service_url = os.environ.get("LLM_SERVICE_URL")
        self.llm_client = LLMClient(base_url=llm_service_url)

        # Initialize generators
        self.text_to_image = TextToImageGenerator(stub)
        self.image_to_3d = ImageTo3DGenerator(stub)

        # Ensure app/data directories exist
        data_dir = Path(__file__).parent.parent / "data"
        data_dir.mkdir(exist_ok=True)
        (data_dir / "images").mkdir(exist_ok=True)
        (data_dir / "models").mkdir(exist_ok=True)
        (data_dir / "downloads").mkdir(exist_ok=True)

        logger.info("Creative pipeline initialized successfully")

    def create(self, prompt: str, params: Dict[str, Any] = None) -> PipelineResult:
        """
        Run the creative pipeline from text prompt to 3D model.

        Args:
            prompt: The user's text prompt
            params: Optional parameters for the pipeline stages

        Returns:
            PipelineResult object with paths to generated assets
        """
        try:
            # 1. Enhance the prompt with the LLM
            logger.info(f"Enhancing prompt: '{prompt}'")
            try:
                expanded_prompt = self.llm_client.expand_prompt(prompt)
                logger.info(f"Enhanced prompt: '{expanded_prompt}'")
            except Exception as e:
                logger.warning(f"Failed to enhance prompt: {e}")
                # Fall back to original prompt if enhancement fails
                expanded_prompt = prompt
                logger.info(f"Using original prompt: '{expanded_prompt}'")

            # 2. Generate image from the enhanced prompt
            image_params = params.get("image", {}) if params else {}
            image_path, image_metadata_path = self.text_to_image.generate(
                expanded_prompt, image_params, original_prompt=prompt
            )

            # If image_path is None but we have metadata, we need to download from blob store
            if image_path is None and image_metadata_path:
                try:
                    # Import the blob viewer downloader directly
                    tools_dir = str(Path(__file__).parent.parent / "tools")
                    sys.path.append(tools_dir)

                    try:
                        # Try direct import first
                        from tools.blob_viewer import (
                            download_resource,
                            construct_resource_url,
                        )
                    except ImportError:
                        # If that fails, use importlib with proper error handling
                        blob_viewer_path = os.path.join(tools_dir, "blob_viewer.py")
                        spec = importlib.util.spec_from_file_location(
                            "blob_viewer", blob_viewer_path
                        )
                        blob_viewer = importlib.util.module_from_spec(spec)
                        spec.loader.exec_module(blob_viewer)
                        download_resource = blob_viewer.download_resource
                        construct_resource_url = blob_viewer.construct_resource_url

                    # Read metadata to get blob ID and other info
                    with open(image_metadata_path, "r") as f:
                        metadata = json.load(f)

                    if "result_id" in metadata and metadata["result_id"] != "mock":
                        blob_id = metadata["result_id"]
                        logger.info(f"Downloading image from blob store: {blob_id}")

                        # Parse the blob ID
                        parts = blob_id.split("/")
                        data_blob_id = parts[0]
                        execution_id = parts[2] if len(parts) > 2 else None

                        # Target directory
                        images_dir = Path(__file__).parent.parent / "data" / "images"

                        # Get the existing metadata filename and use it for the image
                        metadata_filename = Path(image_metadata_path).name
                        base_filename = metadata_filename.rsplit(".", 1)[0]
                        image_filename = f"{base_filename}.png"

                        # Prepare full paths
                        target_image_path = images_dir / image_filename

                        # Update the metadata file with additional information
                        metadata.update(
                            {
                                "original_prompt": prompt,
                                "expanded_prompt": expanded_prompt,
                                "needs_download": False,  # Mark as downloaded
                            }
                        )

                        # Write the updated metadata back to the file
                        with open(image_metadata_path, "w") as f:
                            json.dump(metadata, f, indent=2)

                        # Call the download function with our custom path
                        url = construct_resource_url(data_blob_id, execution_id)
                        response = requests.get(url)

                        if response.status_code == 200:
                            with open(target_image_path, "wb") as f:
                                f.write(response.content)
                            image_path = str(target_image_path)
                            logger.info(f"Generated image at {image_path}")
                        else:
                            logger.error(
                                f"Failed to download image: {response.status_code}"
                            )
                            return PipelineResult(
                                success=False,
                                original_prompt=prompt,
                                expanded_prompt=expanded_prompt,
                            )
                except Exception as e:
                    logger.error(f"Failed to download image from blob store: {e}")
                    return PipelineResult(
                        success=False,
                        original_prompt=prompt,
                        expanded_prompt=expanded_prompt,
                    )

            # Return early if we couldn't generate an image
            if not image_path:
                logger.error("Failed to generate image")
                return PipelineResult(
                    success=False,
                    original_prompt=prompt,
                    expanded_prompt=expanded_prompt,
                )

            logger.info(f"Generated image at {image_path}")

            # 3. Generate 3D model from the image
            model_params = params.get("model", {}) if params else {}
            try:
                logger.info(f"Starting 3D model generation from image: {image_path}")
                # The generate method will now handle all the asynchronous processing internally
                model_path, model_metadata_path = self.image_to_3d.generate(
                    image_path, model_params
                )

                # Verify the model was generated successfully
                if not Path(model_path).exists():
                    raise FileNotFoundError(
                        f"Generated model file not found at {model_path}"
                    )

                logger.info(f"Successfully generated 3D model at {model_path}")
                logger.info(f"Model metadata saved at {model_metadata_path}")

                # Load metadata to include additional details in the response
                try:
                    with open(model_metadata_path, "r") as f:
                        model_metadata = json.load(f)
                    logger.info(
                        f"3D model format: {model_metadata.get('format', 'unknown')}"
                    )

                    # Check for video preview
                    if model_metadata.get("has_video_preview") and model_metadata.get(
                        "video_path"
                    ):
                        logger.info(
                            f"3D model includes video preview at {model_metadata.get('video_path')}"
                        )
                except Exception as metadata_err:
                    logger.warning(f"Could not read model metadata: {metadata_err}")

                # Successful full pipeline
                return PipelineResult(
                    success=True,
                    original_prompt=prompt,
                    expanded_prompt=expanded_prompt,
                    image_path=image_path,
                    image_metadata_path=image_metadata_path,
                    model_path=model_path,
                    model_metadata_path=model_metadata_path,
                )
            except Exception as e:
                logger.error(f"Failed to generate 3D model: {e}")
                # Partial pipeline success (image only)
                return PipelineResult(
                    success=False,
                    original_prompt=prompt,
                    expanded_prompt=expanded_prompt,
                    image_path=image_path,
                    image_metadata_path=image_metadata_path,
                )

        except Exception as e:
            logger.error(f"Pipeline error: {e}")
            return PipelineResult(success=False, original_prompt=prompt)

    def health_check(self) -> Dict[str, Any]:
        """
        Check health status of all components.

        Returns:
            Dictionary with health status of each component
        """
        health = {
            "pipeline": "initializing",
            "llm": "unknown",
            "text_to_image": "unknown",
            "image_to_3d": "unknown",
        }

        # Check LLM service
        try:
            llm_health = self.llm_client.health_check()
            health["llm"] = (
                "healthy" if llm_health.get("status") == "healthy" else "unhealthy"
            )
        except Exception:
            health["llm"] = "unavailable"

        # Check text-to-image service
        try:
            # Check if the service has a connection
            if (
                hasattr(self.text_to_image, "stub")
                and hasattr(self.text_to_image.stub, "_connections")
                and self.text_to_image.app_id in self.text_to_image.stub._connections
            ):
                health["text_to_image"] = "healthy"
            else:
                health["text_to_image"] = "degraded"
        except Exception:
            health["text_to_image"] = "unavailable"

        # Check image-to-3D service
        try:
            # Check if the service has a connection
            if (
                hasattr(self.image_to_3d, "stub")
                and hasattr(self.image_to_3d.stub, "_connections")
                and self.image_to_3d.app_id in self.image_to_3d.stub._connections
            ):
                health["image_to_3d"] = "healthy"
            else:
                health["image_to_3d"] = "degraded"
        except Exception:
            health["image_to_3d"] = "unavailable"

        # Overall health
        if all(v == "healthy" for v in health.values()):
            health["pipeline"] = "healthy"
        elif "unavailable" in health.values():
            health["pipeline"] = "degraded"
        else:
            health["pipeline"] = "partially available"

        return health
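Editor's note: health_check() can serve as a readiness probe before accepting prompts. As committed, the aggregate check iterates over health.values() while the "pipeline" key still holds "initializing", so the aggregate can never read "healthy"; callers may prefer to inspect the per-component keys, as in this brief sketch (continuing from the driver above):

# Inspect per-component status rather than the aggregate "pipeline" key.
health = pipeline.health_check()
unhealthy = {k: v for k, v in health.items()
             if k != "pipeline" and v != "healthy"}
if unhealthy:
    print("degraded components:", unhealthy)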
app/core/remote.py
ADDED
@@ -0,0 +1,106 @@
from typing import Optional, Union

from openfabric_pysdk.helper import Proxy
from openfabric_pysdk.helper.proxy import ExecutionResult


class Remote:
    """
    Remote is a helper class that interfaces with an Openfabric Proxy instance
    to send input data, execute computations, and fetch results synchronously
    or asynchronously.

    Attributes:
        proxy_url (str): The URL to the proxy service.
        proxy_tag (Optional[str]): An optional tag to identify a specific proxy instance.
        client (Optional[Proxy]): The initialized proxy client instance.
    """

    # ----------------------------------------------------------------------
    def __init__(self, proxy_url: str, proxy_tag: Optional[str] = None):
        """
        Initializes the Remote instance with the proxy URL and optional tag.

        Args:
            proxy_url (str): The base URL of the proxy.
            proxy_tag (Optional[str]): An optional tag for the proxy instance.
        """
        self.proxy_url = proxy_url
        self.proxy_tag = proxy_tag
        self.client: Optional[Proxy] = None

    # ----------------------------------------------------------------------
    def connect(self) -> "Remote":
        """
        Establishes a connection with the proxy by instantiating the Proxy client.

        Returns:
            Remote: The current instance for chaining.
        """
        # self.client = Proxy(self.proxy_url, self.proxy_tag, ssl_verify=False)

        # Remove ssl_verify parameter since it's not supported by the Proxy class
        self.client = Proxy(self.proxy_url, self.proxy_tag)
        return self

    # ----------------------------------------------------------------------
    def execute(self, inputs: dict, uid: str) -> Union[ExecutionResult, None]:
        """
        Executes an asynchronous request using the proxy client.

        Args:
            inputs (dict): The input payload to send to the proxy.
            uid (str): A unique identifier for the request.

        Returns:
            Union[ExecutionResult, None]: The result of the execution, or None if not connected.
        """
        if self.client is None:
            return None

        return self.client.request(inputs, uid)

    # ----------------------------------------------------------------------
    @staticmethod
    def get_response(output: ExecutionResult) -> Union[dict, None]:
        """
        Waits for the result and processes the output.

        Args:
            output (ExecutionResult): The result returned from a proxy request.

        Returns:
            Union[dict, None]: The response data if successful, None otherwise.

        Raises:
            Exception: If the request failed or was cancelled.
        """
        if output is None:
            return None

        output.wait()
        status = str(output.status()).lower()
        if status == "completed":
            return output.data()
        if status in ("cancelled", "failed"):
            raise Exception("The request to the proxy app failed or was cancelled!")
        return None

    # ----------------------------------------------------------------------
    def execute_sync(self, inputs: dict, configs: dict, uid: str) -> Union[dict, None]:
        """
        Executes a synchronous request with configuration parameters.

        Args:
            inputs (dict): The input payload.
            configs (dict): Additional configuration parameters.
            uid (str): A unique identifier for the request.

        Returns:
            Union[dict, None]: The processed response, or None if not connected.
        """
        if self.client is None:
            return None

        output = self.client.execute(inputs, configs, uid)
        return Remote.get_response(output)
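Editor's note: Remote exposes both paths over the SDK Proxy: asynchronous (execute() returns an ExecutionResult that get_response() resolves) and synchronous (execute_sync()). A short sketch of the asynchronous path; the host and payload shape are placeholders, not values fixed by this commit:

from app.core.remote import Remote

# "<app-host>" is a placeholder for a real Openfabric app hostname.
remote = Remote("wss://<app-host>", "example-proxy").connect()
handle = remote.execute({"prompt": "a red cube"}, uid="super-user")
result = Remote.get_response(handle)  # waits; raises on failed/cancelled
print(result)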
app/core/stub.py
ADDED
@@ -0,0 +1,137 @@
import logging
from typing import Any, Dict, List, Literal, Tuple

import requests

from .remote import Remote

# Type aliases for clarity
Manifests = Dict[str, dict]
Schemas = Dict[str, Tuple[dict, dict]]
Connections = Dict[str, Remote]


class Stub:
    """
    Stub acts as a lightweight client interface that initializes remote connections
    to multiple Openfabric applications, fetching their manifests, schemas, and enabling
    execution of calls to these apps.

    Attributes:
        _schema (Schemas): Stores input/output schemas for each app ID.
        _manifest (Manifests): Stores manifest metadata for each app ID.
        _connections (Connections): Stores active Remote connections for each app ID.
    """

    # ----------------------------------------------------------------------
    def __init__(self, app_ids: List[str]):
        """
        Initializes the Stub instance by loading manifests, schemas, and connections
        for each given app ID.

        Args:
            app_ids (List[str]): A list of application identifiers (hostnames or URLs).
        """
        self._schema: Schemas = {}
        self._manifest: Manifests = {}
        self._connections: Connections = {}

        for app_id in app_ids:
            base_url = app_id.strip("/")

            try:
                # Fetch manifest
                manifest = requests.get(f"https://{base_url}/manifest").json()
                logging.info(f"[{app_id}] Manifest loaded.")
                self._manifest[app_id] = manifest

                # Fetch input schema
                input_schema = requests.get(
                    f"https://{base_url}/schema?type=input"
                ).json()
                logging.info(f"[{app_id}] Input schema loaded.")

                # Fetch output schema
                output_schema = requests.get(
                    f"https://{base_url}/schema?type=output"
                ).json()
                logging.info(f"[{app_id}] Output schema loaded.")
                self._schema[app_id] = (input_schema, output_schema)

                # Establish Remote WebSocket connection
                self._connections[app_id] = Remote(
                    f"wss://{base_url}", f"{app_id}-proxy"
                ).connect()
                logging.info(f"[{app_id}] Connection established.")
            except Exception as e:
                logging.error(f"[{app_id}] Initialization failed: {e}")

    # ----------------------------------------------------------------------
    def call(self, app_id: str, data: Any, uid: str = "super-user") -> dict:
        """
        Sends a request to the specified app via its Remote connection.

        Args:
            app_id (str): The application ID to route the request to.
            data (Any): The input data to send to the app.
            uid (str): The unique user/session identifier for tracking (default: 'super-user').

        Returns:
            dict: The output data returned by the app.

        Raises:
            Exception: If no connection is found for the provided app ID, or execution fails.
        """
        connection = self._connections.get(app_id)
        if not connection:
            raise Exception(f"Connection not found for app ID: {app_id}")

        try:
            handler = connection.execute(data, uid)
            result = connection.get_response(handler)
            logging.info(f"[{app_id}] Output: {result}")
            return result
        except Exception as e:
            logging.error(f"[{app_id}] Execution failed: {e}")
            raise

    # ----------------------------------------------------------------------
    def manifest(self, app_id: str) -> dict:
        """
        Retrieves the manifest metadata for a specific application.

        Args:
            app_id (str): The application ID for which to retrieve the manifest.

        Returns:
            dict: The manifest data for the app, or an empty dictionary if not found.
        """
        return self._manifest.get(app_id, {})

    # ----------------------------------------------------------------------
    def schema(self, app_id: str, type: Literal["input", "output"]) -> dict:
        """
        Retrieves the input or output schema for a specific application.

        Args:
            app_id (str): The application ID for which to retrieve the schema.
            type (Literal['input', 'output']): The type of schema to retrieve.

        Returns:
            dict: The requested schema (input or output).

        Raises:
            ValueError: If the schema type is invalid or the schema is not found.
        """
        _input, _output = self._schema.get(app_id, (None, None))

        if type == "input":
            if _input is None:
                raise ValueError(f"Input schema not found for app ID: {app_id}")
            return _input
        elif type == "output":
            if _output is None:
                raise ValueError(f"Output schema not found for app ID: {app_id}")
            return _output
        else:
            raise ValueError("Type must be either 'input' or 'output'")
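
For reference, a short sketch of how Stub is driven (mirroring the calls made by main.py and the pipeline; the app ID is the text-to-image default from the .env files, and the payload shape follows _prepare_request in text_to_image.py):

# Sketch only: one app, fetch its input schema, send a request.
APP_ID = "c25dcd829d134ea98f5ae4dd311d13bc.node3.openfabric.network"

stub = Stub([APP_ID])
input_schema = stub.schema(APP_ID, "input")
result = stub.call(APP_ID, {"prompt": "a red cube on a marble table"})
# For the real Openfabric service, result typically carries a 'result' blob ID.
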
app/core/text_to_image.py
ADDED
@@ -0,0 +1,259 @@
import os
import logging
import base64
from typing import Dict, Optional, Any, Tuple
import json
from pathlib import Path
import time
import uuid
import random
from dotenv import load_dotenv

from .stub import Stub

load_dotenv()

logger = logging.getLogger(__name__)


class TextToImageGenerator:
    """
    Handles the text-to-image generation using Openfabric's API.
    """

    def __init__(self, stub: Stub, app_id: str = None):
        """
        Initialize the text-to-image generator.

        Args:
            stub: Stub instance for communicating with Openfabric
            app_id: The app ID for the text-to-image service (default: from env var)
        """
        self.stub = stub
        self.app_id = app_id or os.environ.get("TEXT_TO_IMAGE_APP_ID")

        # Use default output directory if IMAGE_OUTPUT_DIR is not set
        image_output_dir = os.environ.get("IMAGE_OUTPUT_DIR")
        if image_output_dir is None:
            # Default to app/data/images
            self.output_dir = Path(__file__).parent.parent / "data" / "images"
            logger.warning(
                f"IMAGE_OUTPUT_DIR not set, using default: {self.output_dir}"
            )
        else:
            self.output_dir = Path(image_output_dir)

        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Cache the schema and manifest - don't raise exceptions to allow fallback mode
        try:
            self.input_schema = self.stub.schema(self.app_id, "input")
            self.output_schema = self.stub.schema(self.app_id, "output")
            self.manifest = self.stub.manifest(self.app_id)
            logger.info(
                f"Successfully loaded schema and manifest for text-to-image app: {self.app_id}"
            )
        except Exception as e:
            logger.warning(f"Failed to load schema/manifest for text-to-image app: {e}")

    def generate(
        self,
        prompt: str,
        params: Optional[Dict[str, Any]] = None,
        original_prompt: str = None,
    ) -> Tuple[str, str]:
        """
        Generate an image from text prompt.

        Args:
            prompt: The text prompt (expanded by LLM)
            params: Additional parameters for image generation
            original_prompt: The original user prompt (used for naming files)

        Returns:
            Tuple of (image_path, metadata_path)
        """
        # Use original prompt for naming if provided, otherwise use expanded prompt
        file_naming_prompt = original_prompt if original_prompt else prompt

        # Prepare the request based on the input schema
        request_data = self._prepare_request(prompt, params)

        # Log the request
        logger.info(f"Sending text-to-image request with prompt: {prompt[:100]}...")

        # Send the request to Openfabric
        result = None
        try:
            start_time = time.time()
            result = self.stub.call(self.app_id, request_data)
            generation_time = time.time() - start_time
            logger.info(f"Text-to-image generation completed in {generation_time:.2f}s")
        except Exception as e:
            logger.error(f"Failed to generate image: {e}")
            # Generate a mock response to continue testing

            # result = self._generate_mock_response(prompt, request_data)
            # logger.warning("Using mock image response due to service error")

        # Process and save the result
        return self._process_result(result, prompt, file_naming_prompt)

    def _generate_mock_response(
        self, prompt: str, request_data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Generate a mock image response when the service is unavailable.

        Args:
            prompt: The text prompt
            request_data: The original request data

        Returns:
            A mock response with a simple image
        """
        # Create a 1x1 transparent PNG as mock image
        mock_image = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="

        return {
            "image": mock_image,
            "parameters": {
                "prompt": prompt,
                "width": request_data.get("width", 512),
                "height": request_data.get("height", 512),
                "steps": request_data.get("num_inference_steps", 30),
                "guidance_scale": request_data.get("guidance_scale", 7.5),
                "seed": request_data.get("seed", random.randint(1000, 9999)),
            },
        }

    def _prepare_request(
        self, prompt: str, params: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Prepare the request payload based on the app's input schema.
        """
        # Default parameters
        default_params = {
            "width": 512,
            "height": 512,
            "guidance_scale": 7.5,
            "num_inference_steps": 30,
            "seed": -1,  # Random seed
            "negative_prompt": "blurry, low quality, distorted, deformed",
        }

        # Override defaults with provided params
        request_params = {**default_params, **(params or {})}

        # Create request based on schema
        request = {"prompt": prompt, **request_params}

        return request

    def _process_result(
        self, result: Dict[str, Any], prompt: str, file_naming_prompt: str
    ) -> Tuple[str, str]:
        """
        Process the result from the text-to-image app.

        Args:
            result: The API response
            prompt: The original prompt
            file_naming_prompt: The prompt used for naming files

        Returns:
            Tuple of (image_path, metadata_path)
        """
        # Extract image data or blob ID
        try:
            # Generate a unique ID for this image
            image_id = str(uuid.uuid4())
            timestamp = int(time.time())

            # Create a more descriptive base filename from the prompt
            if file_naming_prompt:
                # Use first 15 chars of prompt, replacing spaces with underscores
                base_name = (
                    file_naming_prompt[:15].strip().replace(" ", "_").replace("/", "_")
                )
                # Remove any other non-alphanumeric characters
                base_name = "".join(c for c in base_name if c.isalnum() or c == "_")
            else:
                base_name = f"image_{timestamp}"

            # Create paths for metadata
            metadata_filename = f"{base_name}_{timestamp}.json"
            metadata_path = self.output_dir / metadata_filename

            # Handle real Openfabric response format (which has 'result' field)
            if "result" in result:
                # Log the result ID for reference
                blob_id = result.get("result")
                logger.info(f"Image generation result ID: {blob_id}")

                # Create metadata for the image that includes the blob ID
                # We won't create actual image file path yet since it will be downloaded
                metadata = {
                    "id": image_id,
                    "timestamp": timestamp,
                    "prompt": prompt,
                    "parameters": result.get("parameters", {}),
                    "result_id": blob_id,
                    "type": "image",
                    "needs_download": True,
                    "base_name": base_name,
                }

                with open(metadata_path, "w") as meta_file:
                    json.dump(metadata, meta_file, indent=2)

                logger.info(f"Image metadata saved with result ID: {blob_id}")
                logger.info(f"Use blob_viewer.py to download the actual image")

                # Return the metadata path but no image path since it needs to be downloaded
                return None, str(metadata_path)

            # If we have direct image data (which would be rare in real use)
            elif "image" in result:
                # This is the fallback case if we somehow receive direct image data
                image_filename = f"{base_name}_{timestamp}.png"
                image_path = self.output_dir / image_filename

                image_data = result.get("image")
                if isinstance(image_data, str) and image_data.startswith("data:image"):
                    # Extract base64 data after the comma
                    image_base64 = image_data.split(",", 1)[1]
                else:
                    image_base64 = image_data

                # Save the image
                image_bytes = base64.b64decode(image_base64)
                with open(image_path, "wb") as img_file:
                    img_file.write(image_bytes)

                # Save metadata
                metadata = {
                    "id": image_id,
                    "timestamp": timestamp,
                    "prompt": prompt,
                    "parameters": result.get("parameters", {}),
                    "file_path": str(image_path),
                    "type": "image",
                    "direct_image": True,
                }

                with open(metadata_path, "w") as meta_file:
                    json.dump(metadata, meta_file, indent=2)

                logger.info(f"Direct image data saved to {image_path}")
                return str(image_path), str(metadata_path)

            else:
                raise KeyError(
                    f"Unexpected response format. Response keys: {list(result.keys())}"
                )

        except Exception as e:
            logger.error(f"Failed to process image result: {e}")
            raise
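
A usage sketch for TextToImageGenerator (assumes a working Stub as above). Note one caveat visible in the code: with the mock fallback commented out, a failed stub.call leaves result as None and _process_result will raise on the "result" in result check.

# Sketch only: stub is a core.stub.Stub built with TEXT_TO_IMAGE_APP_ID.
generator = TextToImageGenerator(stub)
image_path, metadata_path = generator.generate(
    prompt="A vast cyberpunk city at dusk, neon reflections on wet streets",
    original_prompt="cyberpunk city",
)
# image_path is None for the usual blob-ID response; metadata_path points at a
# JSON record whose result_id can be fetched with tools/blob_viewer.py.
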
app/data/.DS_Store
ADDED
Binary file (6.15 kB).
app/llm/.DS_Store
ADDED
Binary file (6.15 kB).
app/llm/__init__.py
ADDED
File without changes
app/llm/client.py
ADDED
@@ -0,0 +1,105 @@
import requests
import logging
from typing import Optional, Dict, Any

logger = logging.getLogger(__name__)


class LLMClient:
    """
    Client for interacting with the LLM service.
    Provides methods to generate text and expand creative prompts.
    """

    def __init__(self, base_url: str = "http://localhost:8001"):
        """
        Initialize the LLM client.

        Args:
            base_url: Base URL of the LLM service
        """
        self.base_url = base_url
        self.session = requests.Session()

    def generate(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
    ) -> str:
        """
        Generate text based on a prompt.

        Args:
            prompt: The user prompt to generate from
            system_prompt: Optional system prompt to guide the generation
            max_tokens: Maximum number of tokens to generate
            temperature: Sampling temperature (higher = more creative)
            top_p: Top-p sampling parameter

        Returns:
            The generated text

        Raises:
            Exception: If the request fails
        """
        payload = {
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
        }

        if system_prompt:
            payload["system_prompt"] = system_prompt

        try:
            response = self.session.post(f"{self.base_url}/generate", json=payload)
            response.raise_for_status()
            return response.json()["text"]
        except requests.RequestException as e:
            logger.error(f"Failed to generate text: {str(e)}")
            raise Exception(f"LLM service error: {str(e)}")

    def expand_prompt(self, prompt: str) -> str:
        """
        Expand a creative prompt with rich details.

        Args:
            prompt: The user's original prompt

        Returns:
            An expanded, detailed creative prompt

        Raises:
            Exception: If the request fails
        """
        try:
            response = self.session.post(
                f"{self.base_url}/expand", json={"prompt": prompt}
            )
            response.raise_for_status()
            return response.json()["text"]
        except requests.RequestException as e:
            logger.error(f"Failed to expand prompt: {str(e)}")
            raise Exception(f"LLM service error: {str(e)}")

    def health_check(self) -> Dict[str, Any]:
        """
        Check if the LLM service is healthy.

        Returns:
            Health status information

        Raises:
            Exception: If the health check fails
        """
        try:
            response = self.session.get(f"{self.base_url}/health")
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            logger.error(f"Health check failed: {str(e)}")
            raise Exception(f"LLM service error: {str(e)}")
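
A minimal client-side sketch against the service defined in app/llm/service.py below (assumes the service is running on its default port 8001):

# Sketch only: check health, then expand a prompt.
client = LLMClient(base_url="http://localhost:8001")
if client.health_check().get("status") == "healthy":
    expanded = client.expand_prompt("a tiny cottage in a forest")
    print(expanded)
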
app/llm/model.py
ADDED
@@ -0,0 +1,237 @@
import os
from typing import Dict, List, Optional, Union
import logging
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoConfig

logger = logging.getLogger(__name__)


class LocalLLM:
    """
    A wrapper for running local LLMs using the Hugging Face Transformers library.
    Optimized for creative prompt expansion and interpretation.
    """

    def __init__(
        self,
        model_path: str = "meta-llama/Llama-3.2-3B-Instruct",
        device_map: str = "auto",
        torch_dtype=None,
    ):
        """
        Initialize the local LLM.

        Args:
            model_path: Path to model or HuggingFace model ID
            device_map: Device mapping strategy (default: "auto")
            torch_dtype: Torch data type (default: bfloat16 if available, otherwise float16)
        """
        self.model_path = model_path
        self.device_map = device_map

        if torch_dtype is None:
            # Set default dtype based on device
            if device_map == "mps":
                # Apple Silicon uses float16
                self.torch_dtype = torch.float16
            elif (
                torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8
            ):
                # Modern NVIDIA GPUs use bfloat16
                self.torch_dtype = torch.bfloat16
            else:
                # Default to float16 for other cases
                self.torch_dtype = torch.float16
        else:
            self.torch_dtype = torch_dtype

        logger.info(f"Loading LLM from {model_path}")
        logger.info(f"Using device: {device_map}, dtype: {self.torch_dtype}")

        try:
            # Load model and tokenizer directly instead of using pipeline
            # This gives us more control over the configuration

            # First, load and fix the config
            config = AutoConfig.from_pretrained(model_path)

            # Fix the rope_scaling issue for Llama models
            if hasattr(config, "rope_scaling") and isinstance(
                config.rope_scaling, dict
            ):
                # Ensure the type key exists and is set to linear
                config.rope_scaling["type"] = "linear"
                logger.info("Fixed rope_scaling configuration with type=linear")
            elif not hasattr(config, "rope_scaling"):
                # If no rope_scaling exists, add a basic one
                config.rope_scaling = {"type": "linear", "factor": 1.0}
                logger.info("Added default rope_scaling configuration")

            # Load the tokenizer
            tokenizer = AutoTokenizer.from_pretrained(model_path)

            # Load the model with our fixed config
            if device_map == "mps":
                # For Apple Silicon, load to device directly
                model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    config=config,
                    torch_dtype=self.torch_dtype,
                    device_map={"": "mps"},  # Map all modules to MPS device
                )
            else:
                # For other devices, use the device_map parameter
                model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    config=config,
                    torch_dtype=self.torch_dtype,
                    device_map=device_map,
                )

            # Create the pipeline with our pre-loaded model and tokenizer
            self.pipe = pipeline(
                "text-generation", model=model, tokenizer=tokenizer, framework="pt"
            )

            logger.info("LLM loaded successfully")

        except Exception as e:
            logger.error(f"Failed to load model: {str(e)}")
            raise

    def generate(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
    ) -> str:
        """
        Generate text based on a prompt with the local LLM.

        Args:
            prompt: The user prompt to generate from
            system_prompt: Optional system prompt to guide the generation
            max_tokens: Maximum number of tokens to generate
            temperature: Sampling temperature (higher = more creative)
            top_p: Top-p sampling parameter

        Returns:
            The generated text
        """
        # Format messages for chat-style models
        messages = []

        # Add system prompt if provided
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        # Add user prompt
        messages.append({"role": "user", "content": prompt})

        logger.debug(f"Generating with prompt: {prompt[:100]}...")

        try:
            # Generate response using the pipeline
            outputs = self.pipe(
                messages,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=True,
            )

            # Extract the assistant's response
            response = outputs[0]["generated_text"][-1]["content"]
            return response

        except Exception as e:
            logger.error(f"Error during generation: {str(e)}")
            return ""

    def expand_creative_prompt(self, prompt: str) -> str:
        """
        Specifically designed to expand a user prompt into a more detailed,
        creative description suitable for image generation.

        Args:
            prompt: The user's original prompt

        Returns:
            An expanded, detailed creative prompt
        """
        system_prompt = """You are a creative assistant specializing in enhancing text prompts for image and 3D model generation.
When given a simple prompt, expand it with rich, vivid details about:
- Visual elements and composition
- Lighting, colors, and atmosphere
- Style, mood, and artistic influence
- Textures and materials
- Perspective and framing

Keep your response focused only on the enhanced visual description without explanations or comments.
Limit to 3-4 sentences maximum, ensuring it's concise yet richly detailed."""

        # Generate the expanded prompt
        expanded = self.generate(
            prompt=prompt,
            system_prompt=system_prompt,
            max_tokens=256,
            temperature=0.8,  # Slightly higher temperature for creativity
        )

        logger.info(f"Expanded prompt: {expanded[:100]}...")
        return expanded


def get_llm_instance(model_path: Optional[str] = None) -> LocalLLM:
    """
    Factory function to get a LocalLLM instance with default settings.

    Args:
        model_path: Optional path to model or HuggingFace model ID

    Returns:
        A LocalLLM instance
    """
    # If model path not provided, first check for MODEL_PATH, then MODEL_ID from environment
    if not model_path:
        model_path = os.environ.get("MODEL_PATH") or os.environ.get(
            "MODEL_ID", "meta-llama/Llama-3.2-3B-Instruct"
        )

    # Check if the provided path is a local directory
    if os.path.isdir(model_path):
        logger.info(f"Using local model directory: {model_path}")
    else:
        logger.info(f"Using model ID from Hugging Face: {model_path}")

    # Check available device backends
    device_map = "auto"
    torch_dtype = None

    # Check for Apple Silicon (M1/M2/M3) MPS support
    if torch.backends.mps.is_available():
        logger.info(
            "Apple Silicon MPS is available. Using MPS backend for accelerated inference."
        )
        device_map = "mps"
        torch_dtype = torch.float16
    # Otherwise check if CUDA is available
    elif torch.cuda.is_available():
        logger.info(f"CUDA is available. Using {torch.cuda.get_device_name(0)}")
        if torch.cuda.get_device_capability()[0] >= 8:
            # For Ampere architecture (30XX, A100, etc.) use bfloat16
            torch_dtype = torch.bfloat16
        else:
            # For older architectures use float16
            torch_dtype = torch.float16
    else:
        logger.warning(
            "No GPU acceleration available. Using CPU. This may be slow for inference."
        )

    return LocalLLM(
        model_path=model_path, device_map=device_map, torch_dtype=torch_dtype
    )
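
A short sketch of loading and querying the model through the factory (device and dtype selection happens inside get_llm_instance; MODEL_PATH/MODEL_ID are read from the environment as above):

# Sketch only: downloads the model on first use, so HF_TOKEN may be required.
llm = get_llm_instance()  # picks MPS, CUDA, or CPU automatically
text = llm.generate(
    prompt="Describe a misty mountain lake.",
    system_prompt="You are a concise visual describer.",
    max_tokens=128,
)
expanded = llm.expand_creative_prompt("misty mountain lake")
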
app/llm/service.py
ADDED
@@ -0,0 +1,213 @@
import os
import logging
import time
import sys
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional
import psutil
import uvicorn
from dotenv import load_dotenv
from pathlib import Path
from model import LocalLLM, get_llm_instance

# Configure logging first
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler(os.path.join(os.path.dirname(__file__), "llm_service.log")),
    ],
)
logger = logging.getLogger("llm_service")

# Try to load .env file from project root
env_path = Path(__file__).parents[2] / ".env"
if env_path.exists():
    load_dotenv(dotenv_path=env_path)
    logger.info(f"Loaded environment variables from {env_path}")


# Initialize FastAPI app
app = FastAPI(title="Local LLM Service", description="API for local LLM interaction")

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Request timing middleware
@app.middleware("http")
async def log_requests(request: Request, call_next):
    start_time = time.time()
    logger.info(f"Request started: {request.method} {request.url.path}")

    response = await call_next(request)

    process_time = time.time() - start_time
    logger.info(
        f"Request completed: {request.method} {request.url.path} - Status: {response.status_code} - Duration: {process_time:.4f}s"
    )

    return response


# Model request and response classes
class PromptRequest(BaseModel):
    prompt: str
    system_prompt: Optional[str] = None
    max_tokens: int = 512
    temperature: float = 0.7
    top_p: float = 0.9


class ExpandRequest(BaseModel):
    prompt: str


class LLMResponse(BaseModel):
    text: str


# Global LLM instance
llm = None


@app.on_event("startup")
async def startup_event():
    """Initialize the LLM on startup"""
    global llm
    logger.info("Starting LLM service initialization...")

    # First check for MODEL_PATH (local model), then fall back to MODEL_ID
    model_path = os.environ.get("MODEL_PATH")
    if model_path and os.path.isdir(model_path):
        logger.info(f"Using local model from MODEL_PATH: {model_path}")
    else:
        # Fall back to MODEL_ID if MODEL_PATH isn't set or doesn't exist
        model_path = os.environ.get("MODEL_ID", "meta-llama/Llama-3.2-3B-Instruct")
        logger.info(f"Using model ID from Hugging Face: {model_path}")

    try:
        start_time = time.time()
        llm = get_llm_instance(model_path)
        init_time = time.time() - start_time

        logger.info(
            f"LLM initialized successfully with model: {model_path} in {init_time:.2f} seconds"
        )

        memory = psutil.virtual_memory()
        logger.info(
            f"System memory: {memory.percent}% used ({memory.used / (1024**3):.1f}GB / {memory.total / (1024**3):.1f}GB)"
        )

    except Exception as e:
        logger.error(f"Failed to initialize LLM: {str(e)}", exc_info=True)
        raise


@app.post("/generate", response_model=LLMResponse)
async def generate_text(request: PromptRequest):
    """Generate text based on a prompt"""
    logger.info(
        f"Received text generation request, prompt length: {len(request.prompt)} chars"
    )
    logger.debug(f"Prompt: {request.prompt[:50]}...")

    if not llm:
        logger.error("LLM service not initialized when generate endpoint was called")
        raise HTTPException(status_code=503, detail="LLM service not initialized")

    try:
        start_time = time.time()

        logger.info(
            f"Generation parameters: max_tokens={request.max_tokens}, temperature={request.temperature}, top_p={request.top_p}"
        )

        response = llm.generate(
            prompt=request.prompt,
            system_prompt=request.system_prompt,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
        )

        generation_time = time.time() - start_time
        response_length = len(response)

        logger.info(
            f"Text generation completed in {generation_time:.2f}s, response length: {response_length} chars"
        )
        logger.debug(f"Generated response: {response[:50]}...")

        return LLMResponse(text=response)
    except Exception as e:
        logger.error(f"Error generating text: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/expand", response_model=LLMResponse)
async def expand_prompt(request: ExpandRequest):
    """Expand a creative prompt with rich details"""
    logger.info(f"Received prompt expansion request, prompt: '{request.prompt}'")

    if not llm:
        logger.error("LLM service not initialized when expand endpoint was called")
        raise HTTPException(status_code=503, detail="LLM service not initialized")

    try:
        start_time = time.time()

        expanded = llm.expand_creative_prompt(request.prompt)

        expansion_time = time.time() - start_time
        expanded_length = len(expanded)

        logger.info(
            f"Prompt expansion completed in {expansion_time:.2f}s, original length: {len(request.prompt)}, expanded length: {expanded_length}"
        )
        logger.debug(f"Original: '{request.prompt}'")
        logger.debug(f"Expanded: '{expanded}'")

        return LLMResponse(text=expanded)
    except Exception as e:
        logger.error(f"Error expanding prompt: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    logger.debug("Health check endpoint called")

    if llm:
        logger.info(f"Health check: LLM service is healthy, model: {llm.model_path}")
        return {"status": "healthy", "model": llm.model_path}

    logger.warning("Health check: LLM service is still initializing")
    return {"status": "initializing"}


# Start the service if run directly
if __name__ == "__main__":

    # Check for psutil dependency
    try:
        import psutil
    except ImportError:
        logger.warning(
            "psutil not installed. Some system resource metrics will not be available."
        )
        logger.warning("Install with: pip install psutil")

    logger.info("Starting LLM service server")
    uvicorn.run(app, host="0.0.0.0", port=8001)
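
For reference, the service can also be exercised with plain requests calls, independent of LLMClient (a sketch; the payload fields follow the PromptRequest model above):

# Sketch only: raw HTTP call against a locally running service.
import requests

resp = requests.post(
    "http://localhost:8001/generate",
    json={"prompt": "a castle on a cliff", "max_tokens": 128, "temperature": 0.7},
)
resp.raise_for_status()
print(resp.json()["text"])
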
app/main.py
ADDED
@@ -0,0 +1,113 @@
import logging
import os
from typing import Dict
from pathlib import Path
from dotenv import load_dotenv

from ontology_dc8f06af066e4a7880a5938933236037.config import ConfigClass
from ontology_dc8f06af066e4a7880a5938933236037.input import InputClass
from ontology_dc8f06af066e4a7880a5938933236037.output import OutputClass
from openfabric_pysdk.context import State
from core.stub import Stub
from core.pipeline import CreativePipeline


load_dotenv()

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Configurations for the app
configurations: Dict[str, ConfigClass] = dict()


############################################################
# Config callback function
############################################################
def config(configuration: Dict[str, ConfigClass], state: State) -> None:
    """
    Stores user-specific configuration data.

    Args:
        configuration (Dict[str, ConfigClass]): A mapping of user IDs to configuration objects.
        state (State): The current state of the application (not used in this implementation).
    """
    for uid, conf in configuration.items():
        logging.info(f"Saving new config for user with id:'{uid}'")
        configurations[uid] = conf


############################################################
# Execution callback function
############################################################
def execute(request_data) -> None:
    """
    Main execution entry point for handling a model pass.

    Args:
        request_data: The object containing request and response structures.
    """

    # Retrieve input
    request: InputClass = request_data.request
    user_prompt = request.prompt

    # Log the incoming request
    logger.info(f"Received request with prompt: '{user_prompt}'")

    # Retrieve user config
    user_config: ConfigClass = configurations.get("super-user", None)
    logger.info(f"Using configuration: {configurations}")

    # Initialize the Stub with app IDs
    app_ids = user_config.app_ids if user_config else []

    # Make sure app IDs are available
    if not app_ids:
        text_to_image_app_id = os.environ.get("TEXT_TO_IMAGE_APP_ID")
        image_to_3d_app_id = os.environ.get("IMAGE_TO_3D_APP_ID")
        app_ids = [text_to_image_app_id, image_to_3d_app_id]
        logger.info(
            f"No app_ids found in config, using environment defaults: {app_ids}"
        )

    stub = Stub(app_ids)

    # Create the creative pipeline
    pipeline = CreativePipeline(stub)

    # Execute the creative pipeline
    try:
        logger.info(f"Executing creative pipeline for prompt: '{user_prompt}'")
        result = pipeline.create(prompt=user_prompt)

        if result.success:
            response_message = (
                f"Created successfully! From your prompt '{user_prompt}', "
                f"I generated an image and a 3D model."
            )
            logger.info(f"Pipeline completed successfully: {result.to_dict()}")
        else:
            if result.image_path:
                response_message = (
                    f"Partially completed. I was able to generate an image from "
                    f"your prompt '{user_prompt}', but couldn't create the 3D model."
                )
                logger.warning(f"Pipeline partially completed: {result.to_dict()}")
            else:
                response_message = (
                    f"I'm sorry, I couldn't process your request '{user_prompt}'. "
                    f"Please try again with a different description."
                )
                logger.error(f"Pipeline failed: {result.to_dict()}")
    except Exception as e:
        logger.error(f"Error executing pipeline: {e}")
        response_message = f"An error occurred while processing your request: {str(e)}"

    # Prepare response
    response: OutputClass = request_data.response
    response.message = response_message
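
A hypothetical local smoke test for execute(). The SimpleNamespace stand-in mimics the request/response object the Openfabric SDK passes in; it is not the SDK's real type, and running this will make real network calls through Stub:

# Sketch only: fake request_data with .request.prompt and .response.message.
from types import SimpleNamespace

request_data = SimpleNamespace(
    request=SimpleNamespace(prompt="a glowing glass chess set"),
    response=SimpleNamespace(message=None),
)
execute(request_data)
print(request_data.response.message)
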
app/ontology_dc8f06af066e4a7880a5938933236037/__init__.py
ADDED
File without changes
app/ontology_dc8f06af066e4a7880a5938933236037/config.py
ADDED
@@ -0,0 +1,25 @@
from dataclasses import dataclass
from typing import List

from marshmallow import Schema, fields, post_load

from openfabric_pysdk.utility import SchemaUtil


################################################################
# Config concept class - AUTOGENERATED
################################################################
@dataclass
class ConfigClass:
    app_ids: List[str] = None


################################################################
# ConfigSchema concept class - AUTOGENERATED
################################################################
class ConfigClassSchema(Schema):
    app_ids = fields.List(fields.String())

    @post_load
    def create(self, data, **kwargs):
        return SchemaUtil.create(ConfigClass(), data)
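
The same load pattern applies to all three autogenerated ontology classes (config, input, output below); a quick sketch with ConfigClassSchema:

# Sketch only: marshmallow deserializes the dict into a ConfigClass via post_load.
schema = ConfigClassSchema()
conf = schema.load(
    {"app_ids": ["c25dcd829d134ea98f5ae4dd311d13bc.node3.openfabric.network"]}
)
assert conf.app_ids[0].endswith("openfabric.network")
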
app/ontology_dc8f06af066e4a7880a5938933236037/input.py
ADDED
@@ -0,0 +1,28 @@
from decimal import *
from datetime import *
from typing import *

from dataclasses import dataclass
from marshmallow import Schema, fields, post_load
from openfabric_pysdk.utility import SchemaUtil


################################################################
# Input concept class - AUTOGENERATED
################################################################
@dataclass
class InputClass:
    prompt: str = None
    attachments: List[str] = None


################################################################
# InputSchema concept class - AUTOGENERATED
################################################################
class InputClassSchema(Schema):
    prompt = fields.String(allow_none=True)
    attachments = fields.List(fields.String(allow_none=True), allow_none=True)

    @post_load
    def create(self, data, **kwargs):
        return SchemaUtil.create(InputClass(), data)
app/ontology_dc8f06af066e4a7880a5938933236037/output.py
ADDED
@@ -0,0 +1,25 @@
from dataclasses import dataclass
from marshmallow import Schema, fields, post_load

# Removed import as Resource doesn't exist in this version of openfabric_pysdk
# from openfabric_pysdk.fields import Resource
from openfabric_pysdk.utility import SchemaUtil


################################################################
# Output concept class - AUTOGENERATED
################################################################
@dataclass
class OutputClass:
    message: str = None


################################################################
# OutputSchema concept class - AUTOGENERATED
################################################################
class OutputClassSchema(Schema):
    message = fields.Str(allow_none=True)

    @post_load
    def create(self, data, **kwargs):
        return SchemaUtil.create(OutputClass(), data)
app/tools/__init__.py
ADDED
File without changes
app/tools/blob_viewer.py
ADDED
@@ -0,0 +1,216 @@
#!/usr/bin/env python3
"""
Openfabric Blob Viewer - A utility for viewing and downloading resources from Openfabric

Usage:
    python blob_viewer.py view <data_blob_id> [<execution_id>]
    python blob_viewer.py download <data_blob_id> [<execution_id>]

Examples:
    # View an image directly in browser
    python blob_viewer.py view data_blob_1d9d210d20c1e75ea6a3855b6d10341fd8f125b49866b61b7ae94f8fa4bffd49 2d529306be574949a2a3d2f9d9e4082b

    # Download a resource
    python blob_viewer.py download data_blob_1d9d210d20c1e75ea6a3855b6d10341fd8f125b49866b61b7ae94f8fa4bffd49 2d529306be574949a2a3d2f9d9e4082b
"""

import os
import sys
import argparse
import webbrowser
import requests
from pathlib import Path
from dotenv import load_dotenv
import base64
import json
from datetime import datetime

# Make sure we can import from our app
sys.path.insert(0, str(Path(__file__).parent.parent))

# Load environment variables
load_dotenv()

# Configure output directory for downloads
OUTPUT_DIR = Path(__file__).parent.parent / "data" / "downloads"
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

# Get app IDs from environment
TEXT_TO_IMAGE_APP_ID = os.environ.get("TEXT_TO_IMAGE_APP_ID")
IMAGE_TO_3D_APP_ID = os.environ.get("IMAGE_TO_3D_APP_ID")


def construct_resource_url(data_blob_id, execution_id=None):
    """
    Construct the URL for accessing a resource from a data_blob ID

    Args:
        data_blob_id: The data_blob ID (can be full path or just the ID)
        execution_id: Optional execution ID

    Returns:
        URL to access the resource
    """
    # Extract the actual blob ID if provided with path format
    if "/" in data_blob_id:
        parts = data_blob_id.split("/")
        data_blob_id = parts[0]
        if len(parts) > 2 and not execution_id:
            execution_id = parts[2]

    # Create the reid parameter value
    reid = data_blob_id
    if execution_id:
        reid = f"{data_blob_id}/executions/{execution_id}"

    # Format the URL correctly based on the example
    base_url = f"https://{TEXT_TO_IMAGE_APP_ID}/resource?reid={reid}"

    return base_url


def open_in_browser(data_blob_id, execution_id=None):
    """Open a resource directly in the web browser"""
    url = construct_resource_url(data_blob_id, execution_id)
    print(f"Opening URL in browser: {url}")
    webbrowser.open(url)


def download_resource(
    data_blob_id, execution_id=None, prompt=None, target_dir=None, metadata=None
):
    """
    Download a resource from the given data_blob ID

    Args:
        data_blob_id: The data_blob ID
        execution_id: Optional execution ID
        prompt: Optional prompt text to use in filename
        target_dir: Optional target directory to save to (defaults to downloads)
        metadata: Optional metadata to save alongside the downloaded file
    """
    url = construct_resource_url(data_blob_id, execution_id)

    # Use downloads directory as default if not specified
    if target_dir:
        output_dir = Path(target_dir)
    else:
        output_dir = OUTPUT_DIR

    # Ensure the output directory exists
    output_dir.mkdir(parents=True, exist_ok=True)

    try:
        print(f"Downloading from: {url}")
        response = requests.get(url)

        if response.status_code == 200:
            # Determine content type and extension
            content_type = response.headers.get(
                "Content-Type", "application/octet-stream"
            )

            # Choose file extension based on content type
            extension = "bin"  # Default extension
            if "image/png" in content_type:
                extension = "png"
            elif "image/jpeg" in content_type:
                extension = "jpg"
            elif (
                "model/gltf+json" in content_type or "application/json" in content_type
            ):
                extension = "gltf"
            elif "model/gltf-binary" in content_type:
                extension = "glb"

            # Create filename based on prompt if available
            if prompt:
                # Use first 15 chars of prompt, replacing spaces with underscores
                base_name = prompt[:15].strip().replace(" ", "_").replace("/", "_")
                # Remove any other non-alphanumeric characters
                base_name = "".join(c for c in base_name if c.isalnum() or c == "_")

                # Add timestamp for uniqueness
                timestamp = int(datetime.now().timestamp())
                filename = f"{base_name}_{timestamp}.{extension}"

                # Also create metadata filename
                metadata_filename = f"{base_name}_{timestamp}.json"
            else:
                # Fallback to timestamp and blob ID if no prompt
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                clean_blob_id = data_blob_id.replace("/", "_")

                if execution_id:
                    filename = f"{timestamp}_{clean_blob_id[:8]}_{execution_id[:8]}.{extension}"
                else:
                    filename = f"{timestamp}_{clean_blob_id[:8]}.{extension}"

                # Also create metadata filename
                metadata_filename = filename.replace(f".{extension}", ".json")

            output_path = output_dir / filename
            metadata_path = output_dir / metadata_filename

            # Save the file
            with open(output_path, "wb") as f:
                f.write(response.content)

            # Create and save metadata
            if metadata:
                metadata["download_timestamp"] = int(datetime.now().timestamp())
                metadata["download_source"] = url
                metadata["file_path"] = str(output_path)

                with open(metadata_path, "w") as f:
                    json.dump(metadata, f, indent=2)

            print(f"Successfully downloaded to: {output_path}")
            return str(output_path)
        else:
            print(f"Failed to download resource. Status code: {response.status_code}")
            print(f"Response: {response.text}")
            return None

    except Exception as e:
        print(f"Error downloading resource: {str(e)}")
        return None


def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description="Openfabric Blob Viewer")
    subparsers = parser.add_subparsers(dest="command", help="Command to run")

    # View command
    view_parser = subparsers.add_parser("view", help="View a blob in browser")
    view_parser.add_argument("data_blob_id", help="Blob ID or full path")
    view_parser.add_argument("execution_id", nargs="?", help="Execution ID (optional)")

    # Download command
    download_parser = subparsers.add_parser("download", help="Download a blob")
    download_parser.add_argument("data_blob_id", help="Blob ID or full path")
    download_parser.add_argument(
        "execution_id", nargs="?", help="Execution ID (optional)"
    )

    return parser.parse_args()


def main():
    args = parse_args()

    if not args.command:
        print(__doc__)
        return

    if args.command == "view":
        open_in_browser(args.data_blob_id, args.execution_id)
    elif args.command == "download":
        download_resource(args.data_blob_id, args.execution_id)
    else:
        print(__doc__)


if __name__ == "__main__":
    main()
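
Besides the CLI, download_resource can be called directly, for example after text_to_image.py records a result_id in its metadata JSON (a sketch; the blob and execution IDs are placeholders):

# Sketch only: programmatic download with metadata saved alongside the file.
path = download_resource(
    "data_blob_<id>",
    execution_id="<execution-id>",
    prompt="cyberpunk city",
    metadata={"prompt": "cyberpunk city", "type": "image"},
)
print(path)  # None if the HTTP request failed
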
app/ui/app.py
ADDED
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python
"""
AI Creative Application UI - Modified for stability and Hugging Face Spaces compatibility
"""

import os
import sys
import logging
import time
import json
from pathlib import Path
from dotenv import load_dotenv
import gradio as gr

# Configure logging
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("ui")

# Add the parent directory to sys.path to import app modules
sys.path.append(str(Path(__file__).parent.parent))

# Set environment variable to disable SSL verification for httpx
os.environ["HTTPX_VERIFY"] = "0"

# Detect if running in Hugging Face Spaces
RUNNING_IN_SPACES = os.environ.get("HF_SPACES", "0") == "1"

# Load environment variables if .env exists and not in HF Spaces
if not RUNNING_IN_SPACES:
    dotenv_path = Path(__file__).parent.parent.parent / ".env"
    if dotenv_path.exists():
        load_dotenv(dotenv_path=dotenv_path)
        logger.info(f"Loaded environment from {dotenv_path}")
    else:
        logger.warning(f".env file not found at {dotenv_path}")
else:
    logger.info("Running in Hugging Face Spaces environment")

# Conditionally import app modules with error handling
try:
    from core.pipeline import CreativePipeline, PipelineResult
    from core.stub import Stub

    CORE_MODULES_AVAILABLE = True
except ImportError as e:
    logger.error(f"Failed to import core modules: {str(e)}")
    CORE_MODULES_AVAILABLE = False

# Get app IDs from environment with defaults
TEXT_TO_IMAGE_APP_ID = os.environ.get(
    "TEXT_TO_IMAGE_APP_ID", "c25dcd829d134ea98f5ae4dd311d13bc.node3.openfabric.network"
)
IMAGE_TO_3D_APP_ID = os.environ.get(
    "IMAGE_TO_3D_APP_ID", "f0b5f319156c4819b9827000b17e511a.node3.openfabric.network"
)

def main():
    """AI Creative application interface"""

    # Paths for saving generated content - adjust for the HF Spaces environment
    if RUNNING_IN_SPACES:
        data_path = Path("/tmp/data")
    else:
        data_path = Path(__file__).parent.parent / "data"

    images_path = data_path / "images"
    models_path = data_path / "models"

    # Ensure necessary directories exist
    images_path.mkdir(exist_ok=True, parents=True)
    models_path.mkdir(exist_ok=True, parents=True)

    # Initialize the pipeline only if modules are available
    pipeline = None
    if CORE_MODULES_AVAILABLE:
        try:
            # Configure app IDs - use both TEXT_TO_IMAGE and IMAGE_TO_3D
            app_ids = []
            if TEXT_TO_IMAGE_APP_ID:
                app_ids.append(TEXT_TO_IMAGE_APP_ID)
            if IMAGE_TO_3D_APP_ID:
                app_ids.append(IMAGE_TO_3D_APP_ID)

            logger.info(f"Using app IDs: {app_ids}")
            stub = Stub(app_ids=app_ids)
            pipeline = CreativePipeline(stub)
            logger.info("Pipeline initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize pipeline: {str(e)}")

    def generate_from_prompt(prompt, creative_strength=0.7):
        """
        Generate an image from a text prompt - using a simple return format to avoid Pydantic issues
        """
        if not prompt:
            return "Please enter a prompt", None, None, "", ""

        if not pipeline:
            return (
                "Services not available. Please check server status.",
                None,
                None,
                "",
                "",
            )

        try:
            # Parameters for generation
            params = {
                "image": {
                    "creative_strength": creative_strength,
                },
                "model": {"quality": "standard"},
            }

            # Update status immediately
            status_msg = "Generating image from your prompt..."

            # Run the creative pipeline
            result = pipeline.create(prompt, params)

            # Handle failed generation
            if not result.success and not result.image_path:
                return "Failed to generate image from prompt", None, None, "", ""

            # Process successful generation
            image_info = f"Original prompt: {result.original_prompt}\n"
            if (
                hasattr(result, "expanded_prompt")
                and result.expanded_prompt
                and result.expanded_prompt != result.original_prompt
            ):
                image_info += f"Enhanced prompt: {result.expanded_prompt}\n"

            # Check for the image path
            image_path = result.image_path if hasattr(result, "image_path") else None

            # Check for a 3D model
            model_path = (
                result.model_path
                if hasattr(result, "model_path") and result.model_path
                else None
            )
            model_info = ""

            if model_path:
                model_info = "3D model generated from image.\n"
                model_info += f"Model format: {Path(model_path).suffix[1:]}"
                status_msg = "Image and 3D model generated successfully!"
            else:
                status_msg = "Image generated successfully!"

            return status_msg, image_path, model_path, image_info, model_info

        except Exception as e:
            logger.error(f"Generation error: {str(e)}")
            return f"Error: {str(e)}", None, None, "", ""

    def list_gallery_items():
        """List available images in the gallery"""
        images = list(images_path.glob("*.png")) + list(images_path.glob("*.jpg"))
        return sorted(
            [(str(img), img.stem) for img in images], key=lambda x: x[1], reverse=True
        )

    with gr.Blocks(title="AI Creative Studio") as demo:
        gr.Markdown("# AI Creative Studio")
        gr.Markdown("Generate images from text descriptions")

        with gr.Tab("Create"):
            with gr.Row():
                with gr.Column(scale=2):
                    # Input area
                    prompt_input = gr.Textbox(
                        label="Your creative prompt",
                        placeholder="Describe what you want to create...",
                        lines=3,
                    )

                    with gr.Row():
                        creative_strength = gr.Slider(
                            label="Creative Strength",
                            minimum=0.0,
                            maximum=1.0,
                            value=0.7,
                            step=0.1,
                        )

                    generate_btn = gr.Button("Generate", variant="primary")
                    status = gr.Textbox(label="Status", interactive=False)

                with gr.Column(scale=3):
                    # Output area with tabs for different views
                    with gr.Tab("Image"):
                        with gr.Row():
                            image_output = gr.Image(
                                label="Generated Image", type="filepath"
                            )
                            image_info = gr.Textbox(
                                label="Image Details", interactive=False, lines=3
                            )

                    with gr.Tab("3D Model"):
                        with gr.Row():
                            model_viewer = gr.Model3D(label="3D Model")
                            model_info = gr.Textbox(
                                label="Model Details", interactive=False, lines=3
                            )

        with gr.Tab("Gallery"):
            # Function to update the image gallery
            def update_image_gallery():
                images = list(images_path.glob("*.png")) + list(
                    images_path.glob("*.jpg")
                )
                return sorted(
                    [str(img) for img in images],
                    key=lambda x: os.path.getmtime(x) if os.path.exists(x) else 0,
                    reverse=True,
                )

            # Function to update the models gallery; returns both the table rows
            # and the bare model paths
            def update_models_gallery():
                models = list(models_path.glob("*.glb")) + list(
                    models_path.glob("*.gltf")
                )
                model_data = []
                model_paths = []  # Store just the paths for easy access by index

                for model_path in sorted(
                    models,
                    key=lambda x: os.path.getmtime(x) if os.path.exists(x) else 0,
                    reverse=True,
                ):
                    # Try to load the metadata file if available
                    metadata_path = model_path.with_suffix(".json")
                    creation_time = time.strftime(
                        "%Y-%m-%d %H:%M", time.localtime(os.path.getmtime(model_path))
                    )

                    if metadata_path.exists():
                        try:
                            with open(metadata_path, "r") as f:
                                metadata = json.load(f)
                            source_image = metadata.get(
                                "source_image_filename", "Unknown"
                            )
                            format_type = metadata.get("format", model_path.suffix[1:])
                        except Exception as e:
                            logger.error(
                                f"Failed to read metadata for {model_path}: {e}"
                            )
                            source_image = "Unknown"
                            format_type = model_path.suffix[1:]
                    else:
                        source_image = "Unknown"
                        format_type = model_path.suffix[1:]

                    # Add to the data table and path list
                    model_paths.append(str(model_path))
                    model_data.append(
                        [
                            str(model_path),
                            source_image,
                            format_type,
                            creation_time,
                        ]
                    )

                return model_data, model_paths

            # View a model by index instead of relying on DataFrame cell values
            def view_model_by_index(evt: gr.SelectData):
                if (
                    not hasattr(view_model_by_index, "model_paths")
                    or not view_model_by_index.model_paths
                ):
                    logger.warning("No model paths available")
                    return None, None

                try:
                    # Get the row index from the selection event
                    row_index = (
                        evt.index[0] if hasattr(evt, "index") and evt.index else 0
                    )
                    if row_index < 0 or row_index >= len(
                        view_model_by_index.model_paths
                    ):
                        logger.warning(f"Invalid model index: {row_index}")
                        return None, None

                    # Get the model path from our saved list
                    model_path = view_model_by_index.model_paths[row_index]
                    logger.info(f"Selected model at index {row_index}: {model_path}")

                    if not model_path or not os.path.exists(model_path):
                        logger.warning(f"Model file not found: {model_path}")
                        return None, None

                except (IndexError, AttributeError, TypeError) as e:
                    logger.error(f"Error accessing selected model: {e}")
                    return None, None

                # Get model metadata if available
                metadata_path = Path(model_path).with_suffix(".json")
                metadata = {}

                if metadata_path.exists():
                    try:
                        with open(metadata_path, "r") as f:
                            metadata = json.load(f)
                        logger.info(f"Loaded metadata for model: {model_path}")
                    except Exception as e:
                        logger.error(f"Failed to read metadata for {model_path}: {e}")
                else:
                    logger.warning(f"No metadata file found for model: {metadata_path}")

                return model_path, metadata

            # Store model paths in the view function's namespace
            def store_model_paths(model_data, model_paths):
                view_model_by_index.model_paths = model_paths
                return model_data

            with gr.Tabs() as gallery_tabs:
                with gr.Tab("Images"):
                    image_gallery = gr.Gallery(
                        label="Generated Images",
                        columns=4,
                        object_fit="contain",
                        height="auto",
                    )
                    refresh_img_btn = gr.Button("Refresh Images")

                with gr.Tab("3D Models"):
                    models_list = gr.Dataframe(
                        headers=["Model", "Source Image", "Format", "Created"],
                        label="Available 3D Models",
                        row_count=10,
                        col_count=(4, "fixed"),
                        interactive=False,
                    )
                    with gr.Row():
                        selected_model = gr.Model3D(label="Selected 3D Model")
                        model_details = gr.JSON(label="Model Details")

                    refresh_models_btn = gr.Button("Refresh Models")
                    # Removed the "View Selected Model" button that was causing errors

            # Make the dataframe selection trigger the model loading automatically
            models_list.select(
                fn=view_model_by_index, outputs=[selected_model, model_details]
            )

            # Wire up the gallery refresh buttons
            refresh_img_btn.click(fn=update_image_gallery, outputs=[image_gallery])
            refresh_models_btn.click(
                fn=lambda: store_model_paths(*update_models_gallery()),
                outputs=[models_list],
            )

            # Removed the view_model_btn.click implementation since the button is gone

            # Initial gallery loads
            demo.load(update_image_gallery, outputs=[image_gallery])
            demo.load(
                fn=lambda: store_model_paths(*update_models_gallery()),
                outputs=[models_list],
            )

        # Wire up the generate button - non-streaming mode to avoid Pydantic issues
        generate_btn.click(
            fn=generate_from_prompt,
            inputs=[prompt_input, creative_strength],
            outputs=[
                status,
                image_output,
                model_viewer,
                image_info,
                model_info,
            ],
        )

    # Launch the UI with parameters compatible with Gradio 4.26.0
    port = int(os.environ.get("UI_PORT", 7860))
    logger.info(f"Launching UI on port {port}")
    demo.launch(
        server_name="0.0.0.0",
        server_port=port,
        share=True,
        show_error=True,
        # Removed the api_mode parameter that isn't supported in 4.26.0
    )


if __name__ == "__main__":
    main()
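One detail worth noting in the Gallery tab above: the current model paths are stored as an attribute on `view_model_by_index` itself, so the `Dataframe.select` handler can map a selected row index back to a file path without globals or extra state components. A stripped-down, self-contained sketch of that pattern (`FakeSelectData` is a stand-in for Gradio's `gr.SelectData` payload, whose `index` is a (row, column) pair for dataframe selections):

# Minimal sketch of the function-attribute state pattern; FakeSelectData is a stand-in.
class FakeSelectData:
    def __init__(self, row):
        self.index = (row, 0)  # (row, column), as emitted by a Dataframe selection

def view_by_index(evt):
    paths = getattr(view_by_index, "model_paths", [])  # state lives on the function
    row = evt.index[0] if evt.index else 0
    return paths[row] if 0 <= row < len(paths) else None

view_by_index.model_paths = ["/tmp/data/models/a.glb", "/tmp/data/models/b.glb"]
print(view_by_index(FakeSelectData(1)))  # -> /tmp/data/models/b.glb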
onto/.DS_Store
ADDED
Binary file (6.15 kB)
onto/dc8f06af066e4a7880a5938933236037/.DS_Store
ADDED
Binary file (8.2 kB)
onto/dc8f06af066e4a7880a5938933236037/connection/ConfigClass.json
ADDED
@@ -0,0 +1,10 @@
{
  "$type" : "connection",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "82a958bf5a277217a679c14824974c2f",
  "$author" : "andrei",
  "description" : "Define connection for ConfigClass concept",
  "connections" : { },
  "locations" : { }
}
onto/dc8f06af066e4a7880a5938933236037/connection/InputClass.json
ADDED
@@ -0,0 +1,10 @@
{
  "$type" : "connection",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "00f7a3dfaaa9073f029302b9fbb4191e",
  "$author" : "andrei",
  "description" : "Define connection for InputClass concept",
  "connections" : { },
  "locations" : { }
}
onto/dc8f06af066e4a7880a5938933236037/connection/OutputClass.json
ADDED
@@ -0,0 +1,10 @@
{
  "$type" : "connection",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "523a8eb9ad1eb66ba6d1add87742cee5",
  "$author" : "andrei",
  "description" : "Define connection for OutputClass concept",
  "connections" : { },
  "locations" : { }
}
onto/dc8f06af066e4a7880a5938933236037/defaults/ConfigClass.json
ADDED
@@ -0,0 +1,15 @@
{
  "$type" : "defaults",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "82a958bf5a277217a679c14824974c2f",
  "$author" : "andrei",
  "description" : "Define defaults for ConfigClass concept",
  "locale" : "en_US",
  "extends" : null,
  "defaults" : {
    "app_ids" : "None"
  },
  "options" : { },
  "icons" : { }
}
onto/dc8f06af066e4a7880a5938933236037/defaults/InputClass.json
ADDED
@@ -0,0 +1,15 @@
{
  "$type" : "defaults",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "00f7a3dfaaa9073f029302b9fbb4191e",
  "$author" : "andrei",
  "description" : "Define defaults for InputClass concept",
  "locale" : "en_US",
  "extends" : null,
  "defaults" : {
    "prompt" : "None"
  },
  "options" : { },
  "icons" : { }
}
onto/dc8f06af066e4a7880a5938933236037/defaults/OutputClass.json
ADDED
@@ -0,0 +1,15 @@
{
  "$type" : "defaults",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "523a8eb9ad1eb66ba6d1add87742cee5",
  "$author" : "andrei",
  "description" : "Define defaults for OutputClass concept",
  "locale" : "en_US",
  "extends" : null,
  "defaults" : {
    "message" : "None"
  },
  "options" : { },
  "icons" : { }
}
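Note that the three defaults files above pin `app_ids`, `prompt`, and `message` to the literal string "None" rather than JSON null, so any consumer expecting Python None needs an explicit conversion. A small sketch of such a normalization step (the helper is invented for illustration, not part of this repo):

# Hypothetical helper: map the ontology's "None" string defaults to Python None.
def normalize_default(value):
    return None if value == "None" else value

assert normalize_default("None") is None
assert normalize_default("a glowing dragon") == "a glowing dragon"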
onto/dc8f06af066e4a7880a5938933236037/encoding/ConfigClass.json
ADDED
@@ -0,0 +1,10 @@
{
  "$type" : "encoding",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "82a958bf5a277217a679c14824974c2f",
  "$author" : "andrei",
  "description" : "Define encoding for ConfigClass concept",
  "default" : "UTF-8",
  "encodings" : { }
}
onto/dc8f06af066e4a7880a5938933236037/encoding/InputClass.json
ADDED
@@ -0,0 +1,10 @@
{
  "$type" : "encoding",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "00f7a3dfaaa9073f029302b9fbb4191e",
  "$author" : "andrei",
  "description" : "Define encoding for InputClass concept",
  "default" : "UTF-8",
  "encodings" : { }
}
onto/dc8f06af066e4a7880a5938933236037/encoding/OutputClass.json
ADDED
@@ -0,0 +1,10 @@
{
  "$type" : "encoding",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "523a8eb9ad1eb66ba6d1add87742cee5",
  "$author" : "andrei",
  "description" : "Define encoding for OutputClass concept",
  "default" : "UTF-8",
  "encodings" : { }
}
onto/dc8f06af066e4a7880a5938933236037/instruction/ConfigClass.json
ADDED
@@ -0,0 +1,14 @@
{
  "$type" : "instruction",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "82a958bf5a277217a679c14824974c2f",
  "$author" : "andrei",
  "description" : "Define instruction for ConfigClass concept",
  "locale" : "en_US",
  "instructions" : { },
  "display" : { },
  "layout" : { },
  "render" : null,
  "renderingType" : { }
}
onto/dc8f06af066e4a7880a5938933236037/instruction/InputClass.json
ADDED
@@ -0,0 +1,14 @@
{
  "$type" : "instruction",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "00f7a3dfaaa9073f029302b9fbb4191e",
  "$author" : "andrei",
  "description" : "Define instruction for InputClass concept",
  "locale" : "en_US",
  "instructions" : { },
  "display" : { },
  "layout" : { },
  "render" : null,
  "renderingType" : { }
}
onto/dc8f06af066e4a7880a5938933236037/instruction/OutputClass.json
ADDED
@@ -0,0 +1,14 @@
{
  "$type" : "instruction",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "523a8eb9ad1eb66ba6d1add87742cee5",
  "$author" : "andrei",
  "description" : "Define instruction for OutputClass concept",
  "locale" : "en_US",
  "instructions" : { },
  "display" : { },
  "layout" : { },
  "render" : null,
  "renderingType" : { }
}
onto/dc8f06af066e4a7880a5938933236037/meta.json
ADDED
@@ -0,0 +1,42 @@
{
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$author" : "andrei",
  "$name" : "test-ai",
  "$description" : null,
  "storage" : {
    "00f7a3dfaaa9073f029302b9fbb4191e" : {
      "STRUCTURE" : "structure/InputClass.json",
      "ENCODING" : "encoding/InputClass.json",
      "SUBSET" : "subset/InputClass.json",
      "RESTRICTION" : "restriction/InputClass.json",
      "DEFAULTS" : "defaults/InputClass.json",
      "VALIDATION" : "validation/InputClass.json",
      "INSTRUCTION" : "instruction/InputClass.json",
      "CONNECTION" : "connection/InputClass.json",
      "NAMING" : "naming/InputClass.json"
    },
    "82a958bf5a277217a679c14824974c2f" : {
      "STRUCTURE" : "structure/ConfigClass.json",
      "ENCODING" : "encoding/ConfigClass.json",
      "SUBSET" : "subset/ConfigClass.json",
      "RESTRICTION" : "restriction/ConfigClass.json",
      "DEFAULTS" : "defaults/ConfigClass.json",
      "VALIDATION" : "validation/ConfigClass.json",
      "INSTRUCTION" : "instruction/ConfigClass.json",
      "CONNECTION" : "connection/ConfigClass.json",
      "NAMING" : "naming/ConfigClass.json"
    },
    "523a8eb9ad1eb66ba6d1add87742cee5" : {
      "STRUCTURE" : "structure/OutputClass.json",
      "ENCODING" : "encoding/OutputClass.json",
      "SUBSET" : "subset/OutputClass.json",
      "RESTRICTION" : "restriction/OutputClass.json",
      "DEFAULTS" : "defaults/OutputClass.json",
      "VALIDATION" : "validation/OutputClass.json",
      "INSTRUCTION" : "instruction/OutputClass.json",
      "CONNECTION" : "connection/OutputClass.json",
      "NAMING" : "naming/OutputClass.json"
    }
  }
}
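The `storage` map above ties each concept ID (InputClass, ConfigClass, OutputClass) to its nine aspect files by path relative to the ontology root. A minimal sketch of resolving those paths, assuming the script runs from the repo root where `onto/` lives:

# Minimal sketch: list the aspect files registered for the InputClass concept.
import json
from pathlib import Path

onto_root = Path("onto/dc8f06af066e4a7880a5938933236037")
meta = json.loads((onto_root / "meta.json").read_text())

input_class_id = "00f7a3dfaaa9073f029302b9fbb4191e"  # InputClass, per the storage map
for aspect, rel_path in meta["storage"][input_class_id].items():
    print(f"{aspect:12s} -> {onto_root / rel_path}")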
onto/dc8f06af066e4a7880a5938933236037/naming/ConfigClass.json
ADDED
@@ -0,0 +1,12 @@
{
  "$type" : "naming",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "82a958bf5a277217a679c14824974c2f",
  "$author" : "andrei",
  "description" : "Define naming for ConfigClass concept",
  "locale" : "en_US",
  "selfName" : null,
  "names" : { },
  "labels" : { }
}
onto/dc8f06af066e4a7880a5938933236037/naming/InputClass.json
ADDED
@@ -0,0 +1,12 @@
{
  "$type" : "naming",
  "schema" : "dc8f06af066e4a7880a5938933236037",
  "$version" : "1.0",
  "$id" : "00f7a3dfaaa9073f029302b9fbb4191e",
  "$author" : "andrei",
  "description" : "Define naming for InputClass concept",
  "locale" : "en_US",
  "selfName" : null,
  "names" : { },
  "labels" : { }
}