add config.py
app.py CHANGED

@@ -7,6 +7,9 @@ import base64 # added for image decoding
 from io import BytesIO # added for image download
 from together import Together # added Together AI SDK
 
+# config
+import config
+
 # --- Import RECIPE_BASE_PROMPT ---
 try:
     from prompt import RECIPE_BASE_PROMPT
@@ -14,13 +17,6 @@ except ImportError:
     st.error("Error: 'prompt.py' not found or 'RECIPE_BASE_PROMPT' is not defined within it.")
     st.stop()
 
-# --- Constants and settings ---
-models = {
-    "llama3.1-8b": {"name": "Llama3.1-8b", "tokens": 8192, "developer": "Meta"},
-    "llama-3.3-70b": {"name": "Llama-3.3-70b", "tokens": 8192, "developer": "Meta"}
-}
-BASE_URL = "http://localhost:8000/v1"
-IMAGE_MODEL = "black-forest-labs/FLUX.1-schnell-Free" # image generation model to use
 
 # --- Load environment variables ---
 load_dotenv()
@@ -47,12 +43,12 @@ def generate_image_from_prompt(_together_client, prompt_text):
     try:
         response = _together_client.images.generate(
             prompt=prompt_text,
-            model=IMAGE_MODEL,
-            width=1024,
-            height=768,
-            steps=4,
+            model=config.IMAGE_MODEL,
+            width=config.IMAGE_WIDTH,
+            height=config.IMAGE_HEIGHT,
+            steps=config.IMAGE_STEPS,
             n=1,
-            response_format="b64_json",
+            response_format=config.IMAGE_RESPONSE_FORMAT,
             # stop=[] # stop is usually not needed
         )
         if response.data and response.data[0].b64_json:
@@ -103,13 +99,13 @@ with st.sidebar:
     # Model selection
     model_option = st.selectbox(
         "Choose a LLM model:", # clarified label
-        options=list(models.keys()),
-        format_func=lambda x: models[x]["name"],
+        options=list(config.MODELS.keys()),
+        format_func=lambda x: config.MODELS[x]["name"],
         key="model_select"
     )
 
     # Max tokens slider
-    max_tokens_range = models[model_option]["tokens"]
+    max_tokens_range = config.MODELS[model_option]["tokens"]
     default_tokens = min(2048, max_tokens_range)
     max_tokens = st.slider(
         "Max Tokens (LLM):", # clarified label
@@ -134,7 +130,7 @@ if not cerebras_api_key:
 try:
     # Cerebras Client
     if use_optillm:
-        llm_client = openai.OpenAI(base_url=BASE_URL, api_key=cerebras_api_key)
+        llm_client = openai.OpenAI(base_url=config.BASE_URL, api_key=cerebras_api_key)
     else:
         llm_client = Cerebras(api_key=cerebras_api_key)
 
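For reference, a minimal sketch of how the refactored image-generation helper reads once its settings come from config.py. The plain function signature and the omission of app.py's Streamlit caching and error handling are simplifications for illustration, not the exact code in the Space.

import base64
from io import BytesIO

from together import Together

import config


def generate_image_from_prompt(together_client: Together, prompt_text: str):
    # Request one image from Together AI using the settings in config.py.
    response = together_client.images.generate(
        prompt=prompt_text,
        model=config.IMAGE_MODEL,
        width=config.IMAGE_WIDTH,
        height=config.IMAGE_HEIGHT,
        steps=config.IMAGE_STEPS,
        n=1,  # app.py keeps n=1 inline; config.IMAGE_N exists but is not used in the diff
        response_format=config.IMAGE_RESPONSE_FORMAT,
    )
    # With response_format="b64_json" the image arrives as a base64 string.
    if response.data and response.data[0].b64_json:
        return BytesIO(base64.b64decode(response.data[0].b64_json))
    return None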
config.py ADDED

@@ -0,0 +1,16 @@
+IMAGE_MODEL = "black-forest-labs/FLUX.1-schnell-Free" # model from together ai
+BASE_URL = "http://localhost:8000/v1"
+
+
+MODELS = {
+    "llama3.1-8b": {"name": "Llama3.1-8b", "tokens": 8192, "developer": "Meta"},
+    "llama-3.3-70b": {"name": "Llama-3.3-70b", "tokens": 8192, "developer": "Meta"}
+}
+
+
+# config for image generation
+IMAGE_WIDTH = 1024
+IMAGE_HEIGHT = 768
+IMAGE_STEPS = 4
+IMAGE_N = 1
+IMAGE_RESPONSE_FORMAT = "b64_json"
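A usage note, hedged: because app.py now derives the selectbox options, display names, and token limit from config.MODELS, adding another model should only require a new dictionary entry here. The entry below is purely illustrative and not a model this Space actually registers.

import config

# Hypothetical entry, for illustration only.
config.MODELS["example-model-id"] = {"name": "Example Model", "tokens": 8192, "developer": "Example"}

# These mirror the access patterns in app.py after the refactor:
options = list(config.MODELS.keys())                             # selectbox options
label = config.MODELS["example-model-id"]["name"]                # display name via format_func
max_tokens_range = config.MODELS["example-model-id"]["tokens"]   # slider upper bound
print(options, label, max_tokens_range)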