masanorihirano committed
Commit a1a5519 · 1 Parent(s): deb1174

limited to 256 tokens

Files changed (1): app.py (+2 -2)
app.py CHANGED

@@ -166,7 +166,7 @@ def evaluate(
     top_k: int = 0
     prompt = generate_prompt(instruction, input)
     inputs = tokenizer(prompt, return_tensors="pt")
-    input_ids = inputs["input_ids"].to(device)
+    input_ids = inputs["input_ids"][:256].to(device)
     generation_config = GenerationConfig(
         temperature=temperature,
         top_p=top_p,
@@ -235,7 +235,7 @@ with gr.Blocks(
     with gr.Row():
         with gr.Column():
             instruction = gr.Textbox(
-                lines=3, label="Instruction", placeholder="こんにちは"
+                lines=3, label="Instruction (Pre-Prompt + Instruction + Input is limitted to 256 tokens)", placeholder="こんにちは"
            )
             inputs = gr.Textbox(lines=1, label="Input", placeholder="none")
             with gr.Row():
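Note for readers applying this change: with a standard Hugging Face tokenizer, inputs["input_ids"] returned by tokenizer(prompt, return_tensors="pt") has shape (batch, seq_len), so the slice [:256] acts on the batch dimension rather than on the tokens. Below is a minimal sketch (not the committed code) of how the 256-token cap could instead be enforced along the sequence axis, assuming the same tokenizer, prompt, and device variables used in app.py:

    # Sketch only: cap the prompt at 256 tokens via the tokenizer itself.
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,   # drop tokens beyond max_length
        max_length=256,    # Pre-Prompt + Instruction + Input budget
    )
    input_ids = inputs["input_ids"].to(device)

    # Equivalent explicit slice along the sequence (second) dimension:
    # input_ids = inputs["input_ids"][:, :256].to(device)

Either form truncates the token sequence; the tokenizer-level option has the advantage of also truncating the attention mask consistently.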