Alysha Creelman committed on
Commit 816324f · unverified · 1 Parent(s): 91471e2

adding token app.py

Files changed (1)
  1. app.py +7 -1
app.py CHANGED
@@ -3,9 +3,15 @@ from huggingface_hub import InferenceClient
 import torch
 from transformers import pipeline
 import os
+import sys
+
+if len(sys.argv) > 1:
+    token = sys.argv[1]
+else:
+    token = os.getenv('HF_TOKEN')
 
 # Inference client setup with token from environment
-token = os.getenv('HF_TOKEN')
+# token = os.getenv('HF_TOKEN')
 client = InferenceClient(model="HuggingFaceH4/zephyr-7b-alpha", token=token)
 pipe = pipeline("text-generation", "TinyLlama/TinyLlama_v1.1", torch_dtype=torch.bfloat16, device_map="auto")
 # pipe = pipeline("text-generation", "microsoft/Phi-3-mini-4k-instruct", torch_dtype=torch.bfloat16, device_map="auto")
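In short, the commit lets the Hugging Face token come from either the command line or the environment. A minimal sketch of the resulting resolution logic from the new side of the hunk, with descriptive comments added and the token value shown only as a placeholder:

import os
import sys

# Prefer a token passed as the first CLI argument, e.g.:
#   python app.py <your_hf_token>
# Otherwise fall back to the HF_TOKEN environment variable, e.g.:
#   HF_TOKEN=<your_hf_token> python app.py
if len(sys.argv) > 1:
    token = sys.argv[1]
else:
    token = os.getenv('HF_TOKEN')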