Update README.md
README.md (CHANGED)
<!-- Provide a quick summary of what the model is/does. -->
A fine-tuned version of Phi-2 for the NL2SQL use case, trained on the `b-mc2/sql-create-context` dataset.

__*This repository contains only the LoRA adapters! You need to load the base Phi-2 model and apply these adapters on top.*__
## Model Details
This model has been fine-tuned with `b-mc2/sql-create-context` on `microsoft/phi-2`.
The model is intended for cases where you have a natural-language question and a database schema relevant to that question, and you want to retrieve a SQL query that answers it. The prompt context should stay below 2048 tokens. The output is generated as PostgreSQL.
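For illustration, an input record pairs a question with the `CREATE TABLE` statements it should be answered against. The values below are hypothetical, not taken from the dataset:

```python
# Hypothetical example of the two fields the prompt template below expects.
data_point = {
    "question": "How many singers are from France?",
    "context": "CREATE TABLE singer (singer_id INTEGER, name TEXT, country TEXT)",
}
# The expected output is a PostgreSQL query such as:
#   SELECT COUNT(*) FROM singer WHERE country = 'France';
```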
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
```python
# SAME TEMPLATE AS DEFOG MODEL
prompt = f"""### Task
Generate a SQL query to answer the following question:
`{data_point['question']}`

### Database Schema
The query will run on a database with the following schema:
{data_point['context']}

### Answer
Given the database schema, here is the SQL query that answers `{data_point['question']}`:
```sql"""
```
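Since the context should stay below 2048 tokens, it can help to check the assembled prompt length before inference. This is a minimal sketch using the base Phi-2 tokenizer and is not part of the original card:

```python
from transformers import AutoTokenizer

# Count prompt tokens against the 2048-token limit mentioned above (illustrative check).
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
n_tokens = len(tokenizer(prompt)["input_ids"])
assert n_tokens < 2048, f"Prompt is {n_tokens} tokens; shorten the schema or question."
```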
```python
# USING ON CPU MACHINE
from llama_cpp import Llama

# path_to_model is a placeholder for the directory holding the GGUF file
phi2 = Llama(model_path=f"{path_to_model}/phi2_sqlcoder_f16.gguf")

response = phi2(prompt=prompt, max_tokens=200, temperature=0.2, stop=['```'])

print(response['choices'][0]['text'].strip())
```
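Because the prompt ends by opening a fenced SQL block, the stop sequence (the closing triple-backtick fence) halts generation at the end of the query, so the returned text is normally just the SQL. The snippet below is a defensive post-processing sketch, not part of the original card:

```python
# Illustrative clean-up: keep only the text before any stray closing fence
# and drop surrounding whitespace before using the query.
raw = response['choices'][0]['text']
sql = raw.split('```')[0].strip()
print(sql)
```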
### Downstream Use
```python
# USING ON GPU MACHINE
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "microsoft/phi-2"

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="auto",
    torch_dtype=torch.float16
)

# Load the LoRA adapters and merge them into the base model
peftmodel = PeftModel.from_pretrained(model, "pavankumarbalijepalli/phi2-nl2sql-lora")
peftmodel = peftmodel.merge_and_unload()

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
inputs.to('cuda')

outputs = peftmodel.generate(**inputs, max_length=1000)
text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
print(text)
```
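Note that `batch_decode` returns the prompt followed by the completion. A minimal post-processing sketch follows; the split markers assume the prompt template shown above and are an assumption rather than part of the original card:

```python
# Illustrative: drop the echoed prompt and keep only the generated SQL.
# Assumes the prompt ends with the template's opening "```sql" marker.
generated_sql = text.split("```sql")[-1].split("```")[0].strip()
print(generated_sql)
```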