Update README.md
Browse files
README.md
CHANGED
@@ -64,4 +64,52 @@ export KIND_TEACHER_HOST="localhost"
Once the environment is configured, you can execute the program by running the following command:

```bash
llamafactory-cli api run_api_inference_1.yaml
```
Once the environment is configured, you can execute the program by running the following command:

```bash
llamafactory-cli api run_api_inference_1.yaml
```

**API Call Format**

```python
import json

import requests

address = "localhost"
port = 8000
type_message = {"GET": "/models", "POST": "/chat/completions"}
url = f'http://{address}:{port}/v1{type_message["POST"]}'

headers = {
    'accept': 'application/json',
    'Content-Type': 'application/json'
}

messages = [
    {
        "role": "system",  # "user", "assistant" or "system"
        "content": "You are a kind teacher that help students with their problems.",
    },
    {
        "role": "user",  # "user", "assistant" or "system"
        "content": "Hello teacher",
        "tool_calls": []
    },
    {
        "role": "assistant",  # "user", "assistant" or "system"
        "content": "Hello student!",
    },
    {
        "role": "user",  # "user", "assistant" or "system"
        "content": "Can you help me to understand the past perfect of english?",
        "tool_calls": []
    },
]

data = {
    "model": "Transducens/kind_teacher",
    "messages": messages,  # messages must be formatted in the required format
    "tools": [],
    "do_sample": True,
    "temperature": 1.0,
    "top_p": 0.7,
    "n": 1,  # number of completions (responses) to generate
    "max_tokens": 150,
    "stream": False
}

response = requests.post(url, headers=headers, data=json.dumps(data))
```