limcheekin
committed on
Commit
•
9c1af73
1
Parent(s):
c5d5f04
feat: added code for serving README.md
Browse files- main.py +26 -0
- start_server.sh +1 -1
main.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from llama_cpp.server.app import create_app, Settings
|
2 |
+
# from fastapi.staticfiles import StaticFiles
|
3 |
+
from fastapi.responses import HTMLResponse
|
4 |
+
import os
|
5 |
+
|
6 |
+
# Log how many CPUs are visible; this is the value used to size the
# llama.cpp thread pool below.
print("os.cpu_count()", os.cpu_count())

# Configure the llama-cpp-python OpenAI-compatible server:
# one inference thread per CPU, model loaded from the local model dir,
# embeddings endpoint disabled.
settings = Settings(
    n_threads=os.cpu_count(),
    model="model/ggmlv3-model.bin",
    embedding=False,
)
app = create_app(settings)

# app.mount("/static", StaticFiles(directory="static"), name="static")
|
16 |
+
|
17 |
+
|
18 |
+
@app.get("/", response_class=HTMLResponse)
async def read_items():
    """Serve the raw contents of README.md at the root path.

    Returns:
        The README text, delivered with an HTML content type. Note the
        body is raw Markdown, not rendered HTML.
    """
    # Explicit encoding: without it, open() uses the platform default,
    # which may not be UTF-8 and can mis-decode non-ASCII README content.
    with open("README.md", "r", encoding="utf-8") as f:
        content = f.read()
    return content
|
23 |
+
|
24 |
+
if __name__ == "__main__":
    import uvicorn

    # PORT arrives from the environment as a string, but uvicorn.run()
    # expects an integer port — convert explicitly (the original passed
    # the raw string through).
    uvicorn.run(
        app,
        host=os.environ["HOST"],
        port=int(os.environ["PORT"]),
    )
|
start_server.sh
CHANGED
@@ -3,4 +3,4 @@
|
|
3 |
# For mlock support
|
4 |
ulimit -l unlimited
|
5 |
|
6 |
-
python3 -B
|
|
|
3 |
# For mlock support
# (raise the locked-memory limit so the model can be pinned in RAM)
ulimit -l unlimited

# -B: don't write .pyc bytecode cache files for the server process.
python3 -B main.py
|