Merge pull request #9 from all-in-aigc/bugfix/predict-import-error

Files changed:
- README.md (+9 -6)
- src/app/queries/predict.ts (+9 -3)
README.md

@@ -102,10 +102,13 @@ To activate it, create a `.env.local` configuration file:
 
 ```bash
 LLM_ENGINE="OPENAI"
+
 # default openai api base url is: https://api.openai.com/v1
-
-
-
+LLM_OPENAI_API_BASE_URL="Your OpenAI API Base URL"
+
+LLM_OPENAI_API_MODEL="gpt-3.5-turbo"
+
+AUTH_OPENAI_API_KEY="Your OpenAI API Key"
 ```
 
 ### Option 4: Fork and modify the code to use a different LLM system
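The three settings added above are read from the environment at runtime. As a minimal sketch of how they would plausibly be consumed, here is a hypothetical `predictWithOpenAI` built on the official `openai` Node SDK; the function body, prompt handling, and fallbacks are assumptions, not this repository's actual implementation:

```typescript
// Sketch only: reading the LLM_OPENAI_* / AUTH_OPENAI_* variables
// documented in the README block above. Details are assumptions.
import OpenAI from "openai"

const client = new OpenAI({
  apiKey: `${process.env.AUTH_OPENAI_API_KEY || ""}`,
  // Falls back to the default base URL noted in the README comment.
  baseURL: `${process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`,
})

export async function predictWithOpenAI(prompt: string): Promise<string> {
  const completion = await client.chat.completions.create({
    model: `${process.env.LLM_OPENAI_API_MODEL || "gpt-3.5-turbo"}`,
    messages: [{ role: "user", content: prompt }],
  })
  return completion.choices[0]?.message?.content || ""
}
```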
@@ -140,11 +143,11 @@ To use Replicate, create a `.env.local` configuration file:
 
 ```bash
 RENDERING_ENGINE="REPLICATE"
 
-
+RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
 
-
+RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
 
-
+AUTH_REPLICATE_API_TOKEN="Your Replicate token"
 ```
 
 ### Option 3: Use another SDXL API
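Similarly, the renamed Replicate variables map onto Replicate's public REST API, which addresses a public model by its version hash. A minimal sketch, assuming a hypothetical helper and SDXL's `prompt` input (neither is taken from this repository):

```typescript
// Sketch only: starting a prediction from the RENDERING_REPLICATE_* /
// AUTH_REPLICATE_API_TOKEN variables above. This helper is hypothetical.
export async function createRendering(prompt: string) {
  const res = await fetch("https://api.replicate.com/v1/predictions", {
    method: "POST",
    headers: {
      Authorization: `Token ${process.env.AUTH_REPLICATE_API_TOKEN || ""}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      // Public models are addressed by version hash, so the
      // RENDERING_REPLICATE_API_MODEL name is informational here.
      version: `${process.env.RENDERING_REPLICATE_API_MODEL_VERSION || ""}`,
      input: { prompt },
    }),
  })
  if (!res.ok) {
    throw new Error(`Replicate API error: ${res.status}`)
  }
  // The response includes an id to poll until the prediction completes.
  return res.json()
}
```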
src/app/queries/predict.ts

@@ -1,9 +1,15 @@
 "use server"
 
 import { LLMEngine } from "@/types"
-import { predictWithHuggingFace } from "./predictWithHuggingFace"
-import { predictWithOpenAI } from "./predictWithOpenAI"
 
 const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
 
-export const predict = llmEngine === "OPENAI" ? predictWithOpenAI : predictWithHuggingFace
+export const predict = async () => {
+  if (llmEngine === "OPENAI") {
+    const module = await import("./predictWithOpenAI")
+    return module.predictWithOpenAI
+  } else {
+    const module = await import("./predictWithHuggingFace")
+    return module.predictWithHuggingFace
+  }
+}
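The fix itself: the old module statically imported both engines, so the unused engine's module was still evaluated at load time, which is presumably the import error the branch name refers to. The rewrite defers loading with dynamic `import()` so only the selected engine is ever evaluated. Note the new call shape: `predict()` is now async and resolves to the engine function instead of being it. A usage sketch, assuming the underlying engine functions take a prompt string:

```typescript
// Sketch only: calling the rewritten predict. The prompt-string
// signature of the engine functions is an assumption.
import { predict } from "@/app/queries/predict"

async function generate(prompt: string) {
  const predictFn = await predict() // loads only the selected engine's module
  return predictFn(prompt)
}
```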