unclemusclez committed on
Commit
b158147
1 Parent(s): 94d6df0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -17,11 +17,12 @@ from textwrap import dedent
17
 
18
  HF_TOKEN = os.environ.get("HF_TOKEN")
19
  OLLAMA_USERNAME = os.environ.get("OLLAMA_USERNAME").lower()
20
- ollama_pubkey = open("/home/user/.ollama/id_ed25519.pub", "r")
21
 
22
- def process_model(model_id, ollamafy, ollama_q_method, latest, maintainer, oauth_token: gr.OAuthToken | None):
23
  if oauth_token.token is None:
24
  raise ValueError("You must be logged in to use GGUF-my-repo")
 
25
  model_name = model_id.split('/')[-1]
26
  fp16 = f"{model_name}-fp16.gguf"
27
 
@@ -162,8 +163,8 @@ with gr.Blocks(css=css) as demo:
162
 
163
  ollama_q_method = gr.Dropdown(
164
  ["FP16", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_1", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_1", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
165
- label="Ollama Lastest Quantization Method",
166
- info="Chose which quantization will be labled with the latest tag in the Ollama Library",
167
  value="FP16",
168
  filterable=False,
169
  visible=False
@@ -172,17 +173,18 @@ with gr.Blocks(css=css) as demo:
172
  latest = gr.Checkbox(
173
  value=False,
174
  label="Latest",
175
- info="Copy Model to Ollama Library with the :latest tag"
176
  )
177
 
178
  maintainer = gr.Checkbox(
179
  value=False,
180
  label="Maintainer",
181
- info="This is your original repository on both Hugging Face and Ollama. DO NOT USE Unless same USERNAME on both platforms!!!"
 
182
  )
183
 
184
  iface = gr.Interface(
185
- fn=process_model,
186
  inputs=[
187
  model_id,
188
  ollama_q_method,
 
17
 
18
  HF_TOKEN = os.environ.get("HF_TOKEN")
19
  OLLAMA_USERNAME = os.environ.get("OLLAMA_USERNAME").lower()
20
+ ollama_pubkey = open(os.path.expanduser("~/.ollama/id_ed25519.pub"), "r")
21
 
22
+ def ollamafy_model(model_id, ollamafy, ollama_q_method, latest, maintainer, oauth_token: gr.OAuthToken | None):
23
  if oauth_token.token is None:
24
  raise ValueError("You must be logged in to use GGUF-my-repo")
25
+
26
  model_name = model_id.split('/')[-1]
27
  fp16 = f"{model_name}-fp16.gguf"
28
 
 
163
 
164
  ollama_q_method = gr.Dropdown(
165
  ["FP16", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_1", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_1", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
166
+ label="Ollama Quantization Method",
167
+ info="Choose which quantization will be created and exported to the Ollama Library",
168
  value="FP16",
169
  filterable=False,
170
  visible=False
 
173
  latest = gr.Checkbox(
174
  value=False,
175
  label="Latest",
176
+ info="Push Model to the Ollama Library with the :latest tag"
177
  )
178
 
179
  maintainer = gr.Checkbox(
180
  value=False,
181
  label="Maintainer",
182
+ info="This is your original repository on both Hugging Face and Ollama. "
183
+ "DO NOT USE unless using the same USERNAME on both platforms."
184
  )
185
 
186
  iface = gr.Interface(
187
+ fn=ollamafy_model,
188
  inputs=[
189
  model_id,
190
  ollama_q_method,