Update index.js
index.js CHANGED
@@ -1,23 +1,35 @@
 import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
 
+// Disable local models to fetch models from the Hugging Face Hub
 env.allowLocalModels = false;
 
+// Reference the DOM elements
 const status = document.getElementById('status');
 const userInput = document.getElementById('user-input');
 const outputContainer = document.getElementById('output');
 const submitButton = document.getElementById('submit-button');
 
-
-
+// Initialize variables
 let generator;
-
-
-
-
-
-
+
+// Function to load the model
+async function loadModel() {
+  try {
+    status.textContent = 'Loading model...';
+
+    // Load the LLaMA or GPT model for text generation
+    generator = await pipeline('text-generation', 'meta-llama/Llama-2-7b-chat-hf'); // Replace with your model
+    status.textContent = 'Model loaded. Ready to chat!';
+  } catch (error) {
+    console.error('Error loading model:', error);
+    status.textContent = 'Failed to load model. Check the console for details.';
+  }
 }
 
+// Load the model at startup
+await loadModel();
+
+// Add event listener for button clicks
 submitButton.addEventListener('click', async () => {
   const inputText = userInput.value.trim();
 
@@ -26,20 +38,24 @@ submitButton.addEventListener('click', async () => {
     return;
   }
 
+  // Show the user that the model is processing
   status.textContent = 'Generating response...';
 
   try {
+    // Generate a response using the pipeline
     const response = await generator(inputText, {
-      max_new_tokens:
-      temperature: 0.7,
-      top_p: 0.95,
+      max_new_tokens: 100, // Adjust as needed for response length
+      temperature: 0.7, // Controls randomness
+      top_p: 0.95, // Nucleus sampling
     });
 
+    // Display the generated response
     outputContainer.innerText = response[0].generated_text;
   } catch (error) {
     console.error('Error generating response:', error);
     outputContainer.innerText = 'Error generating response. Please try again.';
   }
 
+  // Reset status to indicate the model is ready again
   status.textContent = 'Model loaded. Ready to chat!';
 });
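Note: the script uses an ESM import and top-level await, so the page must load it with script type="module". Also, meta-llama/Llama-2-7b-chat-hf is a gated checkpoint without ONNX weights, so Transformers.js will most likely fail to fetch it in the browser; the inline comment already says to replace it. Below is a minimal sketch of the loader, assuming Xenova/distilgpt2 (a small ONNX-converted model published for Transformers.js) as a stand-in, and using the library's progress_callback option to surface download progress:

import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

env.allowLocalModels = false;

const status = document.getElementById('status');

let generator;

async function loadModel() {
  try {
    status.textContent = 'Loading model...';
    // Assumption: 'Xenova/distilgpt2' stands in for the gated Llama checkpoint;
    // any ONNX-converted text-generation model from the Hub should work here.
    generator = await pipeline('text-generation', 'Xenova/distilgpt2', {
      // progress_callback is invoked with download/initialization events
      progress_callback: (data) => {
        if (data.status === 'progress') {
          status.textContent = `Loading model... ${Math.round(data.progress)}%`;
        }
      },
    });
    status.textContent = 'Model loaded. Ready to chat!';
  } catch (error) {
    console.error('Error loading model:', error);
    status.textContent = 'Failed to load model. Check the console for details.';
  }
}

await loadModel();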
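On the generation options: max_new_tokens caps how many tokens are generated beyond the prompt, temperature rescales the token distribution (lower is more deterministic), and top_p keeps only the smallest set of tokens whose cumulative probability exceeds the threshold (nucleus sampling). A sketch of the call with the diff's values, plus do_sample set explicitly since, as in the upstream transformers library, temperature and top_p only take effect when sampling is enabled:

// Sketch of the generation call; values are the ones from the diff above.
const response = await generator(inputText, {
  max_new_tokens: 100, // upper bound on generated tokens, excluding the prompt
  do_sample: true,     // enable sampling so temperature/top_p take effect
  temperature: 0.7,    // < 1.0 sharpens the distribution, > 1.0 flattens it
  top_p: 0.95,         // nucleus sampling threshold
});

// The pipeline returns an array; generated_text holds the full output,
// which includes the prompt by default.
outputContainer.innerText = response[0].generated_text;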