const express = require('express');
const bodyParser = require('body-parser');
const fetch = require('node-fetch');

const app = express();
const port = 3000;

app.use(bodyParser.json());
app.post('/generate', async (req, res) => {
  const { messages, temperature, max_tokens } = req.body;

  // The Inference API expects a single string prompt, so flatten the chat
  // messages into plain text (the exact prompt template depends on the model).
  const prompt = messages.map((m) => `${m.role}: ${m.content}`).join('\n');

  try {
    const response = await fetch('https://api-inference.huggingface.co/models/codellama/CodeLlama-34b-Instruct-hf', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${process.env.API_KEY}` // Replace with your Hugging Face API token
      },
      body: JSON.stringify({
        inputs: prompt,
        parameters: {
          temperature: temperature || 0.7,
          max_new_tokens: max_tokens || 100
        }
      })
    });
    if (!response.ok) {
      throw new Error(`Hugging Face API responded with status ${response.status}`);
    }

    const data = await response.json();
    // Text-generation models return an array of objects: [{ generated_text: "..." }]
    const generatedText = Array.isArray(data) ? data[0].generated_text : data.generated_text;

    // Append the generated message to the end of the messages array
    messages.push({ role: 'assistant', content: generatedText });
    res.json({ messages });
  } catch (error) {
    console.error(error);
    res.status(500).json({ error: 'An error occurred while generating text.' });
  }
});
app.listen(port, () => {
  console.log(`Server listening on port ${port}`);
});