enzostvs HF staff committed on
Commit
4f54699
1 Parent(s): c334480

use follow count through overview api

Browse files
Files changed (2) hide show
  1. app/actions/roast.ts +74 -42
  2. utils/roast.ts +52 -46
app/actions/roast.ts CHANGED
@@ -1,7 +1,7 @@
1
- "use server"
2
 
3
  import { AutoTokenizer } from "@xenova/transformers";
4
- import { HfInference } from '@huggingface/inference'
5
 
6
  import { formatInformations, transformForInference } from "@/utils/roast";
7
  import { FormProps } from "@/components/form";
@@ -10,10 +10,15 @@ import prisma from "@/utils/prisma";
10
  const MODEL_ID = "meta-llama/Meta-Llama-3.1-70B-Instruct";
11
 
12
  export async function roast({ username, language }: FormProps) {
13
- const userResponse = await fetch(`https://huggingface.co/api/users/${username}/overview`);
 
 
14
  const user = await userResponse.json();
15
  if (!user || user.error) {
16
- return { error: user.error ?? "Something wrong happened, please retry.", status: 404 };
 
 
 
17
  }
18
 
19
  if (!username) {
@@ -21,63 +26,90 @@ export async function roast({ username, language }: FormProps) {
21
  }
22
 
23
  const requests = Promise.all([
24
- await fetch(`https://huggingface.co/api/users/${username}/following`),
25
- await fetch(`https://huggingface.co/api/users/${username}/followers`),
26
- await fetch(`https://huggingface.co/api/spaces?author=${username}&sort=likes&limit=300&full=false&l`),
27
- await fetch(`https://huggingface.co/api/models?author=${username}&sort=downloads&limit=300&full=false`),
28
- await fetch(`https://huggingface.co/api/collections?owner=${username}&limit=100&sort=upvotes&full=false`)
 
 
 
 
29
  ]);
30
 
31
- const [followingResponse, followersResponse, spacesResponse, modelsResponse, collectionsResponse] = await requests;
32
- const [following, followers, spaces, models, collections] = await Promise.all([
33
- followingResponse.json(),
34
- followersResponse.json(),
35
  spacesResponse.json(),
36
  modelsResponse.json(),
37
- collectionsResponse.json()
38
  ]);
39
- const [spacesLikes, modelsLikes] = [spaces, models].map((items) => items.reduce((acc: number, item: any) => acc + item.likes, 0));
40
- const collectionsUpvotes = collections?.reduce((acc: number, item: any) => acc + item.upvotes, 0);
41
-
42
- const datas = formatInformations(user, following.length, followers.length, spaces, models, collections, spacesLikes, modelsLikes, collectionsUpvotes);
43
- const chat = transformForInference(datas, language, user.fullname ?? username);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
  const hf = new HfInference(process.env.HF_ACCESS_TOKEN);
46
- const tokenizer = await AutoTokenizer.from_pretrained("philschmid/meta-llama-3-tokenizer")
 
 
47
 
48
- const formattedPrompt = tokenizer.apply_chat_template(chat, { tokenize: false, add_generation_prompt: true })
49
- const res = await hf.textGeneration({
50
- model: MODEL_ID,
51
- inputs: formattedPrompt as string,
52
- parameters: {
53
- return_full_text: false,
54
- max_new_tokens: 1024,
55
- stop_sequences: ["<|end|>", "<|endoftext|>", "<|assistant|>"],
 
 
 
 
 
 
 
 
56
  }
57
- }, {
58
- use_cache: false,
59
- })
60
 
61
  return {
62
- data: res.generated_text
63
- }
64
  }
65
 
66
- export async function getRoast({ id } : { id: string }) {
67
  const roast = await prisma.quote.findUnique({
68
  where: {
69
- id
70
- }
71
- })
72
 
73
  if (!roast) {
74
  return {
75
  error: "Roast not found",
76
- status: 404
77
- }
78
  }
79
 
80
  return {
81
- data: roast
82
- }
83
- }
 
1
+ "use server";
2
 
3
  import { AutoTokenizer } from "@xenova/transformers";
4
+ import { HfInference } from "@huggingface/inference";
5
 
6
  import { formatInformations, transformForInference } from "@/utils/roast";
7
  import { FormProps } from "@/components/form";
 
10
  const MODEL_ID = "meta-llama/Meta-Llama-3.1-70B-Instruct";
11
 
12
  export async function roast({ username, language }: FormProps) {
13
+ const userResponse = await fetch(
14
+ `https://huggingface.co/api/users/${username}/overview`
15
+ );
16
  const user = await userResponse.json();
17
  if (!user || user.error) {
18
+ return {
19
+ error: user.error ?? "Something wrong happened, please retry.",
20
+ status: 404,
21
+ };
22
  }
23
 
24
  if (!username) {
 
26
  }
27
 
28
  const requests = Promise.all([
29
+ await fetch(
30
+ `https://huggingface.co/api/spaces?author=${username}&sort=likes&limit=300&full=false&l`
31
+ ),
32
+ await fetch(
33
+ `https://huggingface.co/api/models?author=${username}&sort=downloads&limit=300&full=false`
34
+ ),
35
+ await fetch(
36
+ `https://huggingface.co/api/collections?owner=${username}&limit=100&sort=upvotes&full=false`
37
+ ),
38
  ]);
39
 
40
+ const [spacesResponse, modelsResponse, collectionsResponse] = await requests;
41
+ const [spaces, models, collections] = await Promise.all([
 
 
42
  spacesResponse.json(),
43
  modelsResponse.json(),
44
+ collectionsResponse.json(),
45
  ]);
46
+ const [spacesLikes, modelsLikes] = [spaces, models].map((items) =>
47
+ items.reduce((acc: number, item: any) => acc + item.likes, 0)
48
+ );
49
+ const collectionsUpvotes = collections?.reduce(
50
+ (acc: number, item: any) => acc + item.upvotes,
51
+ 0
52
+ );
53
+
54
+ const datas = formatInformations(
55
+ user,
56
+ spaces,
57
+ models,
58
+ collections,
59
+ spacesLikes,
60
+ modelsLikes,
61
+ collectionsUpvotes
62
+ );
63
+ const chat = transformForInference(
64
+ datas,
65
+ language,
66
+ user.fullname ?? username
67
+ );
68
 
69
  const hf = new HfInference(process.env.HF_ACCESS_TOKEN);
70
+ const tokenizer = await AutoTokenizer.from_pretrained(
71
+ "philschmid/meta-llama-3-tokenizer"
72
+ );
73
 
74
+ const formattedPrompt = tokenizer.apply_chat_template(chat, {
75
+ tokenize: false,
76
+ add_generation_prompt: true,
77
+ });
78
+ const res = await hf.textGeneration(
79
+ {
80
+ model: MODEL_ID,
81
+ inputs: formattedPrompt as string,
82
+ parameters: {
83
+ return_full_text: false,
84
+ max_new_tokens: 1024,
85
+ stop_sequences: ["<|end|>", "<|endoftext|>", "<|assistant|>"],
86
+ },
87
+ },
88
+ {
89
+ use_cache: false,
90
  }
91
+ );
 
 
92
 
93
  return {
94
+ data: res.generated_text,
95
+ };
96
  }
97
 
98
+ export async function getRoast({ id }: { id: string }) {
99
  const roast = await prisma.quote.findUnique({
100
  where: {
101
+ id,
102
+ },
103
+ });
104
 
105
  if (!roast) {
106
  return {
107
  error: "Roast not found",
108
+ status: 404,
109
+ };
110
  }
111
 
112
  return {
113
+ data: roast,
114
+ };
115
+ }
utils/roast.ts CHANGED
@@ -2,8 +2,6 @@ import { Language } from "@/components/form";
2
 
3
  export const formatInformations = (
4
  user: any,
5
- countFollowing: number,
6
- countFollowers: number,
7
  spaces: any,
8
  models: any,
9
  collections: any,
@@ -12,70 +10,78 @@ export const formatInformations = (
12
  collectionsUpvotes: number
13
  ) => {
14
  const datas = {
15
- name: user.fullname,
16
- bio: user.details,
17
- organizations: user.orgs?.map((org: any) => ({
18
- name: org.fullname
19
  })),
20
- followers: countFollowers === 500 ? "500+" : countFollowers,
21
- following: countFollowing === 500 ? "500+" : countFollowing,
22
  total_spaces_likes: spacesLikes,
23
  total_models_likes: modelsLikes,
24
  total_collections_likes: collectionsUpvotes,
25
- last_5_spaces: spaces
26
- .map((space: any) => ({
27
- name: space?.cardData?.title ?? space.id?.split("/")[1],
28
- description: space?.cardData?.short_description,
29
- likes_count: space.likes,
30
- last_modified: space.lastModified,
31
- created_at: space.createdAt
32
- }))
33
- .slice(0, 5),
34
  last_5_models: models
35
- .map((model: any) => ({
36
- name: model.id?.split("/")[1],
37
- has_inference: model.inference,
38
- likes_count: model.likes,
39
- downloads_count: model.downloads,
40
- pipeline_tag: model.pipeline_tag,
41
- last_modified: model.lastModified,
42
- created_at: model.createdAt
43
- }))
44
- .slice(0, 5),
45
  last_5_collections: collections
46
- .map((collection: any) => ({
47
- name: collection.title,
48
- description: collection.description,
49
- upvotes_count: collection.upvotes,
50
- }))
51
- .slice(0, 5),
52
- };
53
 
54
  return datas;
55
- }
56
 
57
  export const transformForInference = (
58
  datas: Record<string, any>,
59
  language: Language,
60
- username: string,
61
  ) => {
62
- let user_content = `give a short and harsh roasting for the following hugging face profile: ${username}. Here are the details: "${JSON.stringify(datas)}"`
 
 
63
 
64
  switch (language) {
65
  case "fr":
66
- user_content = `fais une courte et cruelle critique sarcastique en argot pour le profil Hugging Face suivant en français : ${username}. Voici les détails: "${JSON.stringify(datas)}"`
 
 
67
  break;
68
  case "es":
69
- user_content = `haz una crítica corta y cruel para el siguiente perfil de Hugging Face en español: ${username}. Aquí están los detalles: "${JSON.stringify(datas)}"`
 
 
70
  break;
71
  }
72
 
73
- const chat = [{
74
- role: 'assistant',
75
- content:
76
- 'You roast people hugging face account based on their bio, name, spaces, and models as harsh and spicy as possible, and keep it short.'
77
- },
78
- { role: 'user', content: user_content }]
 
 
79
 
80
  return chat;
81
- }
 
2
 
3
  export const formatInformations = (
4
  user: any,
 
 
5
  spaces: any,
6
  models: any,
7
  collections: any,
 
10
  collectionsUpvotes: number
11
  ) => {
12
  const datas = {
13
+ name: user.fullname,
14
+ bio: user.details,
15
+ organizations: user.orgs?.map((org: any) => ({
16
+ name: org.fullname,
17
  })),
18
+ followers: user.numFollowers,
19
+ following: user.numFollowers,
20
  total_spaces_likes: spacesLikes,
21
  total_models_likes: modelsLikes,
22
  total_collections_likes: collectionsUpvotes,
23
+ last_5_spaces: spaces
24
+ .map((space: any) => ({
25
+ name: space?.cardData?.title ?? space.id?.split("/")[1],
26
+ description: space?.cardData?.short_description,
27
+ likes_count: space.likes,
28
+ last_modified: space.lastModified,
29
+ created_at: space.createdAt,
30
+ }))
31
+ .slice(0, 5),
32
  last_5_models: models
33
+ .map((model: any) => ({
34
+ name: model.id?.split("/")[1],
35
+ has_inference: model.inference,
36
+ likes_count: model.likes,
37
+ downloads_count: model.downloads,
38
+ pipeline_tag: model.pipeline_tag,
39
+ last_modified: model.lastModified,
40
+ created_at: model.createdAt,
41
+ }))
42
+ .slice(0, 5),
43
  last_5_collections: collections
44
+ .map((collection: any) => ({
45
+ name: collection.title,
46
+ description: collection.description,
47
+ upvotes_count: collection.upvotes,
48
+ }))
49
+ .slice(0, 5),
50
+ };
51
 
52
  return datas;
53
+ };
54
 
55
  export const transformForInference = (
56
  datas: Record<string, any>,
57
  language: Language,
58
+ username: string
59
  ) => {
60
+ let user_content = `give a short and harsh roasting for the following hugging face profile: ${username}. Here are the details: "${JSON.stringify(
61
+ datas
62
+ )}"`;
63
 
64
  switch (language) {
65
  case "fr":
66
+ user_content = `fais une courte et cruelle critique sarcastique en argot pour le profil Hugging Face suivant en français : ${username}. Voici les détails: "${JSON.stringify(
67
+ datas
68
+ )}"`;
69
  break;
70
  case "es":
71
+ user_content = `haz una crítica corta y cruel para el siguiente perfil de Hugging Face en español: ${username}. Aquí están los detalles: "${JSON.stringify(
72
+ datas
73
+ )}"`;
74
  break;
75
  }
76
 
77
+ const chat = [
78
+ {
79
+ role: "assistant",
80
+ content:
81
+ "You roast people hugging face account based on their bio, name, spaces, and models as harsh and spicy as possible, and keep it short.",
82
+ },
83
+ { role: "user", content: user_content },
84
+ ];
85
 
86
  return chat;
87
+ };