Class: HfInference

Constructors

constructor

new HfInference(accessToken?, defaultOptions?)

Parameters

Name Type Default value
accessToken string ""
defaultOptions Options {}

Defined in

HfInference.ts:25
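
Example (a minimal instantiation sketch; the access token is a placeholder, and both arguments may be omitted for anonymous, rate-limited use):

```ts
import { HfInference } from "@huggingface/inference";

// Placeholder token; create one at hf.co/settings/tokens.
const hf = new HfInference("hf_...");
```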

Properties

accessToken

Private Readonly accessToken: string

Defined in

HfInference.ts:22


audioClassification

audioClassification: (args: { data: Blob | ArrayBuffer ; model: string }, options?: Options) => Promise<AudioClassificationReturn>

Type declaration

▸ (args, options?): Promise<AudioClassificationReturn>

Parameters
Name Type Description
args Object -
args.data Blob | ArrayBuffer Binary audio data
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<AudioClassificationReturn>

Defined in

tasks/audio/audioClassification.ts:30
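
Example (a minimal usage sketch; the token, audio URL, and model ID are illustrative, and top-level await assumes an ES module):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Classify binary audio data fetched as a Blob.
const audio = await (await fetch("https://example.com/sample.flac")).blob();
const labels = await hf.audioClassification({
  model: "superb/hubert-large-superb-er", // illustrative audio-classification model
  data: audio,
});
```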


automaticSpeechRecognition

automaticSpeechRecognition: (args: { data: Blob | ArrayBuffer ; model: string }, options?: Options) => Promise<AutomaticSpeechRecognitionOutput>

Type declaration

▸ (args, options?): Promise<AutomaticSpeechRecognitionOutput>

Parameters
Name Type Description
args Object -
args.data Blob | ArrayBuffer Binary audio data
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<AutomaticSpeechRecognitionOutput>

Defined in

tasks/audio/automaticSpeechRecognition.ts:23
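
Example (a minimal usage sketch with a placeholder token and an illustrative model ID; top-level await assumes an ES module):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const audio = await (await fetch("https://example.com/sample.flac")).blob();
const { text } = await hf.automaticSpeechRecognition({
  model: "facebook/wav2vec2-large-960h-lv60-self", // illustrative ASR model
  data: audio,
});
```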


conversational

conversational: (args: { inputs: { generated_responses?: string[] ; past_user_inputs?: string[] ; text: string } ; model: string ; parameters?: { max_length?: number ; max_time?: number ; min_length?: number ; repetition_penalty?: number ; temperature?: number ; top_k?: number ; top_p?: number } }, options?: Options) => Promise<ConversationalOutput>

Type declaration

▸ (args, options?): Promise<ConversationalOutput>

Parameters
Name Type Description
args Object -
args.inputs Object -
args.inputs.generated_responses? string[] A list of strings corresponding to the earlier replies from the model.
args.inputs.past_user_inputs? string[] A list of strings corresponding to the earlier replies from the user. Should be the same length as generated_responses.
args.inputs.text string The last input from the user in the conversation.
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters? Object -
args.parameters.max_length? number (Default: None). Integer to define the maximum length in tokens of the output.
args.parameters.max_time? number (Default: None). Float (0-120.0). The maximum amount of time, in seconds, that the query should take. Network overhead means this is a soft limit.
args.parameters.min_length? number (Default: None). Integer to define the minimum length in tokens of the output.
args.parameters.repetition_penalty? number (Default: None). Float (0.0-100.0). The more a token is used within the generation, the more it is penalized, making it less likely to be picked in successive generation passes.
args.parameters.temperature? number (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1.0 means regular sampling, 0 means always taking the highest score, and 100.0 approaches uniform probability.
args.parameters.top_k? number (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
args.parameters.top_p? number (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of their probabilities is greater than top_p.
options? Options -
Returns

Promise<ConversationalOutput>

Defined in

tasks/nlp/conversational.ts:65
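
Example (a minimal usage sketch; the token and model ID are placeholders, and top-level await assumes an ES module):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Continue a conversation by passing the prior turns alongside the new text.
const response = await hf.conversational({
  model: "microsoft/DialoGPT-large", // illustrative conversational model
  inputs: {
    past_user_inputs: ["Which movie is the best ?"],
    generated_responses: ["It is Die Hard for sure."],
    text: "Can you explain why ?",
  },
});
```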


defaultOptions

Private Readonly defaultOptions: Options

Defined in

HfInference.ts:23


featureExtraction

featureExtraction: (args: { inputs: string | string[] ; model: string }, options?: Options) => Promise<FeatureExtractionOutput>

Type declaration

▸ (args, options?): Promise<FeatureExtractionOutput>

Parameters
Name Type Description
args Object -
args.inputs string | string[] A string or a list of strings to extract features from, e.g. "That is a happy person".
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<FeatureExtractionOutput>

Defined in

tasks/nlp/featureExtraction.ts:23
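
Example (a minimal usage sketch; the token and model ID are illustrative, and top-level await assumes an ES module):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Returns a (possibly nested) array of numbers per input string.
const embedding = await hf.featureExtraction({
  model: "sentence-transformers/distilbert-base-nli-mean-tokens", // illustrative
  inputs: "That is a happy person",
});
```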


fillMask

fillMask: (args: { inputs: string ; model: string }, options?: Options) => Promise<FillMaskOutput>

Type declaration

▸ (args, options?): Promise<FillMaskOutput>

Parameters
Name Type Description
args Object -
args.inputs string -
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<FillMaskOutput>

Defined in

tasks/nlp/fillMask.ts:31
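
Example (a minimal usage sketch with a placeholder token; the mask token depends on the model):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// BERT-style models use [MASK] as the mask token.
const candidates = await hf.fillMask({
  model: "bert-base-uncased", // illustrative
  inputs: "[MASK] world!",
});
```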


imageClassification

imageClassification: (args: { data: Blob | ArrayBuffer ; model: string }, options?: Options) => Promise<ImageClassificationOutput>

Type declaration

▸ (args, options?): Promise<ImageClassificationOutput>

Parameters
Name Type Description
args Object -
args.data Blob | ArrayBuffer Binary image data
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<ImageClassificationOutput>

Defined in

tasks/cv/imageClassification.ts:29
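
Example (a minimal usage sketch; the token, image URL, and model ID are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const image = await (await fetch("https://example.com/cat.png")).blob();
const classes = await hf.imageClassification({
  model: "google/vit-base-patch16-224", // illustrative
  data: image,
});
```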


imageSegmentation

imageSegmentation: (args: { data: Blob | ArrayBuffer ; model: string }, options?: Options) => Promise<ImageSegmentationOutput>

Type declaration

▸ (args, options?): Promise<ImageSegmentationOutput>

Parameters
Name Type Description
args Object -
args.data Blob | ArrayBuffer Binary image data
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<ImageSegmentationOutput>

Defined in

tasks/cv/imageSegmentation.ts:33
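
Example (a minimal usage sketch; the token, image URL, and model ID are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const image = await (await fetch("https://example.com/cat.png")).blob();
const segments = await hf.imageSegmentation({
  model: "facebook/detr-resnet-50-panoptic", // illustrative
  data: image,
});
```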


imageToText

imageToText: (args: { data: Blob | ArrayBuffer ; model: string }, options?: Options) => Promise<ImageToTextOutput>

Type declaration

▸ (args, options?): Promise<ImageToTextOutput>

Parameters
Name Type Description
args Object -
args.data Blob | ArrayBuffer Binary image data
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<ImageToTextOutput>

Defined in

tasks/cv/imageToText.ts:22
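
Example (a minimal usage sketch; the token, image URL, and model ID are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const image = await (await fetch("https://example.com/cat.png")).blob();
const { generated_text } = await hf.imageToText({
  model: "nlpconnect/vit-gpt2-image-captioning", // illustrative captioning model
  data: image,
});
```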


objectDetection

objectDetection: (args: { data: Blob | ArrayBuffer ; model: string }, options?: Options) => Promise<ObjectDetectionOutput>

Type declaration

▸ (args, options?): Promise<ObjectDetectionOutput>

Parameters
Name Type Description
args Object -
args.data Blob | ArrayBuffer Binary image data
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<ObjectDetectionOutput>

Defined in

tasks/cv/objectDetection.ts:39
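
Example (a minimal usage sketch; the token, image URL, and model ID are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const image = await (await fetch("https://example.com/cats.png")).blob();
const detections = await hf.objectDetection({
  model: "facebook/detr-resnet-50", // illustrative detection model
  data: image,
});
```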


questionAnswering

questionAnswering: (args: { inputs: { context: string ; question: string } ; model: string }, options?: Options) => Promise<QuestionAnsweringOutput>

Type declaration

▸ (args, options?): Promise<QuestionAnsweringOutput>

Parameters
Name Type Description
args Object -
args.inputs Object -
args.inputs.context string -
args.inputs.question string -
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<QuestionAnsweringOutput>

Defined in

tasks/nlp/questionAnswering.ts:34
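
Example (a minimal usage sketch; the token and model ID are placeholders):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const { answer } = await hf.questionAnswering({
  model: "deepset/roberta-base-squad2", // illustrative QA model
  inputs: {
    question: "What is the capital of France?",
    context: "The capital of France is Paris.",
  },
});
```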


request

request: (args: { data: Blob | ArrayBuffer ; model: string ; parameters?: Record<string, unknown> } | { inputs: unknown ; model: string ; parameters?: Record<string, unknown> }, options?: Options & { includeCredentials?: boolean }) => Promise<unknown>

Type declaration

▸ (args, options?): Promise<unknown>

Parameters
Name Type
args { data: Blob | ArrayBuffer ; model: string ; parameters?: Record<string, unknown> } | { inputs: unknown ; model: string ; parameters?: Record<string, unknown> }
options? Options & { includeCredentials?: boolean }
Returns

Promise<unknown>

Defined in

tasks/custom/request.ts:7
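
Example (a minimal sketch of the low-level escape hatch; no task-specific typing or validation is applied, and the model ID is illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// The response is returned as-is, typed as unknown.
const raw = await hf.request({
  model: "bert-base-uncased", // illustrative
  inputs: "[MASK] world!",
});
```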


sentenceSimilarity

sentenceSimilarity: (args: { inputs: Record<string, unknown> | Record<string, unknown>[] ; model: string }, options?: Options) => Promise<SentenceSimilarityOutput>

Type declaration

▸ (args, options?): Promise<SentenceSimilarityOutput>

Parameters
Name Type Description
args Object -
args.inputs Record<string, unknown> | Record<string, unknown>[] The inputs vary based on the model. For example, when using sentence-transformers/paraphrase-xlm-r-multilingual-v1, the inputs will have a source_sentence string and a sentences array of strings.
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<SentenceSimilarityOutput>

Defined in

tasks/nlp/sentenceSimilarity.ts:23
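
Example (a minimal usage sketch; the token is a placeholder and the input shape shown matches the model named in the description above):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Returns one similarity score per entry in `sentences`.
const scores = await hf.sentenceSimilarity({
  model: "sentence-transformers/paraphrase-xlm-r-multilingual-v1",
  inputs: {
    source_sentence: "That is a happy person",
    sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"],
  },
});
```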


streamingRequest

streamingRequest: (args: { data: Blob | ArrayBuffer ; model: string ; parameters?: Record<string, unknown> } | { inputs: unknown ; model: string ; parameters?: Record<string, unknown> }, options?: Options & { includeCredentials?: boolean }) => AsyncGenerator<unknown, any, unknown>

Type declaration

▸ (args, options?): AsyncGenerator<unknown, any, unknown>

Parameters
Name Type
args { data: Blob | ArrayBuffer ; model: string ; parameters?: Record<string, unknown> } | { inputs: unknown ; model: string ; parameters?: Record<string, unknown> }
options? Options & { includeCredentials?: boolean }
Returns

AsyncGenerator<unknown, any, unknown>

Defined in

tasks/custom/streamingRequest.ts:9
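
Example (a minimal sketch of the low-level streaming escape hatch; the model ID is illustrative and the chunk shape depends on the task):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Yields parsed server-sent events as they arrive, typed as unknown.
for await (const chunk of hf.streamingRequest({
  model: "google/flan-t5-xxl", // illustrative
  inputs: 'repeat "one two three four"',
})) {
  console.log(chunk);
}
```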


summarization

summarization: (args: { inputs: string ; model: string ; parameters?: { max_length?: number ; max_time?: number ; min_length?: number ; repetition_penalty?: number ; temperature?: number ; top_k?: number ; top_p?: number } }, options?: Options) => Promise<SummarizationOutput>

Type declaration

▸ (args, options?): Promise<SummarizationOutput>

Parameters
Name Type Description
args Object -
args.inputs string A string to be summarized
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters? Object -
args.parameters.max_length? number (Default: None). Integer to define the maximum length in tokens of the output summary.
args.parameters.max_time? number (Default: None). Float (0-120.0). The maximum amount of time, in seconds, that the query should take. Network overhead means this is a soft limit.
args.parameters.min_length? number (Default: None). Integer to define the minimum length in tokens of the output summary.
args.parameters.repetition_penalty? number (Default: None). Float (0.0-100.0). The more a token is used within the generation, the more it is penalized, making it less likely to be picked in successive generation passes.
args.parameters.temperature? number (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1.0 means regular sampling, 0 means always taking the highest score, and 100.0 approaches uniform probability.
args.parameters.top_k? number (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
args.parameters.top_p? number (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of their probabilities is greater than top_p.
options? Options -
Returns

Promise<SummarizationOutput>

Defined in

tasks/nlp/summarization.ts:52
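
Example (a minimal usage sketch; the token, model ID, and input text are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const { summary_text } = await hf.summarization({
  model: "facebook/bart-large-cnn", // illustrative summarization model
  inputs: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building.",
  parameters: { max_length: 100 },
});
```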


tableQuestionAnswering

tableQuestionAnswering: (args: { inputs: { query: string ; table: Record<string, string[]> } ; model: string }, options?: Options) => Promise<TableQuestionAnsweringOutput>

Type declaration

▸ (args, options?): Promise<TableQuestionAnsweringOutput>

Parameters
Name Type Description
args Object -
args.inputs Object -
args.inputs.query string The query in plain text that you want to ask the table
args.inputs.table Record<string, string[]> A table of data represented as a dict of lists, where keys are the column headers and the lists are the column values; all lists must have the same size.
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<TableQuestionAnsweringOutput>

Defined in

tasks/nlp/tableQuestionAnswering.ts:40
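
Example (a minimal usage sketch; the token and model ID are placeholders, and the table values are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Every column list must have the same length.
const { answer } = await hf.tableQuestionAnswering({
  model: "google/tapas-base-finetuned-wtq", // illustrative table-QA model
  inputs: {
    query: "How many stars does the transformers repository have?",
    table: {
      Repository: ["Transformers", "Datasets", "Tokenizers"],
      Stars: ["36542", "4512", "3934"],
    },
  },
});
```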


textClassification

textClassification: (args: { inputs: string ; model: string }, options?: Options) => Promise<TextClassificationOutput>

Type declaration

▸ (args, options?): Promise<TextClassificationOutput>

Parameters
Name Type Description
args Object -
args.inputs string A string to be classified
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<TextClassificationOutput>

Defined in

tasks/nlp/textClassification.ts:26
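
Example (a minimal usage sketch; the token and model ID are placeholders):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Returns an array of { label, score } entries.
const labels = await hf.textClassification({
  model: "distilbert-base-uncased-finetuned-sst-2-english", // illustrative
  inputs: "I like you. I love you.",
});
```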


textGeneration

textGeneration: (args: { inputs: string ; model: string ; parameters?: { do_sample?: boolean ; max_new_tokens?: number ; max_time?: number ; num_return_sequences?: number ; repetition_penalty?: number ; return_full_text?: boolean ; temperature?: number ; top_k?: number ; top_p?: number } }, options?: Options) => Promise<TextGenerationOutput>

Type declaration

▸ (args, options?): Promise<TextGenerationOutput>

Parameters
Name Type Description
args Object -
args.inputs string The input string to generate text from
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters? Object -
args.parameters.do_sample? boolean (Default: true). Whether or not to use sampling; uses greedy decoding otherwise.
args.parameters.max_new_tokens? number (Default: None). Int (0-250). The number of new tokens to generate. This does not include the input length; it is an estimate of the amount of generated text you want. Each new token slows down the request, so balance response time against the length of the generated text.
args.parameters.max_time? number (Default: None). Float (0-120.0). The maximum amount of time, in seconds, that the query should take. Network overhead means this is a soft limit. Use it in combination with max_new_tokens for best results.
args.parameters.num_return_sequences? number (Default: 1). Integer. The number of propositions you want returned.
args.parameters.repetition_penalty? number (Default: None). Float (0.0-100.0). The more a token is used within the generation, the more it is penalized, making it less likely to be picked in successive generation passes.
args.parameters.return_full_text? boolean (Default: true). If set to false, the returned results will not contain the original query, making prompting easier.
args.parameters.temperature? number (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1.0 means regular sampling, 0 means always taking the highest score, and 100.0 approaches uniform probability.
args.parameters.top_k? number (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
args.parameters.top_p? number (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of their probabilities is greater than top_p.
options? Options -
Returns

Promise<TextGenerationOutput>

Defined in

tasks/nlp/textGeneration.ts:60
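
Example (a minimal usage sketch; the token and model ID are placeholders, and the parameter values are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const { generated_text } = await hf.textGeneration({
  model: "gpt2", // illustrative
  inputs: "The answer to the universe is",
  parameters: { max_new_tokens: 50, temperature: 0.7 },
});
```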


textGenerationStream

textGenerationStream: (args: { inputs: string ; model: string ; parameters?: { do_sample?: boolean ; max_new_tokens?: number ; max_time?: number ; num_return_sequences?: number ; repetition_penalty?: number ; return_full_text?: boolean ; temperature?: number ; top_k?: number ; top_p?: number } }, options?: Options) => AsyncGenerator<TextGenerationStreamOutput, any, unknown>

Type declaration

▸ (args, options?): AsyncGenerator<TextGenerationStreamOutput, any, unknown>

Parameters
Name Type Description
args Object -
args.inputs string The input string to generate text from
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters? Object -
args.parameters.do_sample? boolean (Default: true). Whether or not to use sampling; uses greedy decoding otherwise.
args.parameters.max_new_tokens? number (Default: None). Int (0-250). The number of new tokens to generate. This does not include the input length; it is an estimate of the amount of generated text you want. Each new token slows down the request, so balance response time against the length of the generated text.
args.parameters.max_time? number (Default: None). Float (0-120.0). The maximum amount of time, in seconds, that the query should take. Network overhead means this is a soft limit. Use it in combination with max_new_tokens for best results.
args.parameters.num_return_sequences? number (Default: 1). Integer. The number of propositions you want returned.
args.parameters.repetition_penalty? number (Default: None). Float (0.0-100.0). The more a token is used within the generation, the more it is penalized, making it less likely to be picked in successive generation passes.
args.parameters.return_full_text? boolean (Default: true). If set to false, the returned results will not contain the original query, making prompting easier.
args.parameters.temperature? number (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1.0 means regular sampling, 0 means always taking the highest score, and 100.0 approaches uniform probability.
args.parameters.top_k? number (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
args.parameters.top_p? number (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of their probabilities is greater than top_p.
options? Options -
Returns

AsyncGenerator<TextGenerationStreamOutput, any, unknown>

Defined in

tasks/nlp/textGenerationStream.ts:87
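
Example (a minimal streaming sketch; the token and model ID are placeholders):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Stream tokens as they are generated; generated_text stays null until the final event.
for await (const output of hf.textGenerationStream({
  model: "google/flan-t5-xxl", // illustrative
  inputs: 'repeat "one two three four"',
  parameters: { max_new_tokens: 250 },
})) {
  console.log(output.token.text, output.generated_text);
}
```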


textToImage

textToImage: (args: { inputs: string ; model: string ; parameters?: { guidance_scale?: number ; height?: number ; negative_prompt?: string ; num_inference_steps?: number ; width?: number } }, options?: Options) => Promise<Blob>

Type declaration

▸ (args, options?): Promise<Blob>

Parameters
Name Type Description
args Object -
args.inputs string The text to generate an image from
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters? Object -
args.parameters.guidance_scale? number Guidance scale: a higher guidance scale encourages the model to generate images closely linked to the text prompt, usually at the expense of lower image quality.
args.parameters.height? number The height in pixels of the generated image
args.parameters.negative_prompt? string An optional negative prompt for the image generation
args.parameters.num_inference_steps? number The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
args.parameters.width? number The width in pixels of the generated image
options? Options -
Returns

Promise<Blob>

Defined in

tasks/cv/textToImage.ts:41
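
Example (a minimal usage sketch; the token, model ID, and prompts are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Returns the generated image as a Blob.
const image = await hf.textToImage({
  model: "stabilityai/stable-diffusion-2", // illustrative
  inputs: "award winning high resolution photo of a giant tortoise",
  parameters: { negative_prompt: "blurry" },
});
```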


tokenClassification

tokenClassification: (args: { inputs: string ; model: string ; parameters?: { aggregation_strategy?: "none" | "simple" | "first" | "average" | "max" } }, options?: Options) => Promise<TokenClassificationOutput>

Type declaration

▸ (args, options?): Promise<TokenClassificationOutput>

Parameters
Name Type Description
args Object -
args.inputs string A string to be classified
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters? Object -
args.parameters.aggregation_strategy? "none" | "simple" | "first" | "average" | "max" (Default: simple). There are several aggregation strategies. none: every token gets classified without further aggregation. simple: entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar). first: same as simple, except words cannot end up with different tags; words use the tag of the first token when there is ambiguity. average: same as simple, except words cannot end up with different tags; scores are averaged across tokens and then the maximum label is applied. max: same as simple, except words cannot end up with different tags; the word entity is the token with the maximum score.
options? Options -
Returns

Promise<TokenClassificationOutput>

Defined in

tasks/nlp/tokenClassification.ts:57
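
Example (a minimal usage sketch; the token and model ID are placeholders):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Returns the recognized entities with their labels and scores.
const entities = await hf.tokenClassification({
  model: "dbmdz/bert-large-cased-finetuned-conll03-english", // illustrative NER model
  inputs: "My name is Sarah Jessica Parker but you can call me Jessica",
});
```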


translation

translation: (args: { inputs: string ; model: string }, options?: Options) => Promise<TranslationOutput>

Type declaration

▸ (args, options?): Promise<TranslationOutput>

Parameters
Name Type Description
args Object -
args.inputs string A string to be translated
args.model string The model to use. Can be a full URL for HF inference endpoints.
options? Options -
Returns

Promise<TranslationOutput>

Defined in

tasks/nlp/translation.ts:22
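
Example (a minimal usage sketch; the token and model ID are placeholders):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const { translation_text } = await hf.translation({
  model: "t5-base", // illustrative translation model
  inputs: "My name is Wolfgang and I live in Berlin",
});
```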


zeroShotClassification

zeroShotClassification: (args: { inputs: string | string[] ; model: string ; parameters: { candidate_labels: string[] ; multi_label?: boolean } }, options?: Options) => Promise<ZeroShotClassificationOutput>

Type declaration

▸ (args, options?): Promise<ZeroShotClassificationOutput>

Parameters
Name Type Description
args Object -
args.inputs string | string[] A string or a list of strings
args.model string The model to use. Can be a full URL for HF inference endpoints.
args.parameters Object -
args.parameters.candidate_labels string[] A list of strings that are potential classes for inputs (max 10 candidate_labels; for more, simply run multiple requests, as results become misleading with too many candidate labels anyway; if you want to keep exactly the same behavior, set multi_label=true and do the scaling on your end).
args.parameters.multi_label? boolean (Default: false). Set to true if classes can overlap.
options? Options -
Returns

Promise<ZeroShotClassificationOutput>

Defined in

tasks/nlp/zeroShotClassification.ts:34
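
Example (a minimal usage sketch; the token and model ID are placeholders, and the labels are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const result = await hf.zeroShotClassification({
  model: "facebook/bart-large-mnli", // illustrative zero-shot model
  inputs: ["Hi, I recently bought a device from your company but it is not working as advertised."],
  parameters: { candidate_labels: ["refund", "legal", "faq"] },
});
```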

Methods

endpoint

endpoint(endpointUrl): HfInferenceEndpoint

Returns a copy of HfInference tied to the specified endpoint.

Parameters

Name Type
endpointUrl string

Returns

HfInferenceEndpoint

Defined in

HfInference.ts:42
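
Example (a minimal usage sketch; the token and endpoint URL are placeholders for your own Inference Endpoint, and the bound copy omits the model argument):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Placeholder endpoint URL; task methods on the bound copy take no `model` field.
const gpt2 = hf.endpoint("https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2");
const { generated_text } = await gpt2.textGeneration({
  inputs: "The answer to the universe is",
});
```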