
Model Card for text2sql

An LLM (CodeLlama 7B Instruct, 6.74B parameters, BF16 weights) instruction-finetuned for the text-to-SQL task: given a natural-language question and a database schema, the model generates the corresponding SQL query.

How to Get Started with the Model

Use the code below to get started with the model.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "dataeaze/dataeaze-text2sql-codellama_7b_instruct-clinton_text_to_sql_v1",
    torch_dtype=torch.bfloat16,
    device_map='auto',  # requires the `accelerate` package
)

tokenizer = AutoTokenizer.from_pretrained("dataeaze/dataeaze-text2sql-codellama_7b_instruct-clinton_text_to_sql_v1")
# Llama tokenizers ship without a pad token; reuse EOS so padding works.
tokenizer.pad_token = tokenizer.eos_token
model.eval()

prompt = """ Below are sql tables schemas paired with instruction that describes a task. 
Using valid SQLite, write a response that appropriately completes the request for the provided tables. 
### Instruction: How many transactions were made by a customer in a specific month? 
### Database: RewardsProgramDB61 
### Input: 
CREATE SCHEMA RewardsProgram;

CREATE TABLE Customer (
    CustomerID INT NOT NULL AUTO_INCREMENT,
    FirstName VARCHAR(50) NOT NULL,
    LastName VARCHAR(50) NOT NULL,
    Email VARCHAR(100) UNIQUE NOT NULL,
    Phone VARCHAR(20) UNIQUE,
    DateOfBirth DATE,
    PRIMARY KEY (CustomerID)
);

CREATE TABLE Membership (
    MembershipID INT NOT NULL AUTO_INCREMENT,
    MembershipType VARCHAR(50) NOT NULL,
    DiscountPercentage DECIMAL(5, 2) NOT NULL,
    ValidFrom DATETIME,
    ValidTo DATETIME,
    CustomerID INT NOT NULL,
    PRIMARY KEY (MembershipID),
    FOREIGN KEY (CustomerID) REFERENCES Customer(CustomerID)
);

CREATE TABLE Transaction (
    TransactionID INT NOT NULL AUTO_INCREMENT,
    TransactionDate TIMESTAMP,
    TotalAmount DECIMAL(10, 2) NOT NULL,
    CustomerID INT NOT NULL,
    PRIMARY KEY (TransactionID),
    FOREIGN KEY (CustomerID) REFERENCES Customer(CustomerID)
);

CREATE TABLE TransactionDetail (
    TransactionDetailID INT NOT NULL AUTO_INCREMENT,
    TransactionID INT NOT NULL,
    ProductID INT NOT NULL,
    Quantity INT NOT NULL,
    UnitPrice DECIMAL(10, 2) NOT NULL,
    PRIMARY KEY (TransactionDetailID),
    FOREIGN KEY (TransactionID) REFERENCES Transaction(TransactionID),
    FOREIGN KEY (ProductID) REFERENCES Product(ProductID)
);

CREATE TABLE Product (
    ProductID INT NOT NULL AUTO_INCREMENT,
    ProductName VARCHAR(100) NOT NULL,
    UnitPrice DECIMAL(10, 2) NOT NULL,
    AvailableQuantity INT NOT NULL,
    CreatedDate DATETIME,
    PRIMARY KEY (ProductID)
);

ALTER TABLE Membership ADD CONSTRAINT FK_Membership_Customer FOREIGN KEY (CustomerID) REFERENCES Customer(CustomerID);

ALTER TABLE TransactionDetail ADD CONSTRAINT FK_TransactionDetail_Transaction FOREIGN KEY (TransactionID) REFERENCES Transaction(TransactionID);

ALTER TABLE TransactionDetail ADD CONSTRAINT FK_TransactionDetail_Product FOREIGN KEY (ProductID) REFERENCES Product(ProductID);
"""

# Tokenize the prompt and generate the SQL completion.
inputs = tokenizer(prompt, padding=True, return_tensors='pt')
outputs = model.generate(
    input_ids=inputs['input_ids'].to(model.device),
    attention_mask=inputs['attention_mask'].to(model.device),
    max_new_tokens=3072,
)

generated_query = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_query)
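
Note that decoding outputs[0] returns the prompt followed by the completion. To keep only the generated SQL, slice off the prompt tokens before decoding:

# Keep only the newly generated tokens (everything after the prompt).
prompt_len = inputs['input_ids'].shape[1]
sql_only = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
print(sql_only)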

Results

model-index:
  - name: dataeaze/dataeaze-text2sql-codellama_7b_instruct-dzsql
    results:
    - task:
        type: text-to-sql
      dataset:
        name: SPIDER 1.0
        type: text-to-sql
      metrics:
        - name: Execution with Values
          type: Execution with Values
          value: 64.3
        - name: Exact Set Match without Values
          type: Exact Set Match without Values
          value: 29.6
      source:
        name: Spider 1.0 - Leaderboard
        url: https://yale-lily.github.io/spider
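
For reference, the Spider "Execution with Values" metric counts a prediction as correct when it returns the same result set as the gold query when both are executed against the database. Below is a minimal, unofficial sketch of that check for a SQLite database (the official Spider evaluation script handles many more cases, such as ORDER BY semantics and partial-component matching):

# Unofficial sketch of execution-based matching; not the Spider eval script.
import sqlite3
from collections import Counter

def execution_match(db_path: str, gold_sql: str, pred_sql: str) -> bool:
    conn = sqlite3.connect(db_path)
    try:
        gold = conn.execute(gold_sql).fetchall()
        try:
            pred = conn.execute(pred_sql).fetchall()
        except sqlite3.Error:
            return False  # predicted query failed to execute
        # Compare result rows as multisets (order-insensitive).
        return Counter(map(tuple, gold)) == Counter(map(tuple, pred))
    finally:
        conn.close()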