[ { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=zlw6AHwukB", "bibtext":"@inproceedings{\nli2024a,\ntitle={A Survey on Deep Learning for Theorem Proving},\nauthor={Zhaoyu Li and Jialiang Sun and Logan Murphy and Qidong Su and Zenan Li and Xian Zhang and Kaiyu Yang and Xujie Si},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=zlw6AHwukB}\n}", "abstract":"Theorem proving is a fundamental aspect of mathematics, spanning from informal reasoning in natural language to rigorous derivations in formal systems. In recent years, the advancement of deep learning, especially the emergence of large language models, has sparked a notable surge of research exploring these techniques to enhance the process of theorem proving. This paper presents a comprehensive survey of deep learning for theorem proving by offering (i) a thorough review of existing approaches across various tasks such as autoformalization, premise selection, proofstep generation, and proof search; (ii) an extensive summary of curated datasets and strategies for synthetic data generation; (iii) a detailed analysis of evaluation metrics and the performance of state-of-the-art methods; and (iv) a critical discussion on the persistent challenges and the promising avenues for future exploration. Our survey aims to serve as a foundational reference for deep learning approaches in theorem proving, inspiring and catalyzing further research endeavors in this rapidly growing field. A curated list of papers is available at https:\/\/github.com\/zhaoyu-li\/DL4TP.", "title":"A Survey on Deep Learning for Theorem Proving", "authors":[ "Zhaoyu Li", "Jialiang Sun", "Logan Murphy", "Qidong Su", "Zenan Li", "Xian Zhang", "Kaiyu Yang", "Xujie Si" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.09939", "GitHub":[ "https:\/\/github.com\/zhaoyu-li\/dl4tp" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":0 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=zl16jLb91v", "bibtext":"@inproceedings{\ndurmus2024towards,\ntitle={Towards Measuring the Representation of Subjective Global Opinions in Language Models},\nauthor={Esin DURMUS and Karina Nguyen and Thomas Liao and Nicholas Schiefer and Amanda Askell and Anton Bakhtin and Carol Chen and Zac Hatfield-Dodds and Danny Hernandez and Nicholas Joseph and Liane Lovitt and Sam McCandlish and Orowa Sikder and Alex Tamkin and Janel Thamkul and Jared Kaplan and Jack Clark and Deep Ganguli},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=zl16jLb91v}\n}", "abstract":"Large language models (LLMs) may not equitably represent diverse global perspectives on societal issues. In this paper, we develop a quantitative framework to evaluate whose opinions model-generated responses are more similar to. We first build a dataset, GlobalOpinionQA, comprised of questions and answers from cross-national surveys designed to capture diverse opinions on global issues across different countries. Next, we define a metric that quantifies the similarity between LLM-generated survey responses and human responses, conditioned on country. With our framework, we run three experiments on an LLM trained to be helpful, honest, and harmless with Constitutional AI. By default, LLM responses tend to be more similar to the opinions of certain populations, such as those from the USA, and some European and South American countries, highlighting the potential for biases. When we prompt the model to consider a particular country's perspective, responses shift to be more similar to the opinions of the prompted populations, but can reflect harmful cultural stereotypes. When we translate GlobalOpinionQA questions to a target language, the model's responses do not necessarily become the most similar to the opinions of speakers of those languages. We will release our dataset for others to use and build on upon acceptance.", "title":"Towards Measuring the Representation of Subjective Global Opinions in Language Models", "authors":[ "Esin DURMUS", "Karina Nguyen", "Thomas Liao", "Nicholas Schiefer", "Amanda Askell", "Anton Bakhtin", "Carol Chen", "Zac Hatfield-Dodds", "Danny Hernandez", "Nicholas Joseph", "Liane Lovitt", "Sam McCandlish", "Orowa Sikder", "Alex Tamkin", "Janel Thamkul", "Jared Kaplan", "Jack Clark", "Deep Ganguli" ], "id":"Conference", "type":"Poster", "arxiv_id":"2306.16388", "GitHub":[ "https:\/\/github.com\/salt-nlp\/culturebank" ], "paper_page":"https:\/\/huggingface.co\/papers\/2306.16388", "n_linked_authors":6, "upvotes":6, "num_comments":0, "n_authors":18, "Models":[ ], "Datasets":[ "Anthropic\/llm_global_opinions" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":1 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=zZa7Ke7WAJ", "bibtext":"@inproceedings{\nxia2024top,\ntitle={Top Leaderboard Ranking = Top Coding Proficiency, Always? EvoEval: Evolving Coding Benchmarks via {LLM}},\nauthor={Chunqiu Steven Xia and Yinlin Deng and LINGMING ZHANG},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=zZa7Ke7WAJ}\n}", "abstract":"Large language models (LLMs) have become the go-to choice for code generation tasks, with an exponential increase in the training, development, and usage of LLMs specifically for code generation. To evaluate the ability of LLMs on code, both academic and industry practitioners rely on popular handcrafted benchmarks. However, prior benchmarks contain only a very limited set of problems, both in quantity and variety. Further, due to popularity and age, many benchmarks are prone to data leakage where example solutions can be readily found on the web and thus potentially in training data. Such limitations inevitably lead us to inquire: Is the leaderboard performance on existing benchmarks reliable and comprehensive enough to measure the program synthesis ability of LLMs? To address this, we introduce EvoEval \u2013 a program synthesis benchmark suite created by evolving existing benchmarks into different targeted domains for a comprehensive evaluation of LLM coding abilities. Our study on 51 LLMs shows that compared to the high performance obtained on standard benchmarks like HumanEval, there is a significant drop in performance (on average 39.4%) when using EvoEval. Additionally, the decrease in performance can range from 19.6% to 47.7%, leading to drastic ranking changes amongst LLMs and showing potential overfitting of existing benchmarks. Furthermore, we showcase various insights including the brittleness of instruction-following models when encountering rewording or subtle changes as well as the importance of learning problem composition and decomposition. EvoEval not only provides comprehensive benchmarks, but can be used to further evolve arbitrary problems to keep up with advances and the ever-changing landscape of LLMs for code. We have open-sourced our benchmarks, tools, and all LLM-generated code at https:\/\/github.com\/evo-eval\/evoeval.", "title":"Top Leaderboard Ranking = Top Coding Proficiency, Always? EvoEval: Evolving Coding Benchmarks via LLM", "authors":[ "Chunqiu Steven Xia", "Yinlin Deng", "LINGMING ZHANG" ], "id":"Conference", "type":"Oral", "arxiv_id":"2403.19114", "GitHub":[ "https:\/\/github.com\/evo-eval\/evoeval" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.19114", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":2 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=zSf8PJyQb2", "bibtext":"@inproceedings{\nmiller2024transformer,\ntitle={Transformer Circuit Evaluation Metrics Are Not Robust},\nauthor={Joseph Miller and Bilal Chughtai and William Saunders},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=zSf8PJyQb2}\n}", "abstract":"Mechanistic interpretability work attempts to reverse engineer the learned algorithms present inside neural networks. One focus of this work has been to discover 'circuits' - subgraphs of the full model that explain behaviour on specific tasks. But how do we measure the performance of such circuits? Prior work has attempted to measure circuit 'faithfulness' - the degree to which the circuit replicates the performance of the full model. In this work, we survey many considerations for designing experiments that measure circuit faithfulness by ablating portions of the model's computation. Concerningly, we find existing methods are highly sensitive to seemingly insignificant changes in the ablation methodology. We conclude that existing circuit faithfulness scores reflect _both_ the methodological choices of researchers as well as the actual components of the circuit - the task a circuit is required to perform depends on the ablation used to test it. The ultimate goal of mechanistic interpretability work is to understand neural networks, so we emphasize the need for more clarity in the precise claims being made about circuits. We open source a library at [this https URL](https:\/\/github.com\/UFO-101\/auto-circuit) that includes highly efficient implementations of a wide range of ablation methodologies and circuit discovery algorithms.", "title":"Transformer Circuit Evaluation Metrics Are Not Robust", "authors":[ "Joseph Miller", "Bilal Chughtai", "William Saunders" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":3 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=z7FvXbyyrM", "bibtext":"@inproceedings{\nhuh2024longform,\ntitle={Long-Form Answers to Visual Questions from Blind and Low Vision People},\nauthor={Mina Huh and Fangyuan Xu and Yi-Hao Peng and Chongyan Chen and Hansika Murugu and Danna Gurari and Eunsol Choi and Amy Pavel},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=z7FvXbyyrM}\n}", "abstract":"Vision language models can now generate long-form answers to questions about images \u2013 long-form visual question answers (LFVQA). We contribute VizWiz-LF, a dataset of long-form answers to visual questions posed by blind and low vision (BLV) users. VizWiz-LF contains 4.2k long-form answers to 600 visual questions, collected from human expert describers and six VQA models. We develop and annotate functional roles of sentences of LFVQA and demonstrate that long-form answers contain information beyond the question answer such as explanations and suggestions to retake photos. We further conduct automatic and human evaluations involving BLV and sighted people to evaluate long-form answers. While BLV people perceive both human-written and generated long-form answers as plausible, generated answers often hallucinate incorrect visual details, especially for unanswerable visual questions (e.g., blurry or irrelevant images). To reduce hallucinations, we evaluate VQA models on their ability to abstain from answering unanswerable questions.", "title":"Long-Form Answers to Visual Questions from Blind and Low Vision People", "authors":[ "Mina Huh", "Fangyuan Xu", "Yi-Hao Peng", "Chongyan Chen", "Hansika Murugu", "Danna Gurari", "Eunsol Choi", "Amy Pavel" ], "id":"Conference", "type":"Oral", "arxiv_id":"2408.06303", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":4 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=yoVRyrEgix", "bibtext":"@inproceedings{\nsharma2024locating,\ntitle={Locating and Editing Factual Associations in Mamba},\nauthor={Arnab Sen Sharma and David Atkinson and David Bau},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=yoVRyrEgix}\n}", "abstract":"We investigate the mechanisms of factual recall in the Mamba state space model. Our work is inspired by previous findings in autoregressive transformer language models suggesting that their knowledge recall is localized to particular modules at specific token locations; we therefore ask whether factual recall in Mamba can be similarly localized. To investigate this, we conduct four lines of experiments on Mamba. First, we apply causal tracing or interchange interventions to localize key components inside Mamba that are responsible for recalling facts, revealing that specific components within middle layers show strong causal effects at the last token of the subject, while the causal effect of intervening on later layers is most pronounced at the last token of the prompt, matching previous findings on autoregressive transformers. Second, we show that rank-one model editing methods can successfully insert facts at specific locations, again resembling findings on transformer LMs. Finally we adapt attention-knockout techniques to Mamba in order to dissect information flow during factual recall. We compare Mamba directly to a similar-sized autoregressive transformer LM and conclude that despite significant differences in architectures, when it comes to factual recall, the two architectures share many similarities.", "title":"Locating and Editing Factual Associations in Mamba", "authors":[ "Arnab Sen Sharma", "David Atkinson", "David Bau" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.03646", "GitHub":[ "https:\/\/github.com\/arnab-api\/romba" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.03646", "n_linked_authors":2, "upvotes":3, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":5 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=yfyHxvVzZT", "bibtext":"@inproceedings{\nkim2024does,\ntitle={Does Incomplete Syntax Influence Korean Language Model? Focusing on Word Order and Case Markers},\nauthor={Jong Myoung Kim and Young-Jun Lee and Yong-Jin Han and Ho-Jin Choi and Sangkeun Jung},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=yfyHxvVzZT}\n}", "abstract":"Syntactic elements, such as word order and case markers, are fundamental in natural language processing. Recent studies show that syntactic information boosts language model performance and offers clues for people to understand their learning mechanisms. Unlike languages with a fixed word order such as English, Korean allows for varied word sequences, despite its canonical structure, due to case markers that indicate the functions of sentence components. This study explores whether Korean language models can accurately capture this flexibility. We note that incomplete word orders and omitted case markers frequently appear in ordinary Korean communication. To investigate this further, we introduce the Syntactically Incomplete Korean (SIKO) dataset. Through SIKO, we assessed Korean language models\u2019 flexibility with incomplete syntax and confirmed the dataset\u2019s training value. Results indicate these models reflect Korean\u2019s inherent flexibility, accurately handling incomplete inputs. Moreover, fine-tuning with SIKO enhances the ability to handle common incomplete Korean syntactic forms. The dataset\u2019s simple construction process, coupled with significant performance enhancements, solidifies its standing as an effective data augmentation technique. The SIKO will become accessible post-publication.", "title":"Does Incomplete Syntax Influence Korean Language Model? Focusing on Word Order and Case Markers", "authors":[ "Jong Myoung Kim", "Young-Jun Lee", "Yong-Jin Han", "Ho-Jin Choi", "Sangkeun Jung" ], "id":"Conference", "type":"Poster", "arxiv_id":"2407.09184", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":6 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ybaK4asBT2", "bibtext":"@inproceedings{\nlu2024llm,\ntitle={{LLM} Discussion: Enhancing the Creativity of Large Language Models via Discussion Framework and Role-Play},\nauthor={Li-Chun Lu and Shou-Jen Chen and Tsung-Min Pai and Chan-Hung Yu and Hung-yi Lee and Shao-Hua Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ybaK4asBT2}\n}", "abstract":"Large language models (LLMs) have shown exceptional proficiency in natural language processing but often fall short of generating creative and original responses to open-ended questions. To enhance LLM creativity, our key insight is to emulate the human process of inducing collective creativity through engaging discussions with participants from diverse backgrounds and perspectives. To this end, we propose LLM Discussion, a three-phase discussion framework that facilitates vigorous and diverging idea exchanges and ensures convergence to creative answers. Moreover, we adopt a role-playing technique by assigning distinct roles to LLMs to combat the homogeneity of LLMs. We evaluate the efficacy of the proposed framework with the Alternative Uses Test, Similarities Test, Instances Test, and Scientific Creativity Test through both LLM evaluation and human study. The results show that our proposed framework outperforms single-LLM approaches and existing multi-LLM frameworks across various creativity metrics. The code is available at https:\/\/github.com\/lawraa\/LLM-Discussion.", "title":"LLM Discussion: Enhancing the Creativity of Large Language Models via Discussion Framework and Role-Play", "authors":[ "Li-Chun Lu", "Shou-Jen Chen", "Tsung-Min Pai", "Chan-Hung Yu", "Hung-yi Lee", "Shao-Hua Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.06373", "GitHub":[ "https:\/\/github.com\/lawraa\/llm-discussion" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.06373", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":7 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=yK8MT91dQY", "bibtext":"@inproceedings{\nzhan2024large,\ntitle={Large Language Models are Capable of Offering Cognitive Reappraisal, if Guided},\nauthor={Hongli Zhan and Allen Zheng and Yoon Kyung Lee and Jina Suh and Junyi Jessy Li and Desmond Ong},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=yK8MT91dQY}\n}", "abstract":"Large language models (LLMs) have offered new opportunities for emotional support, and recent work has shown that they can produce empathic responses to people in distress. However, long-term mental well-being requires emotional self-regulation, where a one-time empathic response falls short. This work takes a first step by engaging with cognitive reappraisals, a strategy from psychology practitioners that uses language to targetedly change negative appraisals that an individual makes of the situation; such appraisals is known to sit at the root of human emotional experience. We hypothesize that psychologically grounded principles could enable such advanced psychology capabilities in LLMs, and design RESORT which consists of a series of reappraisal constitutions across multiple dimensions that can be used as LLM instructions. We conduct a first-of-its-kind expert evaluation (by clinical psychologists with M.S. or Ph.D. degrees) of an LLM\u2019s zero-shot ability to generate cognitive reappraisal responses to medium-length social media messages asking for support. This fine-grained evaluation showed that even LLMs at the 7B scale guided by RESORT are capable of generating empathic responses that can help users reappraise their situations.", "title":"Large Language Models are Capable of Offering Cognitive Reappraisal, if Guided", "authors":[ "Hongli Zhan", "Allen Zheng", "Yoon Kyung Lee", "Jina Suh", "Junyi Jessy Li", "Desmond Ong" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01288", "GitHub":[ "https:\/\/github.com\/honglizhan\/resort_cognitive_reappraisal" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":8 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=yK2eGE8QVW", "bibtext":"@inproceedings{\nshen2024nemoaligner,\ntitle={NeMo-Aligner: Scalable Toolkit for Efficient Model Alignment},\nauthor={Gerald Shen and Zhilin Wang and Olivier Delalleau and Jiaqi Zeng and Yi Dong and Daniel Egert and Shengyang Sun and Jimmy J. Zhang and Sahil Jain and Ali Taghibakhshi and Markel Sanz Ausin and Ashwath Aithal and Oleksii Kuchaiev},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=yK2eGE8QVW}\n}", "abstract":"Aligning Large Language Models (LLMs) with human values and preferences is essential for making them helpful and safe.\nHowever, building efficient tools to perform alignment can be challenging, especially for the largest and most competent LLMs which often contain tens or hundreds of billions of parameters. \nWe create NeMo-Aligner, a toolkit for model alignment that can efficiently scale to a thousand GPUs for training the largest open-source LLMs such as Nemotron 4 340B and Llama 3.1 405B. \nNeMo-Aligner comes with highly optimized and scalable implementations for major paradigms of model alignment such as: Reinforcement Learning from Human Feedback (RLHF), Direct Preference Optimization (DPO), SteerLM, and Self-Play Fine-Tuning (SPIN).\nAdditionally, our toolkit supports running most of the alignment techniques in a Parameter Efficient Fine-Tuning (PEFT) setting.\nNeMo-Aligner is designed for extensibility, allowing support for other alignment techniques with minimal effort.\nIt is open-sourced with Apache 2.0 License and we invite community contributions at https:\/\/github.com\/NVIDIA\/NeMo-Aligner.", "title":"NeMo-Aligner: Scalable Toolkit for Efficient Model Alignment", "authors":[ "Gerald Shen", "Zhilin Wang", "Olivier Delalleau", "Jiaqi Zeng", "Yi Dong", "Daniel Egert", "Shengyang Sun", "Jimmy J. Zhang", "Sahil Jain", "Ali Taghibakhshi", "Markel Sanz Ausin", "Ashwath Aithal", "Oleksii Kuchaiev" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.01481", "GitHub":[ "https:\/\/github.com\/nvidia\/nemo-aligner" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.01481", "n_linked_authors":10, "upvotes":25, "num_comments":1, "n_authors":13, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":9 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=yIEyHP7AvH", "bibtext":"@inproceedings{\nghahroodi2024khayyam,\ntitle={Khayyam Challenge (Persian{MMLU}): Is Your {LLM} Truly Wise to The Persian Language?},\nauthor={Omid Ghahroodi and Marzia Nouri and Mohammad Vali Sanian and Alireza Sahebi and Doratossadat Dastgheib and Ehsaneddin Asgari and Mahdieh Soleymani Baghshah and Mohammad Hossein Rohban},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=yIEyHP7AvH}\n}", "abstract":"Evaluating Large Language Models (LLMs) is challenging due to their generative nature, necessitating precise evaluation methodologies. Additionally, non-English LLM evaluation lags behind English, resulting in the absence or weakness of LLMs for many languages.\nIn response to this necessity, we introduce Khayyam Challenge (also known as PersianMMLU), a meticulously curated collection comprising 20,805 four-choice questions sourced from 38 diverse tasks extracted from Persian examinations, spanning a wide spectrum of subjects, complexities, and ages. The primary objective of the Khayyam Challenge is to facilitate the rigorous evaluation of LLMs that support the Persian language. Distinctive features of the Khayyam Challenge are (i) its comprehensive coverage of various topics, including literary comprehension, mathematics, sciences, logic, intelligence testing, etc aimed at assessing different facets of LLMs such as language comprehension, reasoning, and information retrieval across various educational stages, from lower primary school to upper secondary school (ii) its inclusion of rich metadata such as human response rates, difficulty levels, and descriptive answers (iii) its utilization of new data to avoid data contamination issues prevalent in existing frameworks (iv) its use of original, non-translated data tailored for Persian speakers, ensuring the framework is free from translation challenges and errors while encompassing cultural nuances (v) its inherent scalability for future data updates and evaluations without requiring special human effort. Previous works lacked an evaluation framework that combined all of these features into a single comprehensive benchmark. Furthermore, we evaluate a wide range of existing LLMs that support the Persian language, with statistical analyses and interpretations of their outputs. We believe that the Khayyam Challenge will improve advancements in LLMs for the Persian language by highlighting the existing limitations of current models, while also enhancing the precision and depth of evaluations on LLMs, even within the English language context.", "title":"Khayyam Challenge (PersianMMLU): Is Your LLM Truly Wise to The Persian Language?", "authors":[ "Omid Ghahroodi", "Marzia Nouri", "Mohammad Vali Sanian", "Alireza Sahebi", "Doratossadat Dastgheib", "Ehsaneddin Asgari", "Mahdieh Soleymani Baghshah", "Mohammad Hossein Rohban" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":10 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=y7JnjDcIQa", "bibtext":"@inproceedings{\nanagnostidis2024how,\ntitle={How Susceptible are {LLM}s to Influence in Prompts?},\nauthor={Sotiris Anagnostidis and Jannis Bulian},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=y7JnjDcIQa}\n}", "abstract":"Large Language Models (LLMs) are highly sensitive to prompts, including additional context provided therein. As LLMs grow in capability, understanding their prompt-sensitivity becomes increasingly crucial for ensuring reliable and robust performance, particularly since evaluating these models becomes more challenging. In this work, we investigate how current models (Llama, Mixtral, Falcon) respond when presented with additional input from another model, mimicking a scenario where a more capable model -- or a system with access to more external information -- provides supplementary information to the target model. Across a diverse spectrum of question-answering tasks, we study how an LLM's response to multiple-choice questions changes when the prompt includes a prediction and explanation from another model. Specifically, we explore the influence of the presence of an explanation, the stated authoritativeness of the source, and the stated confidence of the supplementary input. Our findings reveal that models are strongly influenced, and when explanations are provided they are swayed irrespective of the quality of the explanation. The models are more likely to be swayed if the input is presented as being authoritative or confident, but the effect is small in size. This study underscores the significant prompt-sensitivity of LLMs and highlights the potential risks of incorporating outputs from external sources without thorough scrutiny and further validation. As LLMs continue to advance, understanding and mitigating such sensitivities will be crucial for their reliable and trustworthy deployment.", "title":"How Susceptible are LLMs to Influence in Prompts?", "authors":[ "Sotiris Anagnostidis", "Jannis Bulian" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":11 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=y6aGT625Lk", "bibtext":"@inproceedings{\npark2024paireval,\ntitle={PairEval: Open-domain Dialogue Evaluation Metric with Pairwise Comparisons},\nauthor={ChaeHun Park and Minseok Choi and Dohyun Lee and Jaegul Choo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=y6aGT625Lk}\n}", "abstract":"Building a reliable and automated evaluation metric is a necessary but challenging problem for open-domain dialogue systems. \nRecent studies proposed evaluation metrics that assess generated responses by considering their relevance to previous dialogue histories. \nAlthough effective, these metrics evaluate individual responses directly rather than considering their relative quality compared to other responses. To handle this, we propose PairEval, a novel dialogue evaluation metric for assessing responses by comparing their quality against responses in different conversations. Our metric is built on top of open-sourced and moderate-size language models, and we make them specialized in pairwise comparison between dialogue responses. Extensive experiments on multiple benchmarks demonstrate that our metric exhibits a higher correlation with human judgments than baseline metrics. We also find that the proposed comparative metric is more robust in detecting common failures from open-domain dialogue systems, including repetition and speaker insensitivity. The codes and models will be publicly available after the paper is accepted.", "title":"PairEval: Open-domain Dialogue Evaluation Metric with Pairwise Comparisons", "authors":[ "ChaeHun Park", "Minseok Choi", "Dohyun Lee", "Jaegul Choo" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":12 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=y6SqbJfCSk", "bibtext":"@inproceedings{\nqin2024hgrn,\ntitle={{HGRN}2: Gated Linear {RNN}s with State Expansion},\nauthor={Zhen Qin and Songlin Yang and Weixuan Sun and Xuyang Shen and Dong Li and Weigao Sun and Yiran Zhong},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=y6SqbJfCSk}\n}", "abstract":"Hierarchically gated linear RNN (HGRN) has demonstrated competitive training speed and performance in language modeling while offering efficient inference. However, the recurrent state size of HGRN remains relatively small, limiting its expressiveness. To address this issue, we introduce a simple outer product-based state expansion mechanism, which significantly enlarges the recurrent state size without introducing any additional parameters. This enhancement also provides a linear attention interpretation for HGRN2, enabling hardware-efficient training. Our extensive experiments verify the advantage of HGRN2 over HGRN consistently across different settings and comptetive to other recurrent models.", "title":"HGRN2: Gated Linear RNNs with State Expansion", "authors":[ "Zhen Qin", "Songlin Yang", "Weixuan Sun", "Xuyang Shen", "Dong Li", "Weigao Sun", "Yiran Zhong" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/sustcsonglin\/flash-linear-attention" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":13 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=xm8zYRfrqE", "bibtext":"@inproceedings{\nkortukov2024studying,\ntitle={Studying Large Language Model Behaviors Under Context-Memory Conflicts With Real Documents},\nauthor={Evgenii Kortukov and Alexander Rubinstein and Elisa Nguyen and Seong Joon Oh},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=xm8zYRfrqE}\n}", "abstract":"Retrieval-augmented generation (RAG) mitigates many problems of fully parametric language models, such as temporal degradation, hallucinations, and lack of grounding. In RAG, the model\u2019s knowledge can be updated from documents provided in context. This leads to cases of conflict between the model\u2019s parametric knowledge and the contextual information, where the model may not always update its knowledge. Previous work studied context-memory knowledge conflicts by creating synthetic documents that contradict the model\u2019s correct parametric answers. We present a framework for studying such knowledge conflicts in a realistic setup. We update incorrect parametric knowledge using real conflicting documents. This reflects how knowledge conflicts arise in practice. In this realistic scenario, we find that knowledge updates fail less often than previously reported. In cases where the models still fail to update their answers, we find a parametric bias: the incorrect parametric answer appearing in context makes the knowledge update likelier to fail. These results suggest that the factual parametric knowledge of LLMs can negatively influence their reading abilities and behaviors. Our code is available at https:\/\/github.com\/kortukov\/realistic_knowledge_conflicts\/.", "title":"Studying Large Language Model Behaviors Under Context-Memory Conflicts With Real Documents", "authors":[ "Evgenii Kortukov", "Alexander Rubinstein", "Elisa Nguyen", "Seong Joon Oh" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":14 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=xdg4CS5mkl", "bibtext":"@inproceedings{\nzhu2024investigating,\ntitle={Investigating Instruction Tuning Large Language Models on Graphs},\nauthor={Kerui Zhu and Bo-Wei Huang and Bowen Jin and Yizhu Jiao and Ming Zhong and Kevin Chang and Shou-De Lin and Jiawei Han},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=xdg4CS5mkl}\n}", "abstract":"Inspired by the recent advancements of Large Language Models (LLMs) in NLP tasks, there's growing interest in applying LLMs to graph-related tasks. This study delves into the capabilities of instruction-following LLMs for engaging with real-world graphs, aiming to offer empirical insights into how LLMs can effectively interact with graphs and generalize across graph tasks. We begin by constructing a dataset designed for instruction tuning, which comprises a diverse collection of 79 graph-related tasks from academic and e-commerce domains, featuring 44,240 training instances and 18,960 test samples. Utilizing this benchmark, our initial investigation focuses on identifying the optimal graph representation that serves as a conduit for LLMs to understand complex graph structures. Our findings indicate that JSON format for graph representation consistently outperforms natural language and code formats across various LLMs and graph types. Furthermore, we examine the key factors that influence the generalization abilities of instruction-tuned LLMs by evaluating their performance on both in-domain and out-of-domain graph tasks.", "title":"Investigating Instruction Tuning Large Language Models on Graphs", "authors":[ "Kerui Zhu", "Bo-Wei Huang", "Bowen Jin", "Yizhu Jiao", "Ming Zhong", "Kevin Chang", "Shou-De Lin", "Jiawei Han" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhukerui\/graph-instruction-tuning" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":15 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=xWYRL1eR74", "bibtext":"@inproceedings{\nwilliams2024fuseing,\ntitle={{FUSE}-ing Language Models: Zero-Shot Adapter Discovery for Prompt Optimization Across Tokenizers},\nauthor={Joshua Nathaniel Williams and J Zico Kolter},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=xWYRL1eR74}\n}", "abstract":"The widespread use of large language models has resulted in a multitude of tokenizers and embedding spaces, making knowledge transfer in prompt discovery tasks difficult. In this work, we propose FUSE (Flexible Unification of Semantic Embeddings), an inexpensive approach to approximating an adapter layer that maps from one model's textual embedding space to another, even across different tokenizers. We introduce a third-order tensor-based representation of a model's embedding space that aligns semantic embeddings that have been split apart by different tokenizers, and use this representation to derive an approximation of the gradient of one model's outputs with respect to another model's embedding space. We show the efficacy of our approach via multi-objective optimization over vision-language and causal language models for image captioning and sentiment-based image captioning.", "title":"FUSE-ing Language Models: Zero-Shot Adapter Discovery for Prompt Optimization Across Tokenizers", "authors":[ "Joshua Nathaniel Williams", "J Zico Kolter" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.04816", "GitHub":[ "https:\/\/github.com\/jnwilliams\/fuse_prompt_inversion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":16 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=xS6zx1aBI9", "bibtext":"@inproceedings{\nmajumder2024clin,\ntitle={{CLIN}: A Continually Learning Language Agent for Rapid Task Adaptation and Generalization},\nauthor={Bodhisattwa Prasad Majumder and Bhavana Dalvi Mishra and Peter Jansen and Oyvind Tafjord and Niket Tandon and Li Zhang and Chris Callison-Burch and Peter Clark},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=xS6zx1aBI9}\n}", "abstract":"Language agents have shown some ability to interact with an external environment, e.g., a virtual world such as ScienceWorld, to perform complex tasks, e.g., growing a plant, without the startup costs of reinforcement learning. While recent work, e.g., Reflexion, has demonstrated how such agents can also self-improve by adding a textual memory of ''hints'' learned from prior experience, such improvements have been limited both in size and scope. In contrast, our goal is a language agent that can robustly improve performance over time, including when both the task and environment are varied. Our approach is to have the agent learn a textual representation of how the world works (rather than just isolated hints), expressed as a memory of causal abstractions, to guide future decision-making. In experiments, we find CLIN is able to continually improve on repeated trials on the same task and environment, outperforming state-of-the-art reflective language agents like Reflexion by 23 points in ScienceWorld and 1.4 points in ALFWorld benchmarks. CLIN can also transfer its learning to new environments and tasks, enhancing performance by 21 points in ScienceWorld and 11 points in ALFWorld. This suggests that language agents with a textual causal memory can play a significant role in interactive environments, including being able to rapidly improve over time.", "title":"CLIN: A Continually Learning Language Agent for Rapid Task Adaptation and Generalization", "authors":[ "Bodhisattwa Prasad Majumder", "Bhavana Dalvi Mishra", "Peter Jansen", "Oyvind Tafjord", "Niket Tandon", "Li Zhang", "Chris Callison-Burch", "Peter Clark" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.10134", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.10134", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":8, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":17 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=xMt9kCv5YR", "bibtext":"@inproceedings{\ndu2024helmsman,\ntitle={Helmsman of the Masses? Evaluate the Opinion Leadership of Large Language Models in the Werewolf Game},\nauthor={Silin Du and Xiaowei Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=xMt9kCv5YR}\n}", "abstract":"Large language models (LLMs) have exhibited memorable strategic behaviors in social deductive games. However, the significance of opinion leadership exhibited by LLM-based agents has been largely overlooked, which is crucial for practical applications in multi-agent and human-AI interaction settings. Opinion leaders are individuals who have a noticeable impact on the beliefs and behaviors of others within a social group. In this work, we employ the Werewolf game as a simulation platform to assess the opinion leadership of LLMs. The game includes the role of the Sheriff, tasked with summarizing arguments and recommending decision options, and therefore serves as a credible proxy for an opinion leader. We develop a framework integrating the Sheriff role and devise two novel metrics based on the critical characteristics of opinion leaders. The first metric measures the reliability of the opinion leader, and the second assesses the influence of the opinion leader on other players' decisions. We conduct extensive experiments to evaluate LLMs of different scales. In addition, we collect a Werewolf question-answering dataset (WWQA) to assess and enhance LLM's grasp of the game rules, and we also incorporate human participants for further analysis. The results suggest that the Werewolf game is a suitable test bed to evaluate the opinion leadership of LLMs, and few LLMs possess the capacity for opinion leadership.", "title":"Helmsman of the Masses? Evaluate the Opinion Leadership of Large Language Models in the Werewolf Game", "authors":[ "Silin Du", "Xiaowei Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01602", "GitHub":[ "https:\/\/github.com\/doslim\/evaluate-the-opinion-leadership-of-llms" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":18 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=xI8C7sfN1H", "bibtext":"@inproceedings{\njeong2024factual,\ntitle={Factual and Tailored Recommendation Endorsements using Language Models and Reinforcement Learning},\nauthor={Jihwan Jeong and Yinlam Chow and Guy Tennenholtz and ChihWei Hsu and Mohammad Ghavamzadeh and Craig Boutilier},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=xI8C7sfN1H}\n}", "abstract":"Recommender systems (RSs) play a central role in matching candidate items to users based on their preferences. While traditional RSs rely on user feed-back signals, conversational RSs interact with users in natural language. In this work, we develop P4LM, an _aPpealing, Precise, Preference-comprehensive and Prioritized_ language model which endorses recommended items by emphasizing specific item characteristics and their coverage to a user\u2019s preferences. P4LM uses an _embedding_ representation of a user\u2019s preferences to generate responses that are appealing, factually-grounded and tailored to the user\u2019s preferences. P4LM employs a joint reward function to measure precision, appeal, preference coverage and prioritization of preferences, which are used as AI-based feedback in a reinforcement learning-based language model framework. On the MovieLens 25M and Amazon Product Review datasets, P4LM delivers more appealing and tailored endorsements to users, as determined by auto-critic and rater evaluations.", "title":"Factual and Tailored Recommendation Endorsements using Language Models and Reinforcement Learning", "authors":[ "Jihwan Jeong", "Yinlam Chow", "Guy Tennenholtz", "ChihWei Hsu", "Mohammad Ghavamzadeh", "Craig Boutilier" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":19 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=wps3p2cqrA", "bibtext":"@inproceedings{\nli2024how,\ntitle={How Well Do {LLM}s Identify Cultural Unity in Diversity?},\nauthor={Jialin Li and Junli Wang and Junjie Hu and Ming Jiang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=wps3p2cqrA}\n}", "abstract":"Much work on the cultural awareness of large language models (LLMs) focuses on the models' sensitivity to geo-cultural diversity. However, in addition to cross-cultural differences, there also exists common ground across cultures. For instance, a bridal veil in the United States plays a similar cultural-relevant role as a honggaitou in China. In this study, we introduce a benchmark dataset CUNIT for evaluating decoder-only LLMs in understanding the cultural unity of concepts. Specifically, CUNIT consists of 1,425 evaluation examples building upon 285 traditional cultural-specific concepts across 10 countries. Based on a systematic manual annotation of cultural-relevant features per concept, we calculate the cultural association between any pair of cross-cultural concepts. Built upon this dataset, we design a contrastive matching task to evaluate the LLMs' capability to identify highly associated cross-cultural concept pairs. We evaluate 3 strong LLMs, using 3 popular prompting strategies, under the settings of either giving all extracted concept features or no features at all on CUNIT Interestingly, we find that cultural associations across countries regarding clothing concepts largely differ from food. Our analysis shows that LLMs are still limited to capturing cross-cultural associations between concepts compared to humans. Moreover, geo-cultural proximity shows a weak influence on model performance in capturing cross-cultural associations.", "title":"How Well Do LLMs Identify Cultural Unity in Diversity?", "authors":[ "Jialin Li", "Junli Wang", "Junjie Hu", "Ming Jiang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.05102", "GitHub":[ "https:\/\/github.com\/ljl0222\/CUNIT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":20 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=wi9IffRhVM", "bibtext":"@inproceedings{\nwang2024guiding,\ntitle={Guiding Language Model Reasoning with Planning Tokens},\nauthor={Xinyi Wang and Lucas Caccia and Oleksiy Ostapenko and Xingdi Yuan and William Yang Wang and Alessandro Sordoni},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=wi9IffRhVM}\n}", "abstract":"Large language models (LLMs) have recently attracted considerable interest for their ability to perform complex reasoning tasks, such as chain-of-thought (CoT) reasoning. However, most of the existing approaches to enhance this ability rely heavily on data-driven methods, while neglecting the structural aspects of the model's reasoning capacity. To encourage a more structural generation of CoT steps, we propose a hierarchical generation scheme: we let the LM generate a planning token at the start of each reasoning step, intuitively serving as a high-level plan of the current step, and add their embeddings to the model parameters. Our approach requires a negligible increase in trainable parameters (0.001%) and can be applied through either full fine-tuning or a more parameter-efficient scheme. We demonstrate our method's effectiveness by applying it to three different LLMs, showing notable accuracy improvements across three math word problem datasets and one multihop QA dataset with respect to standard fine-tuning baselines.", "title":"Guiding Language Model Reasoning with Planning Tokens", "authors":[ "Xinyi Wang", "Lucas Caccia", "Oleksiy Ostapenko", "Xingdi Yuan", "William Yang Wang", "Alessandro Sordoni" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.05707", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":21 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=wS7PxDjy6m", "bibtext":"@inproceedings{\ncheng2024dated,\ntitle={Dated Data: Tracing Knowledge Cutoffs in Large Language Models},\nauthor={Jeffrey Cheng and Marc Marone and Orion Weller and Dawn Lawrie and Daniel Khashabi and Benjamin Van Durme},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=wS7PxDjy6m}\n}", "abstract":"Large Language Models (LLMs) are often paired with a reported cutoff date, the time at which training data was gathered. \nSuch information is crucial for applications where the LLM must provide up-to-date information. However, a reported cutoff only scratches the surface. Do all sub-resources in the training data share the same cutoff? Does the model's demonstrated knowledge for these sub-resources closely align to their cutoff? We define the notion of an effective cutoff, which is distinct from the LLM's reported cutoff and differs between sub-resources. We propose a simple approach to estimate effective cutoffs of an LLM on the resource-level by probing across versions of the data. Crucially, our method does not require access to a model's pre-training data. Through our analysis, we find that effective cutoffs often drastically differ from reported cutoffs. To understand the root cause of this observation, we conduct a large-scale analysis on open pre-training datasets. Our analysis reveals two reasons for these inconsistencies: (1) temporal misalignments of CommonCrawl data due to non-trivial amounts of old data in new dumps; and (2) complications in LLM deduplication schemes involving semantic duplicates and lexical near-duplicates. Overall, our results show that cutoffs are not as simple as they have seemed and that care must be taken both by LLM dataset curators as well as practitioners who seek to use these models.", "title":"Dated Data: Tracing Knowledge Cutoffs in Large Language Models", "authors":[ "Jeffrey Cheng", "Marc Marone", "Orion Weller", "Dawn Lawrie", "Daniel Khashabi", "Benjamin Van Durme" ], "id":"Conference", "type":"Oral", "arxiv_id":"2403.12958", "GitHub":[ "https:\/\/github.com\/nexync\/dated_data" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":22 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=wLQ3I0F1oj", "bibtext":"@inproceedings{\nzhao2024large,\ntitle={Large Language Model is not a (Multilingual) Compositional Relation Reasoner},\nauthor={Jinman Zhao and Xueyan Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=wLQ3I0F1oj}\n}", "abstract":"We present a comprehensive evaluation of large language models' \ncapability to reason compositional relations through \na benchmark encompassing 1,800 test cases in both English and Chinese, \ncovering six distinct categories of composition relations: \nPositional, Comparative, Personal, Mathematical, Identity, and Other. \nWe expand our assessment to the multilingual realm by including translations of the benchmark suite into \nJapanese, French, and Korean.\nOur Multilingual Composition Relation (MCR) benchmark\naims at investigating the robustness and adaptability of LLMs in handling compositional relation reasoning across diverse linguistic contexts.", "title":"Large Language Model is not a (Multilingual) Compositional Relation Reasoner", "authors":[ "Jinman Zhao", "Xueyan Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":23 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=wF6k0aWjAu", "bibtext":"@inproceedings{\ncao2024instruction,\ntitle={Instruction Mining: Instruction Data Selection for Tuning Large Language Models},\nauthor={Yihan Cao and Yanbin Kang and Chi Wang and Lichao Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=wF6k0aWjAu}\n}", "abstract":"Large language models (LLMs) are initially pretrained for broad capabilities and then finetuned with instruction-following datasets to improve their performance in interacting with humans. Despite advances in finetuning, a standardized guideline for selecting high-quality datasets to optimize this process remains elusive. In this paper, we first propose InstructMining, an innovative method designed for automatically selecting premium instruction-following data for finetuning LLMs. Specifically, InstructMining utilizes natural language indicators as a measure of data quality, applying them to evaluate unseen datasets. During experimentation, we discover that double descent phenomenon exists in large language model finetuning. Based on this observation, we further leverage BlendSearch to help find the best subset among the entire datase. Experiment results show that InstructMining-7B achieves state-of-the-art performance on two of the most popular benchmarks: LLM-as-a-judge and OpenLLM benchmark.", "title":"Instruction Mining: Instruction Data Selection for Tuning Large Language Models", "authors":[ "Yihan Cao", "Yanbin Kang", "Chi Wang", "Lichao Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":24 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=vwIIAot0ff", "bibtext":"@inproceedings{\nblakeney2024does,\ntitle={Does your data spark joy? Performance gains from domain upsampling at the end of training},\nauthor={Cody Blakeney and Mansheej Paul and Brett W. Larsen and Sean Owen and Jonathan Frankle},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=vwIIAot0ff}\n}", "abstract":"Pretraining datasets for large language models (LLMs) have grown to trillions of tokens composed of large amounts of CommonCrawl (CC) web scrape along with smaller, domain-specific datasets. It is expensive to understand the impact of these domain-specific datasets on model capabilities as training at large FLOP scales is required to reveal significant changes to difficult and emergent benchmarks. Given the increasing cost of experimenting with pretraining data, how does one determine the optimal balance between the diversity in general web scrapes and the information density of domain specific data? In this work, we show how to leverage the smaller domain specific datasets by upsampling them relative to CC at the end of training to drive performance improvements on difficult benchmarks. This simple technique allows us to improve up to 6.90 pp on MMLU, 8.26 pp on GSM8K, and 6.17 pp on HumanEval relative to the base data mix for a 7B model trained for 1 trillion (T) tokens, thus rivaling Llama-2 (7B)\u2014a model trained for twice as long. We experiment with ablating the duration of domain upsampling from 5% to 30% of training and find that 10% to 20% percent is optimal for navigating the tradeoff between general language modeling capabilities and targeted benchmarks. We also use domain upsampling to characterize at scale the utility of individual datasets for improving various benchmarks by removing them during this final phase of training. This tool opens up the ability to experiment with the impact of different pretraining datasets at scale, but at an order of magnitude lower cost compared to full pretraining runs.", "title":"Does your data spark joy? Performance gains from domain upsampling at the end of training", "authors":[ "Cody Blakeney", "Mansheej Paul", "Brett W. Larsen", "Sean Owen", "Jonathan Frankle" ], "id":"Conference", "type":"Poster", "arxiv_id":"2406.03476", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2406.03476", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":25 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=vL8BIGuFTF", "bibtext":"@inproceedings{\nsnell2024predicting,\ntitle={Predicting Emergent Capabilities by Finetuning},\nauthor={Charlie Victor Snell and Eric Wallace and Dan Klein and Sergey Levine},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=vL8BIGuFTF}\n}", "abstract":"A fundamental open challenge in modern LLM scaling is the lack of understanding around emergent capabilities. In particular, language model pretraining loss is known to be highly predictable as a function of compute. However, downstream capabilities are far less predictable---sometimes even exhibiting emergent jumps---which makes it challenging to anticipate the capabilities of future models. In this work, we first pose the task of emergence prediction: given access to current LLMs that have random few-shot accuracy on a task, can we predict whether future models (GPT-N+1) will have non-trivial accuracy on that task? We then discover a simple insight for this problem: directly finetuning LLMs on a given task can shift the point in scaling at which emergence occurs towards less capable models. To operationalize this insight, we can finetune LLMs with varying amounts of data and fit a parametric function that predicts when emergence will occur (i.e., ``emergence laws''). To validate this approach, we use four standard NLP benchmarks where large-scale open-source LLMs already demonstrate emergence (MMLU, GSM8K, CommonsenseQA, and CoLA). Using only small-scale LLMs, we find that, in some cases, we are able to accurately predict whether models trained with up to 4x more compute have emerged.", "title":"Predicting Emergent Capabilities by Finetuning", "authors":[ "Charlie Victor Snell", "Eric Wallace", "Dan Klein", "Sergey Levine" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":26 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=v74mJURD1L", "bibtext":"@inproceedings{\nbaumg{\\\"a}rtner2024bestofvenom,\ntitle={Best-of-Venom: Attacking {RLHF} by Injecting Poisoned Preference Data},\nauthor={Tim Baumg{\\\"a}rtner and Yang Gao and Dana Alon and Donald Metzler},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=v74mJURD1L}\n}", "abstract":"Reinforcement Learning from Human Feedback (RLHF) is a popular method for aligning Language Models (LM) with human values and preferences. RLHF requires a large number of preference pairs as training data, which are often used in both the Supervised Fine-Tuning and Reward Model training and therefore publicly available datasets are commonly used. In this work, we study to what extent a malicious actor can manipulate the LMs generations by poisoning the preferences, i.e., injecting poisonous preference pairs into these datasets and the RLHF training process. We propose strategies to build poisonous preference pairs and test their performance by poisoning two widely used preference datasets. Our results show that preference poisoning is highly effective: injecting a small amount of poisonous data (1-5\\% of the original dataset), we can effectively manipulate the LM to generate a target entity in a target sentiment (positive or negative). The findings from our experiments also shed light on strategies to defend against the preference poisoning attack.", "title":"Best-of-Venom: Attacking RLHF by Injecting Poisoned Preference Data", "authors":[ "Tim Baumg\u00e4rtner", "Yang Gao", "Dana Alon", "Donald Metzler" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.05530", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":27 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=v3w2a7EInO", "bibtext":"@inproceedings{\nlee2024cats,\ntitle={{CATS}: Context-Aware Thresholding for Sparsity in Large Language Models},\nauthor={Donghyun Lee and Jaeyong Lee and Genghan Zhang and Mo Tiwari and Azalia Mirhoseini},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=v3w2a7EInO}\n}", "abstract":"The dramatic improvements in Large Language Models (LLMs) come at the cost of increased computational resources for inference. Recent studies ameliorate the computational costs of LLMs by increasing their activation sparsity but suffer from significant performance degradation on downstream tasks.\nIn this work, we introduce a new framework for sparsifying the activations of LLMs and reducing inference costs, dubbed $\\underline{C}$ontextually $\\underline{A}$ware $\\underline{T}$hresholding for $\\underline{S}$parsity (CATS).\nCATS is a relatively simple algorithm that is easy to implement and highly effective.\nAt the heart of our framework is a new non-linear activation function.\nWe demonstrate that CATS can be applied to various models, including Mistral-7B and Llama2-7B \\& 13B, and outperforms existing sparsification techniques across multiple tasks.\nMore precisely, CATS-based models achieve downstream task performance within $\\sim$ 99\\% of their base models at activation sparsity levels of 50\\%, even without any fine-tuning.\nMoreover, with fine-tuning that targets only 1\\% of the parameters, CATS-based models not only converge faster but also achieve better task performance than competing techniques.\nFinally, we develop a custom GPU kernel for the efficient implementation of CATS that translates the activation sparsity of CATS to real wall-clock time speedups.\nOur custom kernel implementation of CATS results in a $\\sim$15\\% improvement in wall-clock inference latency of token generation. We release our code, experiments, and datasets at https:\/\/github.com\/ScalingIntelligence\/CATS.", "title":"CATS: Context-Aware Thresholding for Sparsity in Large Language Models", "authors":[ "Donghyun Lee", "Jaeyong Lee", "Genghan Zhang", "Mo Tiwari", "Azalia Mirhoseini" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":28 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=uUIFTjBREk", "bibtext":"@inproceedings{\nzuo2024efficient,\ntitle={Efficient Hybrid Long Sequence Modeling with State Space Augmented Transformers},\nauthor={Simiao Zuo and Xiaodong Liu and Jian Jiao and Denis X Charles and Eren Manavoglu and Tuo Zhao and Jianfeng Gao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=uUIFTjBREk}\n}", "abstract":"Transformer models have achieved superior performance in various natural language processing tasks. However, the quadratic computational cost of the attention mechanism limits its practicality for long sequences. There are existing attention variants that improve the computational efficiency, but they have limited ability to effectively compute global information. In parallel to Transformer models, state space models (SSMs) are tailored for long sequences, but they are not flexible enough to capture complicated local information. We propose SPADE, short for State Space Augmented Transformer. Specifically, we augment a SSM into the bottom layer of SPADE, and we employ efficient local attention methods for the other layers. The SSM augments global information, which complements the lack of long-range dependency issue in local attention methods. Experimental results on the Long Range Arena benchmark and language modeling tasks demonstrate the effectiveness of the proposed method. To further demonstrate the scalability of SPADE, we pre-train large encoder-decoder models and present fine-tuning results on natural language understanding and natural language generation tasks.", "title":"Efficient Hybrid Long Sequence Modeling with State Space Augmented Transformers", "authors":[ "Simiao Zuo", "Xiaodong Liu", "Jian Jiao", "Denis X Charles", "Eren Manavoglu", "Tuo Zhao", "Jianfeng Gao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":29 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=uILyEJGKWw", "bibtext":"@inproceedings{\nlu2024does,\ntitle={Does Collaborative Human{\\textendash}{LM} Dialogue Generation Help Information Extraction from Human{\\textendash}Human Dialogues?},\nauthor={Bo-Ru Lu and Nikita Haduong and Chia-Hsuan Lee and Zeqiu Wu and Hao Cheng and Paul Koester and Jean Utke and Tao Yu and Noah A. Smith and Mari Ostendorf},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=uILyEJGKWw}\n}", "abstract":"The capabilities of pretrained language models (LMs) have opened opportunities to explore new application areas, but applications involving human-human interaction are limited by the fact that most data is protected from public release for privacy reasons. Problem-solving human-human dialogues in real applications can be much more complex than existing Wizard-of-Oz collections, preventing successful domain transfer. To support information extraction (IE) for a private call center dataset (AIC), we introduce a human-in-the-loop dialogue generation framework capable of synthesizing realistic dialogues. In IE experiments with AIC dialogues, we observe 25% relative improvement in F1 after augmenting a small set of real human-human conversations with synthetic data. In controlled experiments, we compare training with our human-in-the-loop-synthesized data vs. fully automatically LM-generated data and find that collaborating humans adds value both in the generation and annotation stages. We release code and our synthetic dataset to illustrate the complexity of call center conversations and encourage development of complex dialogue datasets that are more representative of natural data.", "title":"Does Collaborative Human\u2013LM Dialogue Generation Help Information Extraction from Human\u2013Human Dialogues?", "authors":[ "Bo-Ru Lu", "Nikita Haduong", "Chia-Hsuan Lee", "Zeqiu Wu", "Hao Cheng", "Paul Koester", "Jean Utke", "Tao Yu", "Noah A. Smith", "Mari Ostendorf" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":30 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=u2vAyMeLMm", "bibtext":"@inproceedings{\nliu2024infinigram,\ntitle={Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens},\nauthor={Jiacheng Liu and Sewon Min and Luke Zettlemoyer and Yejin Choi and Hannaneh Hajishirzi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=u2vAyMeLMm}\n}", "abstract":"Are $n$-gram language models still relevant in this era of neural large language models (LLMs)? Our answer is *yes*, and we showcase their values in both text analysis and improving neural LLMs. This was done by modernizing $n$-gram LMs in two aspects. First, we train them at the same data scale as neural LLMs -- **5 trillion tokens**. This is one of the largest $n$-gram LMs ever built. Second, existing $n$-gram LMs use small $n$ which hinders their performance; we instead allow $n$ to be arbitrarily large, by introducing a new **$\\infty$-gram LM** with backoff. Instead of pre-computing $n$-gram count tables (which would be very expensive), we develop an engine named infini-gram -- powered by suffix arrays -- that can compute $\\infty$-gram (as well as $n$-gram with arbitrary $n$) probabilities with **millisecond-level latency**. The $\\infty$-gram framework and infini-gram engine enable us to conduct many novel and interesting analyses of human-written and machine-generated text: we find that the $\\infty$-gram LM has fairly high accuracy for next-token prediction (47%), and can complement neural LLMs to greatly reduce their perplexity. When analyzing machine-generated text, we also observe irregularities in the machine: $\\infty$-gram agreement level with respect to the suffix length, which indicates deficiencies in neural LLM pretraining and the positional embeddings of Transformers.", "title":"Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens", "authors":[ "Jiacheng Liu", "Sewon Min", "Luke Zettlemoyer", "Yejin Choi", "Hannaneh Hajishirzi" ], "id":"Conference", "type":"Oral", "arxiv_id":"2401.17377", "GitHub":[ "https:\/\/github.com\/AlexWan0\/infini-gram" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.17377", "n_linked_authors":3, "upvotes":34, "num_comments":2, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ "liujch1998\/infini-gram" ], "paper_page_exists_pre_conf":1, "unique_id":31 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=tzE7VqsaJ4", "bibtext":"@inproceedings{\nchan2024rqrag,\ntitle={{RQ}-{RAG}: Learning to Refine Queries for Retrieval Augmented Generation},\nauthor={Chi-Min Chan and Chunpu Xu and Ruibin Yuan and Hongyin Luo and Wei Xue and Yike Guo and Jie Fu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=tzE7VqsaJ4}\n}", "abstract":"Large Language Models (LLMs) exhibit remarkable capabilities but are prone to generating inaccurate or hallucinatory responses. This limitation stems from their reliance on vast pretraining datasets, making them susceptible to errors in unseen scenarios. To tackle these challenges, Retrieval Augmented Generation (RAG) addresses this by incorporating external, relevant documents into the response generation process, thus leveraging non-parametric knowledge alongside LLMs\u2019 in-context learning abilities.\nHowever, existing RAG implementations primarily focus on initial input for context retrieval, overlooking the nuances of ambiguous or complex queries that necessitate further clarification or decomposition for accurate responses. To this end, we propose learning to Refine Queries for Retrieval Augmented Generation (RQ-RAG) in this paper, endeavoring to enhance the model by equipping it with capabilities for explicit rewriting, decomposition, and disambiguation. Our experimental results indicate that our method, when applied to a 7B Llama2 model, surpasses the previous state-of-the-art (SOTA) by an average of 1.9% across three single-hop QA datasets, and when applied to a 8B Llama3 model, it also demonstrates enhanced performance in handling complex, multi-hop QA datasets.", "title":"RQ-RAG: Learning to Refine Queries for Retrieval Augmented Generation", "authors":[ "Chi-Min Chan", "Chunpu Xu", "Ruibin Yuan", "Hongyin Luo", "Wei Xue", "Yike Guo", "Jie Fu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":32 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=taThoOlDNQ", "bibtext":"@inproceedings{\nni2024exploring,\ntitle={Exploring the Mystery of Influential Data for Mathematical Reasoning},\nauthor={Xinzhe Ni and Yeyun Gong and Zhibin Gou and yelong shen and Yujiu Yang and Nan Duan and Weizhu Chen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=taThoOlDNQ}\n}", "abstract":"Selecting influential data for fine-tuning on downstream tasks is a key factor for both performance and computation efficiency. Recent works have shown that training with only limited data can show a superior performance on general tasks. However, the feasibility on mathematical reasoning tasks has not been validated. To go further, there exist two open questions for mathematical reasoning: how to select influential data and what is an influential data composition. For the former one, we propose a Quality-aware Diverse Selection (QaDS) strategy adaptable for mathematical reasoning. A comparison with other selection strategies validates the superiority of QaDS. For the latter one, we first enlarge our setting and explore the influential data composition. We conduct a series of experiments and highlight: scaling up reasoning data, and training with general data selected by QaDS is helpful. Then, we define our optimal mixture as OpenMathMix, an influential data mixture with open-source data selected by QaDS. With OpenMathMix, we achieve a state-of-the-art 48.8% accuracy on MATH with 7B base model. Additionally, we showcase the use of QaDS in creating efficient fine-tuning mixtures with various selection ratios, and analyze the quality of a wide range of open-source datasets, which can perform as a reference for future works on mathematical reasoning tasks.", "title":"Exploring the Mystery of Influential Data for Mathematical Reasoning", "authors":[ "Xinzhe Ni", "Yeyun Gong", "Zhibin Gou", "yelong shen", "Yujiu Yang", "Nan Duan", "Weizhu Chen" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01067", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":33 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=tRxIB7y3wF", "bibtext":"@inproceedings{\nsun2024lalaeval,\ntitle={LalaEval: A Holistic Human Evaluation Framework for Domain-Specific Large Language Models},\nauthor={Chongyan Sun and Ken Lin and Shiwei Wang and Hulong Wu and Chengfei Fu and Zhen Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=tRxIB7y3wF}\n}", "abstract":"This paper introduces LalaEval, a holistic framework designed for the human evaluation of domain-specific large language models (LLMs). LalaEval proposes a comprehensive suite of end-to-end protocols that cover five main components including domain specification, criteria establishment, benchmark dataset creation, construction of evaluation rubrics, and thorough analysis and interpretation of evaluation outcomes. This initiative aims to fill a crucial research gap by providing a systematic methodology for conducting standardized human evaluations within specific domains, a practice that, despite its widespread application, lacks substantial coverage in the literature and human evaluation are often criticized to be less reliable due to subjective factors, so standardized procedures adapted to the nuanced requirements of specific domains or even individual organizations are in great need. Furthermore, the paper demonstrates the framework's application within the logistics industry and a comparative analysis of LLMs for the logistics domain use, highlighting the framework's capacity to elucidate performance differences and guide model selection and development for domain-specific LLMs. Through real-world deployment, the paper underscores the framework's effectiveness in advancing the field of domain-specific LLM evaluation, thereby contributing significantly to the ongoing discussion on LLMs' practical utility and performance in domain-specific applications.", "title":"LalaEval: A Holistic Human Evaluation Framework for Domain-Specific Large Language Models", "authors":[ "Chongyan Sun", "Ken Lin", "Shiwei Wang", "Hulong Wu", "Chengfei Fu", "Zhen Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.13338", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":34 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=tIpWtMYkzU", "bibtext":"@inproceedings{\nmireshghallah2024trust,\ntitle={Trust No Bot: Discovering Personal Disclosures in Human-{LLM} Conversations in the Wild},\nauthor={Niloofar Mireshghallah and Maria Antoniak and Yash More and Yejin Choi and Golnoosh Farnadi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=tIpWtMYkzU}\n}", "abstract":"Measuring personal disclosures made in human-chatbot interactions can provide a better understanding of users\u2019 AI literacy and facilitate privacy research for large language models (LLMs). We run an extensive, fine-grained analysis on the personal disclosures made by real users to commercial GPT models, investigating the leakage of personally identifiable and sensitive information. To understand the contexts in which users disclose to chatbots, we develop a taxonomy of tasks and sensitive topics, based on qualitative and quantitative analysis of naturally occurring conversations. We discuss these potential privacy harms and observe that: (1) personally identifiable information (PII) appears in unexpected contexts such as in translation or code editing (48% and 16% of the time, respectively) and (2) PII detection alone is insufficient to capture the sensitive topics that are common in human-chatbot interactions, such as detailed sexual preferences or specific drug use habits. We believe that these high disclosure rates are of significant importance for researchers and data curators, and we call for the design of appropriate nudging mechanisms to help users moderate their interactions.", "title":"Trust No Bot: Discovering Personal Disclosures in Human-LLM Conversations in the Wild", "authors":[ "Niloofar Mireshghallah", "Maria Antoniak", "Yash More", "Yejin Choi", "Golnoosh Farnadi" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mireshghallah\/ChatGPT-personal-disclosures" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":35 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=tEYskw1VY2", "bibtext":"@inproceedings{\ngu2024mamba,\ntitle={Mamba: Linear-Time Sequence Modeling with Selective State Spaces},\nauthor={Albert Gu and Tri Dao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=tEYskw1VY2}\n}", "abstract":"Foundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module. Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformers' computational inefficiency on long sequences, but they have not performed as well as attention on important modalities such as language. We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements. First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities, allowing the model to selectively propagate or forget information along the sequence length dimension depending on the current token. Second, even though this change prevents the use of efficient convolutions, we design a hardware-aware parallel algorithm in recurrent mode. We integrate these selective SSMs into a simplified end-to-end neural network architecture without attention or even MLP blocks (Mamba). Mamba enjoys fast inference (5x higher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences. As a general sequence model backbone, Mamba achieves state-of-the-art performance across several modalities such as language, audio, and genomics. On language modeling, our Mamba-3B model outperforms Transformers of the same size and matches Transformers twice its size, both in pretraining and downstream evaluation.", "title":"Mamba: Linear-Time Sequence Modeling with Selective State Spaces", "authors":[ "Albert Gu", "Tri Dao" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/radarFudan\/mamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":36 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=t4eB3zYWBK", "bibtext":"@inproceedings{\ntang2024multihoprag,\ntitle={MultiHop-{RAG}: Benchmarking Retrieval-Augmented Generation for Multi-Hop Queries},\nauthor={Yixuan Tang and Yi Yang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=t4eB3zYWBK}\n}", "abstract":"Retrieval-augmented generation (RAG) augments large language models (LLM) by retrieving relevant knowledge, showing promising potential in mitigating LLM hallucinations and enhancing response quality, thereby facilitating the great adoption of LLMs in practice. However, we find that existing RAG systems are inadequate in answering multi-hop queries, which require retrieving and reasoning over multiple pieces of supporting evidence. Furthermore, to our knowledge, no existing RAG benchmarking dataset focuses on multi-hop queries. In this paper, we develop a novel dataset, MultiHop-RAG, which consists of a knowledge base, a large collection of multi-hop queries, their ground-truth answers, and the associated supporting evidence. We detail the procedure of building the dataset, utilizing an English news article dataset as the underlying RAG knowledge base. We demonstrate the benchmarking utility of MultiHop-RAG in two experiments. The first experiment compares different embedding models for retrieving evidence for multi-hop queries. In the second experiment, we examine the capabilities of various state-of-the-art LLMs, including GPT-4, PaLM, and Llama2-70B, in reasoning and answering multi-hop queries given the evidence. Both experiments reveal that existing RAG methods perform unsatisfactorily in retrieving and answering multi-hop queries. We hope MultiHop-RAG will be a valuable resource for the community in developing effective RAG systems, thereby facilitating greater adoption of LLMs in practice. We make the dataset and benchmarking code publicly available via GitHub.", "title":"MultiHop-RAG: Benchmarking Retrieval-Augmented Generation for Multi-Hop Queries", "authors":[ "Yixuan Tang", "Yi Yang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yixuantt\/MultiHop-RAG" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":37 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=t3z6UlV09o", "bibtext":"@inproceedings{\nseddik2024how,\ntitle={How bad is training on synthetic data? A statistical analysis of language model collapse},\nauthor={Mohamed El Amine Seddik and Suei-Wen Chen and Soufiane Hayou and Pierre Youssef and Merouane Abdelkader DEBBAH},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=t3z6UlV09o}\n}", "abstract":"Model collapse, as introduced in (Shumailov et al., 2023), refers to the phenomenon where training models on synthetic data generated from previously trained models leads to a deterioration in performance. This recursive training loop makes the tails of the original distribution disappear, thereby making future-generation models forget about the initial (real) distribution. With the aim of rigorously understanding model collapse in language models, we consider in this paper a statistical model that allows us to characterize the impact of various recursive training scenarios. Specifically, we demonstrate that model collapse cannot be avoided when training solely on synthetic data. However, when mixing both real and synthetic data, we provide an estimate of a maximal amount of synthetic data below which model collapse can eventually be avoided. Our theoretical conclusions are further supported by empirical validations.", "title":"How bad is training on synthetic data? A statistical analysis of language model collapse", "authors":[ "Mohamed El Amine Seddik", "Suei-Wen Chen", "Soufiane Hayou", "Pierre Youssef", "Merouane Abdelkader DEBBAH" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":38 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=stmqBSW2dV", "bibtext":"@inproceedings{\nhosseini2024vstar,\ntitle={V-{ST}aR: Training Verifiers for Self-Taught Reasoners},\nauthor={Arian Hosseini and Xingdi Yuan and Nikolay Malkin and Aaron Courville and Alessandro Sordoni and Rishabh Agarwal},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=stmqBSW2dV}\n}", "abstract":"Common self-improvement approaches for large language models (LLMs), such as STaR (Zelikman et al., 2022), iteratively fine-tune LLMs on self-generated solutions to improve their problem-solving ability. However, these approaches discard the large amounts of incorrect solutions generated during this process, potentially neglecting valuable information in such solutions. To address this shortcoming, we propose V-STaR that utilizes both the correct and incorrect solutions generated during the self-improvement process to train a verifier using DPO that judges correctness of model-generated solutions. This verifier is used at inference time to select one solution among many candidate solutions. Running V-STaR for multiple iterations results in progressively better reasoners and verifiers, delivering a 4% to 17% test accuracy improvement over existing self-improvement and verification approaches on common code generation and math reasoning benchmarks with LLaMA2 models.", "title":"V-STaR: Training Verifiers for Self-Taught Reasoners", "authors":[ "Arian Hosseini", "Xingdi Yuan", "Nikolay Malkin", "Aaron Courville", "Alessandro Sordoni", "Rishabh Agarwal" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":39 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=soz1SEiPeq", "bibtext":"@inproceedings{\npeng2024eagle,\ntitle={Eagle and Finch: {RWKV} with Matrix-Valued States and Dynamic Recurrence},\nauthor={Bo Peng and Daniel Goldstein and Quentin Gregory Anthony and Alon Albalak and Eric Alcaide and Stella Biderman and Eugene Cheah and Teddy Ferdinan and Kranthi Kiran GV and Haowen Hou and Satyapriya Krishna and Ronald McClelland Jr. and Niklas Muennighoff and Fares Obeid and Atsushi Saito and Guangyu Song and Haoqin Tu and Ruichong Zhang and Bingchen Zhao and Qihang Zhao and Jian Zhu and Rui-Jie Zhu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=soz1SEiPeq}\n}", "abstract":"We present Eagle (RWKV-5) and Finch (RWKV-6), sequence models improving upon the RWKV architecture. Our architectural design\nadvancements include multi-headed matrix-valued states and a dynamic recurrence mechanism that improve expressivity while maintaining the inference efficiency characteristics of RNNs. We introduce a new multilingual corpus with 1.12 trillion tokens and a fast tokenizer based on greedy matching for enhanced multilinguality. We trained four Eagle models, ranging from 0.46 to 7.5 billion parameters, and two Finch models with 1.6 and 3.1 billion parameters and find that they achieve competitive performance across a wide variety of benchmarks.", "title":"Eagle and Finch: RWKV with Matrix-Valued States and Dynamic Recurrence", "authors":[ "Bo Peng", "Daniel Goldstein", "Quentin Gregory Anthony", "Alon Albalak", "Eric Alcaide", "Stella Biderman", "Eugene Cheah", "Teddy Ferdinan", "Kranthi Kiran GV", "Haowen Hou", "Satyapriya Krishna", "Ronald McClelland Jr.", "Niklas Muennighoff", "Fares Obeid", "Atsushi Saito", "Guangyu Song", "Haoqin Tu", "Ruichong Zhang", "Bingchen Zhao", "Qihang Zhao", "Jian Zhu", "Rui-Jie Zhu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.05892", "GitHub":[ "https:\/\/github.com\/rwkv\/rwkv-lm" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.05892", "n_linked_authors":9, "upvotes":31, "num_comments":0, "n_authors":27, "Models":[ "BlinkDL\/rwkv-6-world", "TimeMobius\/Mobius-RWKV-r5-chat-12B-8k", "TimeMobius\/Mobius-RWKV-r6-12B", "xiaol\/mobius-rwkv-r6-12B", "xiaol\/Mobius-RWKV-r5-chat-12B-8k" ], "Datasets":[ ], "Spaces":[ "BlinkDL\/RWKV-Gradio-2", "BlinkDL\/RWKV-Gradio-1", "devingulliver\/subquadratic-llm-leaderboard", "FredZhang7\/rwkv-6-world-1b6-chat" ], "paper_page_exists_pre_conf":1, "unique_id":40 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=soGxskHGox", "bibtext":"@inproceedings{\nmercat2024linearizing,\ntitle={Linearizing Large Language Models},\nauthor={Jean Mercat and Igor Vasiljevic and Sedrick Scott Keh and Kushal Arora and Achal Dave and Adrien Gaidon and Thomas Kollar},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=soGxskHGox}\n}", "abstract":"Linear transformers have emerged as a subquadratic-time alternative to softmax\nattention and have garnered significant interest due to their fixed recurrent state.\nHowever, they suffer from poor scaling and under-perform compute-matched\ntransformers. Prior models such as RWKV and Mamba have attempted to address\nthese shortcomings by proposing novel time-mixing and gating architectures,\nbut pre-training large language models requires significant data and compute\ninvestments. In this paper, we propose Scalable UPtraining for Recurrent Attention\n(SUPRA), an alternative to pre-training linear transformers. We present a method\nto uptrain existing large pre-trained transformers into Recurrent Neural Networks\n(RNNs) with a modest compute budget. This allows us to leverage the strong pre-\ntraining data and performance of existing transformer LLMs, while requiring 5%\nof the training cost. We find that our linearization technique leads to competitive\nperformance on standard benchmarks, but we identify a persistent in-context\nlearning shortfall for even the largest linear models.", "title":"Linearizing Large Language Models", "authors":[ "Jean Mercat", "Igor Vasiljevic", "Sedrick Scott Keh", "Kushal Arora", "Achal Dave", "Adrien Gaidon", "Thomas Kollar" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/tri-ml\/linear_open_lm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":41 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=sKNIjS2brr", "bibtext":"@inproceedings{\nlin2024videodirectorgpt,\ntitle={VideoDirector{GPT}: Consistent Multi-Scene Video Generation via {LLM}-Guided Planning},\nauthor={Han Lin and Abhay Zala and Jaemin Cho and Mohit Bansal},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=sKNIjS2brr}\n}", "abstract":"Recent text-to-video (T2V) generation methods have seen significant advancements. However, the majority of these works focus on producing short video clips of a single event (i.e., single-scene videos). Meanwhile, recent large language models (LLMs) have demonstrated their capability in generating layouts and programs to control downstream visual modules. This prompts an important question: can we leverage the knowledge embedded in these LLMs for temporally consistent long video generation? In this paper, we propose VideoDirectorGPT, a novel framework for consistent multi-scene video generation that uses the knowledge of LLMs for video content planning and grounded video generation. Specifically, given a single text prompt, we first ask our video planner LLM (GPT-4) to expand it into a \u2018video plan\u2019, which includes the scene descriptions, the entities with their respective layouts, the background for each scene, and consistency groupings of the entities. Next, guided by this video plan, our video generator, named Layout2Vid, has explicit control over spatial layouts and can maintain temporal consistency of entities across multiple scenes, while being trained only with image-level annotations. Our experiments demonstrate that our proposed VideoDirectorGPT framework substantially improves layout and movement control in both single- and multi-scene video generation and can generate multi-scene videos with consistency, while achieving competitive performance with SOTAs in open-domain single-scene T2V generation. Detailed ablation studies, including dynamic adjustment of layout control strength with an LLM and video generation with user-provided images, confirm the effectiveness of each component of our framework and its future potential.", "title":"VideoDirectorGPT: Consistent Multi-Scene Video Generation via LLM-Guided Planning", "authors":[ "Han Lin", "Abhay Zala", "Jaemin Cho", "Mohit Bansal" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":42 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=sKATR2O1Y0", "bibtext":"@inproceedings{\nxie2024openagents,\ntitle={OpenAgents: An Open Platform for Language Agents in the Wild},\nauthor={Tianbao Xie and Fan Zhou and Zhoujun Cheng and Peng Shi and Luoxuan Weng and Yitao Liu and Toh Jing Hua and Junning Zhao and Qian Liu and Che Liu and Zeyu Liu and Yiheng Xu and Hongjin SU and Dongchan Shin and Caiming Xiong and Tao Yu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=sKATR2O1Y0}\n}", "abstract":"Language agents show potential in being capable of utilizing natural language for varied and intricate tasks in diverse environments, particularly when built upon large language models (LLMs). Current language agent frameworks aim to facilitate the construction of proof-of-concept language agents while neglecting the non-expert user access to agents and paying little attention to application-level designs. We present OpenAgents, an open platform for using and hosting language agents in the wild of everyday life. OpenAgents includes three agents: (1) Data Agent for data analysis with Python\/SQL and data tools; (2) Plugins Agent with 200+ daily API tools; (3) Web Agent for autonomous web browsing. OpenAgents enables general users to interact with agent functionalities through a web user interface optimized for swift responses and common failures while offering developers and researchers a seamless deployment experience on local setups, providing a foundation for crafting innovative language agents and facilitating real-world evaluations. We elucidate the challenges and opportunities, aspiring to set a foundation for future research and development of real-world language agents.", "title":"OpenAgents: An Open Platform for Language Agents in the Wild", "authors":[ "Tianbao Xie", "Fan Zhou", "Zhoujun Cheng", "Peng Shi", "Luoxuan Weng", "Yitao Liu", "Toh Jing Hua", "Junning Zhao", "Qian Liu", "Che Liu", "Zeyu Liu", "Yiheng Xu", "Hongjin SU", "Dongchan Shin", "Caiming Xiong", "Tao Yu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.10634", "GitHub":[ "https:\/\/github.com\/xlang-ai\/openagents" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.10634", "n_linked_authors":4, "upvotes":8, "num_comments":0, "n_authors":16, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":43 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=sJvhwDtFhQ", "bibtext":"@inproceedings{\nwang2024tpd,\ntitle={{TPD}: Enhancing Student Language Model Reasoning via Principle Discovery and Guidance},\nauthor={Haorui Wang and Rongzhi Zhang and Yinghao Li and Lingkai Kong and Yuchen Zhuang and Xiusi Chen and Chao Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=sJvhwDtFhQ}\n}", "abstract":"Large Language Models (LLMs) have recently showcased remarkable reasoning abilities. However, larger models often surpass their smaller counterparts in reasoning tasks, posing the challenge of effectively transferring these capabilities from larger models. Existing approaches heavily rely on extensive fine-tuning data or continuous interactions with a superior teacher LLM during inference. We introduce a principle-based teacher-student framework called Teaching via Principle Discovery (TPD) to address these limitations. Inspired by human learning mechanisms, TPD mimics the interaction between a teacher and a student using a principle-based approach. The teacher LLM generates problem-solving instructions and corrective principles based on the student LLM's errors. These principles guide the refinement of instructions and the selection of instructive examples from a validation set. This enables the student model to learn from both the teacher's guidance and its own mistakes. Once the student model begins making inferences, TPD requires no further intervention from the teacher LLM. Through extensive experiments across eight reasoning tasks, we demonstrate the effectiveness of TPD. Compared to standard chain-of-thought prompting, TPD significantly improves the student model's performance, achieving an average improvement of 6.2\\%.", "title":"TPD: Enhancing Student Language Model Reasoning via Principle Discovery and Guidance", "authors":[ "Haorui Wang", "Rongzhi Zhang", "Yinghao Li", "Lingkai Kong", "Yuchen Zhuang", "Xiusi Chen", "Chao Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.13849", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.13849", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":44 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=sBxvoDhvao", "bibtext":"@inproceedings{\nremy2024transtokenization,\ntitle={Trans-Tokenization and Cross-lingual Vocabulary Transfers: Language Adaptation of {LLM}s for Low-Resource {NLP}},\nauthor={Fran{\\c{c}}ois Remy and Pieter Delobelle and Hayastan Avetisyan and Alfiya Khabibullina and Miryam de Lhoneux and Thomas Demeester},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=sBxvoDhvao}\n}", "abstract":"The development of monolingual language models for low and mid-resource languages continues to be hindered by the difficulty in sourcing high-quality training data. In this study, we present a novel cross-lingual vocabulary transfer strategy, trans-tokenization, designed to tackle this challenge and enable more efficient language adaptation. Our approach focuses on adapting a high-resource monolingual LLM to an unseen target language by initializing the token embeddings of the target language using a weighted average of semantically similar token embeddings from the source language. For this, we leverage a translation resource covering both the source and target languages. We validate our method with the Tweeties, a series of trans-tokenized LLMs, and demonstrate their competitive performance on various downstream tasks across a small but diverse set of languages. Additionally, we introduce Hydra LLMs, models with multiple swappable language modeling heads and embedding tables, which further extend the capabilities of our trans-tokenization strategy. By designing a Hydra LLM based on the multilingual model TowerInstruct, we developed a state-of-the-art machine translation model for Tatar, in a zero-shot manner, completely bypassing the need for high-quality parallel data. This breakthrough is particularly significant for low-resource languages like Tatar, where high-quality parallel data is hard to come by. By lowering the data and time requirements for training high-quality models, our trans-tokenization strategy allows for the development of LLMs for a wider range of languages, especially those with limited resources. We hope that our work will inspire further research and collaboration in the field of cross-lingual vocabulary transfer and contribute to the empowerment of languages on a global scale.", "title":"Trans-Tokenization and Cross-lingual Vocabulary Transfers: Language Adaptation of LLMs for Low-Resource NLP", "authors":[ "Fran\u00e7ois Remy", "Pieter Delobelle", "Hayastan Avetisyan", "Alfiya Khabibullina", "Miryam de Lhoneux", "Thomas Demeester" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lagom-nlp\/transtokenizer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":45 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=rzQGHXNReU", "bibtext":"@inproceedings{\nzhang2024raft,\ntitle={{RAFT}: Adapting Language Model to Domain Specific {RAG}},\nauthor={Tianjun Zhang and Shishir G Patil and Naman Jain and Sheng Shen and Matei Zaharia and Ion Stoica and Joseph E. Gonzalez},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=rzQGHXNReU}\n}", "abstract":"Pretraining Large Language Models (LLMs) on large corpora of textual data is now a standard paradigm. \nWhen using these LLMs for many downstream applications, it is common to additionally incorporate new information into the pretrained model either through RAG-based-prompting, or finetuning. \nHowever, the best methodology to incorporate information remains an open question. \nIn this paper, we present Retrieval Augmented Fine Tuning (RAFT), a training recipe which improves the model's ability to answer questions in \"open-book\" in-domain settings. In training RAFT, given a question, and a set of retrieved documents, we train the model to ignore those documents that don't help in answering the question, which we call, distractor documents. RAFT accomplishes this by citing verbatim the right sequence from the relevant document to help answer the question. This coupled with RAFT's chain-of-thought-style response helps improve the model's ability to reason. In domain specific RAG, RAFT consistently improves the model's performance across PubMed, HotpotQA, and Gorilla datasets, presenting a post-training recipe to improve pre-trained LLMs to in-domain RAG.", "title":"RAFT: Adapting Language Model to Domain Specific RAG", "authors":[ "Tianjun Zhang", "Shishir G Patil", "Naman Jain", "Sheng Shen", "Matei Zaharia", "Ion Stoica", "Joseph E. Gonzalez" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ShishirPatil\/gorilla" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":46 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=rXEwxmnGQs", "bibtext":"@inproceedings{\ndeas2024phonate,\ntitle={Phon{AT}e: Impact of Type-Written Phonological Features of African American Language on Generative Language Modeling Tasks},\nauthor={Nicholas Deas and Jessica A Grieser and Xinmeng Hou and Shana Kleiner and Tajh Martin and Sreya Nandanampati and Desmond U. Patton and Kathleen McKeown},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=rXEwxmnGQs}\n}", "abstract":"Current Large Language Models perform poorly on African American Language (AAL) texts in tasks like toxicity detection and sentiment analysis. AAL is underrepresented in both pre-training data and existing benchmarks for these tasks, hindering thorough evaluation and understanding of these biases. We introduce a novel approach to synthetically introduce type-written phonological features of AAL into text, a class of AAL features that has been overlooked in prior work. Our goal is to better understand how these features affect generative language models' performance on three tasks: toxicity detection, sentiment analysis, and masked span prediction. We find that fine-tuning with synthetic type-written phonological features lowers perceived biases on downstream tasks and our ablations reveal which features have particularly large negative impacts on model performance. Our results suggest that phonological features are vital to consider when designing bias mitigation techniques.", "title":"PhonATe: Impact of Type-Written Phonological Features of African American Language on Generative Language Modeling Tasks", "authors":[ "Nicholas Deas", "Jessica A Grieser", "Xinmeng Hou", "Shana Kleiner", "Tajh Martin", "Sreya Nandanampati", "Desmond U. Patton", "Kathleen McKeown" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":47 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=qyilOnIRHI", "bibtext":"@inproceedings{\nzhao2024implicit,\ntitle={Implicit Geometry of Next-token Prediction: From Language Sparsity Patterns to Model Representations},\nauthor={Yize Zhao and Tina Behnia and Vala Vakilian and Christos Thrampoulidis},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=qyilOnIRHI}\n}", "abstract":"Next-token prediction (NTP) over large text corpora has become the go-to paradigm to train large language models. Yet, it remains unclear how NTP influences the mapping of linguistic patterns to geometric properties of the resulting model representations. We frame training of large language models as soft-label classification over sparse probabilistic label vectors, coupled with an analytical approximation that allows unrestricted generation of context embeddings. This approach links NTP training to rank-constrained, nuclear-norm regularized optimization in the logit domain, offering a framework for analyzing the geometry of word and context embeddings. In large embedding spaces, we find that NTP implicitly favors learning logits with a sparse plus low-rank structure. While the sparse component captures the co-occurrence frequency of context-word pairs, the orthogonal low-rank component, which becomes dominant as training progresses, depends solely on the sparsity pattern of the co-occurrence matrix. Consequently, when projected onto an appropriate subspace, representations of contexts that are followed by the same set of next-tokens collapse\u2014a phenomenon we term subspace-collapse. We validate our theory on synthetic and small-scale real language datasets. Finally, we outline potential research directions aimed at deepening the understanding of NTP's influence on the learning of linguistic patterns and regularities.", "title":"Implicit Geometry of Next-token Prediction: From Language Sparsity Patterns to Model Representations", "authors":[ "Yize Zhao", "Tina Behnia", "Vala Vakilian", "Christos Thrampoulidis" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.15417", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":48 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=qHdSA85GyZ", "bibtext":"@inproceedings{\nwang2024look,\ntitle={Look at the Text: Instruction-Tuned Language Models are More Robust Multiple Choice Selectors than You Think},\nauthor={Xinpeng Wang and Chengzhi Hu and Bolei Ma and Paul Rottger and Barbara Plank},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=qHdSA85GyZ}\n}", "abstract":"Multiple choice questions (MCQs) are commonly used to evaluate the capabilities of large language models (LLMs). One common way to evaluate the model response is to rank the candidate answers based on the log probability of the first token prediction. An alternative way is to examine the text output. Prior work has shown that first token probabilities lack robustness to changes in MCQ phrasing, and that first token probabilities do not match text answers for instruction-tuned models. Therefore, in this paper, we investigate the robustness of text answers. We show that the text answers are more robust to question perturbations than the first token probabilities, when the first token answers mismatch the text answers. The difference in robustness increases as the mismatch rate becomes greater. As the mismatch reaches over 50%, the text answer is more robust to option order changes than the debiased first token probabilities using state-of-the-art debiasing methods such as PriDe. Our findings provide further evidence for the benefits of text answer evaluation over first token probability evaluation.", "title":"Look at the Text: Instruction-Tuned Language Models are More Robust Multiple Choice Selectors than You Think", "authors":[ "Xinpeng Wang", "Chengzhi Hu", "Bolei Ma", "Paul Rottger", "Barbara Plank" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.08382", "GitHub":[ "https:\/\/github.com\/mainlp\/mcq-robustness" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.08382", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ "mainlp\/MCQ-Classifier-MMLU-XYZ", "mainlp\/MCQ-Classifier-MMLU-EFG" ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":49 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=q5Ft9ZJtHm", "bibtext":"@inproceedings{\nhan2024chatgpt,\ntitle={Chat{GPT} Based Data Augmentation for Improved Parameter-Efficient Debiasing of {LLM}s},\nauthor={Pengrui Han and Rafal Dariusz Kocielnik and Adhithya Prakash Saravanan and Roy Luoyao Jiang and Or Sharir and Anima Anandkumar},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=q5Ft9ZJtHm}\n}", "abstract":"Large Language models (LLMs), while powerful, exhibit harmful social biases. Debiasing is often challenging due to computational costs, data constraints, and potential degradation of multi-task language capabilities. This work introduces a novel approach utilizing ChatGPT to generate synthetic training data, aiming to enhance the debiasing of LLMs. We propose two strategies: Targeted Prompting, which provides effective debiasing for known biases but necessitates prior specification of bias in question; and General Prompting, which, while slightly less effective, offers debiasing across various categories. We leverage resource-efficient LLM debiasing using adapter tuning and compare the effectiveness of our synthetic data to existing debiasing datasets. Our results reveal that: (1) ChatGPT can efficiently produce high-quality training data for debiasing other LLMs; (2) data produced via our approach surpasses existing datasets in debiasing performance while also preserving internal knowledge of a pre-trained LLM; and (3) synthetic data exhibits generalizability across categories, effectively mitigating various biases, including intersectional ones. These findings underscore the potential of synthetic data in advancing the fairness of LLMs with minimal retraining cost.", "title":"ChatGPT Based Data Augmentation for Improved Parameter-Efficient Debiasing of LLMs", "authors":[ "Pengrui Han", "Rafal Dariusz Kocielnik", "Adhithya Prakash Saravanan", "Roy Luoyao Jiang", "Or Sharir", "Anima Anandkumar" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.11764", "GitHub":[ "https:\/\/github.com\/barryhpr\/syntheticdebiasing" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":50 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=q36rpGlG9X", "bibtext":"@inproceedings{\nqi2024large,\ntitle={Large Language Models as Biomedical Hypothesis Generators: A Comprehensive Evaluation},\nauthor={Biqing Qi and Kaiyan Zhang and Kai Tian and Haoxiang Li and Zhang-Ren Chen and Sihang Zeng and Ermo Hua and Hu Jinfang and Bowen Zhou},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=q36rpGlG9X}\n}", "abstract":"The rapid growth of biomedical knowledge has outpaced our ability to efficiently extract insights and generate novel hypotheses.\nLarge language models (LLMs) have emerged as a promising tool to revolutionize knowledge interaction and potentially accelerate biomedical discovery. In this paper, we present a comprehensive evaluation of LLMs as biomedical hypothesis generators. We construct a dataset of background-hypothesis pairs from biomedical literature, carefully partitioned into training, seen, and unseen test sets based on publication date to mitigate data contamination. Using this dataset, we assess the hypothesis generation capabilities of top-tier instructed models in zero-shot, few-shot, and fine-tuning settings. To enhance the exploration of uncertainty, a crucial aspect of scientific discovery, we incorporate tool use and multi-agent interactions in our evaluation framework. Furthermore, we propose four novel metrics grounded in extensive literature review to evaluate the quality of generated hypotheses, considering both LLM-based and human assessments. Our experiments yield two key findings: 1) LLMs can generate novel and validated hypotheses, even when tested on literature unseen during training, and 2) Increasing uncertainty through multi-agent interactions and tool use can facilitate diverse candidate generation and improve zero-shot hypothesis generation performance. However, we also observe that the integration of additional knowledge through few-shot learning and tool use may not always lead to performance gains, highlighting the need for careful consideration of the type and scope of external knowledge incorporated. These findings underscore the potential of LLMs as powerful aids in biomedical hypothesis generation and provide valuable insights to guide further research in this area.", "title":"Large Language Models as Biomedical Hypothesis Generators: A Comprehensive Evaluation", "authors":[ "Biqing Qi", "Kaiyan Zhang", "Kai Tian", "Haoxiang Li", "Zhang-Ren Chen", "Sihang Zeng", "Ermo Hua", "Hu Jinfang", "Bowen Zhou" ], "id":"Conference", "type":"Poster", "arxiv_id":"2407.08940", "GitHub":[ "https:\/\/github.com\/tsinghuac3i\/llm4biohypogen" ], "paper_page":"https:\/\/huggingface.co\/papers\/2407.08940", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":9, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":51 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ptvV5HGTNN", "bibtext":"@inproceedings{\nwang2024resolving,\ntitle={Resolving Knowledge Conflicts in Large Language Models},\nauthor={Yike Wang and Shangbin Feng and Heng Wang and Weijia Shi and Vidhisha Balachandran and Tianxing He and Yulia Tsvetkov},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ptvV5HGTNN}\n}", "abstract":"Large language models (LLMs) often encounter knowledge conflicts, scenarios where discrepancy arises between the internal parametric knowledge of LLMs and non-parametric information provided in the prompt context. In this work we ask what are the desiderata for LLMs when a knowledge conflict arises and whether existing LLMs fulfill them. We posit that LLMs should 1) identify knowledge conflicts, 2) pinpoint conflicting information segments, and 3) provide distinct answers or viewpoints in conflicting scenarios. To this end, we introduce an evaluation framework for simulating contextual knowledge conflicts and quantitatively evaluating to what extent LLMs achieve these goals. It includes diverse and complex situations of knowledge conflict, knowledge from diverse entities and domains, two synthetic conflict creation methods, and settings with progressively increasing difficulty to reflect realistic knowledge conflicts. Extensive experiments with the framework reveal that while LLMs perform well in identifying the existence of knowledge conflicts, they struggle to determine the specific conflicting knowledge and produce a response with distinct answers amidst conflicting information. To address these challenges, we propose new instruction-based approaches that augment LLMs to better achieve the three goals. Further analysis shows that abilities to tackle knowledge conflicts are greatly impacted by factors such as knowledge domain, while generating robust responses to knowledge conflict scenarios remains an open research question.", "title":"Resolving Knowledge Conflicts in Large Language Models", "authors":[ "Yike Wang", "Shangbin Feng", "Heng Wang", "Weijia Shi", "Vidhisha Balachandran", "Tianxing He", "Yulia Tsvetkov" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.00935", "GitHub":[ "https:\/\/github.com\/yikee\/knowledge_conflict" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":52 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=pYEnhZ6NAv", "bibtext":"@inproceedings{\nzhang2024how,\ntitle={How Far Are We from Intelligent Visual Deductive Reasoning?},\nauthor={Yizhe Zhang and Richard He Bai and Ruixiang ZHANG and Jiatao Gu and Shuangfei Zhai and Joshua M. Susskind and Navdeep Jaitly},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=pYEnhZ6NAv}\n}", "abstract":"Vision-Language Models (VLMs) have recently demonstrated incredible strides on diverse vision language tasks.\nWe dig into vision-based deductive reasoning, a more sophisticated but less explored realm, and find previously unexposed blindspots in the current SOTA VLMs.\nSpecifically, we leverage Raven\u2019s Progressive Matrices (RPMs), to assess VLMs' abilities to perform multi-hop relational and deductive reasoning relying solely on visual clues.\nWe perform comprehensive evaluations of several popular VLMs employing standard strategies such as in-context learning, self-consistency, and Chain-of-thoughts (CoT) on three diverse datasets, including the Mensa IQ test, IntelligenceTest, and RAVEN.\nThe results reveal that despite the impressive capabilities of LLMs in text-based reasoning, we are still far from achieving comparable proficiency in visual deductive reasoning.\nWe found that certain standard strategies that are effective when applied to LLMs do not seamlessly translate to the challenges presented by visual reasoning tasks. \nA detailed analysis reveals that VLMs struggle to solve these tasks mainly because they are unable to perceive and comprehend multiple, confounding abstract patterns in RPM examples.", "title":"How Far Are We from Intelligent Visual Deductive Reasoning?", "authors":[ "Yizhe Zhang", "Richard He Bai", "Ruixiang ZHANG", "Jiatao Gu", "Shuangfei Zhai", "Joshua M. Susskind", "Navdeep Jaitly" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/apple\/ml-rpm-bench" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":53 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=pUEDkZyPDl", "bibtext":"@inproceedings{\nli2024distflashattn,\ntitle={{DISTFLASHATTN}: Distributed Memory-efficient Attention for Long-context {LLM}s Training},\nauthor={Dacheng Li and Rulin Shao and Anze Xie and Eric P. Xing and Xuezhe Ma and Ion Stoica and Joseph E. Gonzalez and Hao Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=pUEDkZyPDl}\n}", "abstract":"FlashAttention effectively reduces the quadratic peak memory usage to linear in training transformer-based large language models (LLMs) on a single GPU. In this paper, we introduce DistFlashAttention, a distributed memory-efficient attention mechanism optimized for long-context LLMs training. We propose three key techniques: token-level workload balancing, overlapping key-value communication, and a rematerialization-aware gradient checkpointing algorithm. We evaluate DistFlashAttention on Llama-7B and variants with sequence lengths from 32K to 512K. DistFlashAttention achieves 8x longer sequences, 4.45 - 5.64x speedup compared to Ring Self-Attention, 2-8x longer sequences, 1.24- 2.01x speedup compared to Megatron-LM with FlashAttention. It achieves 1.67x and 1.26-1.88x speedup compared to recent Ring Attention and DeepSpeed-Ulysses. Codes are available at https:\/\/github.com\/RulinShao\/LightSeq.", "title":"DISTFLASHATTN: Distributed Memory-efficient Attention for Long-context LLMs Training", "authors":[ "Dacheng Li", "Rulin Shao", "Anze Xie", "Eric P. Xing", "Xuezhe Ma", "Ion Stoica", "Joseph E. Gonzalez", "Hao Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/rulinshao\/lightseq" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":54 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=pKMxO0wBYZ", "bibtext":"@inproceedings{\ntian2024web,\ntitle={Web Retrieval Agents for Evidence-Based Misinformation Detection},\nauthor={Jacob-Junqi Tian and Hao Yu and Yury Orlovskiy and Tyler Vergho and Mauricio Rivera and Mayank Goel and Zachary Yang and Jean-Fran{\\c{c}}ois Godbout and Reihaneh Rabbany and Kellin Pelrine},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=pKMxO0wBYZ}\n}", "abstract":"This paper develops an agent-based automated fact-checking approach for detecting misinformation. We demonstrate that combining a powerful LLM agent, which does not have access to the internet for searches, with an online web search agent yields better results than when each tool is used independently. Our approach is robust across multiple models, outperforming alternatives and increasing the macro F1 of misinformation detection by as much as 20 percent compared to LLMs without search. We also conduct extensive analyses on the sources our system leverages and their biases, decisions in the construction of the system like the search tool and the knowledge base, the type of evidence needed and its impact on the results, and other parts of the overall process. By combining strong performance with in-depth understanding, we hope to provide building blocks for future search-enabled misinformation mitigation systems.", "title":"Web Retrieval Agents for Evidence-Based Misinformation Detection", "authors":[ "Jacob-Junqi Tian", "Hao Yu", "Yury Orlovskiy", "Tyler Vergho", "Mauricio Rivera", "Mayank Goel", "Zachary Yang", "Jean-Fran\u00e7ois Godbout", "Reihaneh Rabbany", "Kellin Pelrine" ], "id":"Conference", "type":"Poster", "arxiv_id":"2409.00009", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":55 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=otKo4zFKmH", "bibtext":"@inproceedings{\nguan2024task,\ntitle={Task Success is not Enough: Investigating the Use of Video-Language Models as Behavior Critics for Catching Undesirable Agent Behaviors},\nauthor={Lin Guan and Yifan Zhou and Denis Liu and Yantian Zha and Heni Ben Amor and Subbarao Kambhampati},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=otKo4zFKmH}\n}", "abstract":"Large-scale generative models are shown to be useful for sampling meaningful candidate solutions, yet they often overlook task constraints and user preferences. Their full power is better harnessed when the models are coupled with external verifiers and the final solutions are derived iteratively or progressively according to the verification feedback. In the context of embodied AI, verification often solely involves assessing whether goal conditions specified in the instructions have been met. Nonetheless, for these agents to be seamlessly integrated into daily life, it is crucial to account for a broader range of constraints and preferences beyond bare task success (e.g., a robot should grasp bread with care to avoid significant deformations). However, given the unbounded scope of robot tasks, it is infeasible to construct scripted verifiers akin to those used for explicit-knowledge tasks like the game of Go and theorem proving. This begs the question: when no sound verifier is available, can we use large vision and language models (VLMs), which are approximately omniscient, as scalable Behavior Critics to help catch undesirable robot behaviors in videos? To answer this, we first construct a benchmark that contains diverse cases of goal-reaching yet undesirable robot policies. Then, we comprehensively evaluate VLM critics to gain a deeper understanding of their strengths and failure modes. Based on the evaluation, we provide guidelines on how to effectively utilize VLM critiques and showcase a practical way to integrate the feedback into an iterative process of policy refinement. The dataset and codebase are released at: https:\/\/guansuns.github.io\/pages\/vlm-critic.", "title":"Task Success is not Enough: Investigating the Use of Video-Language Models as Behavior Critics for Catching Undesirable Agent Behaviors", "authors":[ "Lin Guan", "Yifan Zhou", "Denis Liu", "Yantian Zha", "Heni Ben Amor", "Subbarao Kambhampati" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.04210", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":56 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=oqYiYG8PtY", "bibtext":"@inproceedings{\nwang2024stop,\ntitle={Stop Reasoning! When Multimodal {LLM} with Chain-of-Thought Reasoning Meets Adversarial Image},\nauthor={Zefeng Wang and Zhen Han and Shuo Chen and Fan Xue and Zifeng Ding and Xun Xiao and Volker Tresp and Philip Torr and Jindong Gu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=oqYiYG8PtY}\n}", "abstract":"Multimodal LLMs (MLLMs) with a great ability of text and image under- standing have received great attention. To achieve better reasoning with MLLMs, Chain-of-Thought (CoT) reasoning has been widely explored, which further promotes MLLMs\u2019 explainability by giving intermediate reasoning steps. Despite the strong power demonstrated by MLLMs in multimodal reasoning, recent studies show that MLLMs still suffer from ad- versarial images. This raises the following open questions: Does CoT also enhance the adversarial robustness of MLLMs? What do the intermediate reasoning steps of CoT entail under adversarial attacks? To answer these questions, we first generalize existing attacks to CoT-based inferences by attacking the two main components, i.e., rationale and answer. We find that CoT indeed improves MLLMs\u2019 adversarial robustness against the existing attack methods by leveraging the multi-step reasoning process, but not substantially. Based on our findings, we further propose a novel attack method, termed as stop-reasoning attack, that attacks the model while by- passing the CoT reasoning process. Experiments on three MLLMs and two visual reasoning datasets verify the effectiveness of our proposed method. We show that stop-reasoning attack can result in misled predictions and outperform baseline attacks by a significant margin.", "title":"Stop Reasoning! When Multimodal LLM with Chain-of-Thought Reasoning Meets Adversarial Image", "authors":[ "Zefeng Wang", "Zhen Han", "Shuo Chen", "Fan Xue", "Zifeng Ding", "Xun Xiao", "Volker Tresp", "Philip Torr", "Jindong Gu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":57 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ootI3ZO6TJ", "bibtext":"@inproceedings{\njain2024polyglotoxicityprompts,\ntitle={PolygloToxicityPrompts: Multilingual Evaluation of Neural Toxic Degeneration in Large Language Models},\nauthor={Devansh Jain and Priyanshu Kumar and Samuel Gehman and Xuhui Zhou and Thomas Hartvigsen and Maarten Sap},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ootI3ZO6TJ}\n}", "abstract":"Recent advances in large language models (LLMs) have led to their extensive global deployment, and ensuring their safety calls for comprehensive and multilingual toxicity evaluations. However, existing toxicity benchmarks are overwhelmingly focused on English, posing serious risks to deploying LLMs in other languages. We address this by introducing PolygloToxicityPrompts (PTP), the first large-scale multilingual toxicity evaluation benchmark of 425K naturally-occurring prompts spanning 17 languages. We overcome the scarcity of naturally occurring toxicity in web-text and ensure coverage across languages with varying resources by automatically scraping over 100M web-text documents. Using PTP, we investigate research questions to study the impact of model size, prompt language, and instruction and preference-tuning methods on toxicity by benchmarking over 60 LLMs. Notably, we find that toxicity increases as language resources decrease or model size increases. Although instruction- and preference-tuning reduce toxicity, the choice of preference-tuning method does not have any significant impact. Our findings shed light on crucial shortcomings of LLM safeguarding and highlight areas for future research.", "title":"PolygloToxicityPrompts: Multilingual Evaluation of Neural Toxic Degeneration in Large Language Models", "authors":[ "Devansh Jain", "Priyanshu Kumar", "Samuel Gehman", "Xuhui Zhou", "Thomas Hartvigsen", "Maarten Sap" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/kpriyanshu256\/polyglo-toxicity-prompts" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":58 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=oSG6qGkt1I", "bibtext":"@inproceedings{\nsosa2024reasoning,\ntitle={Reasoning about concepts with {LLM}s: Inconsistencies abound},\nauthor={Rosario Uceda Sosa and Karthikeyan Natesan Ramamurthy and Maria Chang and Moninder Singh},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=oSG6qGkt1I}\n}", "abstract":"The ability to summarize and organize knowledge into abstract concepts is key to learning and reasoning. Many industrial applications rely on the consistent and systematic use of concepts, especially when dealing with decision-critical knowledge. However, we demonstrate that, when methodically questioned, large language models (LLMs) often display and demonstrate significant inconsistencies in their knowledge.\n\nComputationally, the basic aspects of the conceptualization of a given domain can be represented as Is-A hierarchies in a knowledge graph (KG) or ontology, together with a few properties or axioms that enable straightforward reasoning. We show that even simple ontologies can be used to reveal conceptual inconsistencies across several LLMs. We also propose strategies that domain experts can use to evaluate and improve the coverage of key domain concepts in LLMs of various sizes. In particular, we have been able to significantly enhance the performance of LLMs of various sizes with openly available weights using simple knowledge-graph (KG) based prompting strategies.", "title":"Reasoning about concepts with LLMs: Inconsistencies abound", "authors":[ "Rosario Uceda Sosa", "Karthikeyan Natesan Ramamurthy", "Maria Chang", "Moninder Singh" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.20163", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.20163", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ "ibm\/knowledge_consistency_of_LLMs" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":59 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=oRcYFm8vyB", "bibtext":"@inproceedings{\nfinlayson2024logits,\ntitle={Logits of {API}-Protected {LLM}s Leak Proprietary Information},\nauthor={Matthew Finlayson and Xiang Ren and Swabha Swayamdipta},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=oRcYFm8vyB}\n}", "abstract":"Large language model (LLM) providers often hide the architectural details and parameters of their proprietary models by restricting public access to a limited API. In this work we show that, with only a conservative assumption about the model architecture, it is possible to learn a surprisingly large amount of non-public information about an API-protected LLM from a relatively small number of API queries (e.g., costing under $1000 USD for OpenAI\u2019s gpt-3.5-turbo). Our findings are centered on one key observation: most modern LLMs suffer from a softmax bottleneck, which restricts the model outputs to a linear subspace of the full output space. We exploit this fact to unlock several capabilities, including (but not limited to) obtaining cheap full-vocabulary outputs, auditing for specific types of model updates, identifying the source LLM given a single full LLM output, and even efficiently discovering the LLM\u2019s hidden size. Our empirical investigations show the effectiveness of our methods, which allow us to estimate the embedding size of OpenAI\u2019s gpt-3.5-turbo to be about 4096. Lastly, we discuss ways that LLM providers can guard against these attacks, as well as how these capabilities can be viewed as a feature (rather than a bug) by allowing for greater transparency and accountability.", "title":"Logits of API-Protected LLMs Leak Proprietary Information", "authors":[ "Matthew Finlayson", "Xiang Ren", "Swabha Swayamdipta" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.09539", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.09539", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":60 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=oRXPiSOGH9", "bibtext":"@inproceedings{\nzelikman2024quietstar,\ntitle={Quiet-{ST}aR: Language Models Can Teach Themselves to Think Before Speaking},\nauthor={Eric Zelikman and Georges Raif Harik and Yijia Shao and Varuna Jayasiri and Nick Haber and Noah Goodman},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=oRXPiSOGH9}\n}", "abstract":"When writing and talking, people sometimes pause to think. Although reasoning-focused works have often framed reasoning as a method of answering questions or completing agentic tasks, reasoning is implicit in almost all written text. For example, this applies to the steps not stated between the lines of a proof or to the theory of mind underlying a conversation. In the Self-Taught Reasoner (STaR, Zelikman et al. 2022), useful thinking is learned by inferring rationales from few-shot examples in question-answering and learning from those that lead to a correct answer. This is a highly constrained setting -- ideally, a language model could instead learn to infer unstated rationales in arbitrary text. We present Quiet-STaR, a generalization of STaR in which LMs learn to generate rationales at each token to explain future text, improving their predictions. We address key challenges, including 1) the computational cost of generating continuations, 2) the fact that the LM does not initially know how to generate or use internal thoughts, and 3) the need to predict beyond individual next tokens. To resolve these, we propose a tokenwise parallel sampling algorithm, using learnable tokens indicating a thought's start and end, and an extended teacher-forcing technique. Encouragingly, generated rationales disproportionately help model difficult-to-predict tokens and improve the LM's ability to directly answer difficult questions. In particular, after continued pretraining of an LM on a corpus of internet text with Quiet-STaR, we find zero-shot improvements on GSM8K (5.9%\u219210.9%) and CommonsenseQA (36.3%\u219247.2%) and observe a perplexity improvement of difficult tokens in natural text. Crucially, these improvements require no fine-tuning on these tasks. Quiet-STaR marks a step towards LMs that can learn to reason in a more general and scalable way.", "title":"Quiet-STaR: Language Models Can Teach Themselves to Think Before Speaking", "authors":[ "Eric Zelikman", "Georges Raif Harik", "Yijia Shao", "Varuna Jayasiri", "Nick Haber", "Noah Goodman" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.09629", "GitHub":[ "https:\/\/github.com\/ezelikman\/quiet-star" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.09629", "n_linked_authors":5, "upvotes":72, "num_comments":3, "n_authors":6, "Models":[ "ezelikman\/quietstar-8-ahead", "Crystalcareai\/Quiet-Star-Custom", "pharaouk\/Quiet-Star-Custom", "casperhansen\/Mistral-7B-v0.1-qstar-original", "QuantFactory\/quietstar-8-ahead-GGUF", "pharaouk\/qstar", "blockblockblock\/Quiet-Star-Custom-bpw2.5", "blockblockblock\/Quiet-Star-Custom-bpw3", "blockblockblock\/Quiet-Star-Custom-bpw3.5", "blockblockblock\/Quiet-Star-Custom-bpw4.8", "blockblockblock\/Quiet-Star-Custom-bpw3.7", "blockblockblock\/Quiet-Star-Custom-bpw4", "blockblockblock\/Quiet-Star-Custom-bpw4.2", "blockblockblock\/Quiet-Star-Custom-bpw4.4", "blockblockblock\/Quiet-Star-Custom-bpw5", "blockblockblock\/Quiet-Star-Custom-bpw4.6", "blockblockblock\/Quiet-Star-Custom-bpw6", "blockblockblock\/Quiet-Star-Custom-bpw5.5" ], "Datasets":[ ], "Spaces":[ "awacke1\/SelfTaughtReasonerAI" ], "paper_page_exists_pre_conf":1, "unique_id":61 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nqLAuMOF6n", "bibtext":"@inproceedings{\nsukhbaatar2024branchtrainmix,\ntitle={Branch-Train-MiX: Mixing Expert {LLM}s into a Mixture-of-Experts {LLM}},\nauthor={Sainbayar Sukhbaatar and Olga Golovneva and Vasu Sharma and Hu Xu and Xi Victoria Lin and Baptiste Roziere and Jacob Kahn and Shang-Wen Li and Wen-tau Yih and Jason E Weston and Xian Li},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nqLAuMOF6n}\n}", "abstract":"We investigate efficient methods for training Large Language Models (LLMs) to possess capabilities in multiple specialized domains, such as coding, math reasoning and world knowledge. Our method, named Branch-Train-MiX (BTX), starts from a seed model, which is branched to train experts in embarrassingly parallel fashion with high throughput and reduced communication cost. After individual experts are asynchronously trained, BTX brings together their feedforward parameters as experts in Mixture-of-Expert (MoE) layers and averages the remaining parameters, followed by an MoE-finetuning stage to learn token-level routing. BTX generalizes two special cases, the Branch-Train-Merge method, which does not have the MoE finetuning stage to learn routing, and sparse upcycling, which omits the stage of training experts asynchronously. Compared to alternative approaches, BTX achieves the best accuracy-efficiency tradeoff.", "title":"Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", "authors":[ "Sainbayar Sukhbaatar", "Olga Golovneva", "Vasu Sharma", "Hu Xu", "Xi Victoria Lin", "Baptiste Roziere", "Jacob Kahn", "Shang-Wen Li", "Wen-tau Yih", "Jason E Weston", "Xian Li" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Leeroo-AI\/mergoo" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":62 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ndY9qFf9Sa", "bibtext":"@inproceedings{\nliu2024adamole,\ntitle={AdaMo{LE}: Fine-Tuning Large Language Models with Adaptive Mixture of Low-Rank Adaptation Experts},\nauthor={Zefang Liu and Jiahua Luo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ndY9qFf9Sa}\n}", "abstract":"We introduce AdaMoLE, a novel method for fine-tuning large language models (LLMs) through an Adaptive Mixture of Low-Rank Adaptation (LoRA) Experts. Moving beyond conventional methods that employ a static top-k strategy for activating experts, AdaMoLE dynamically adjusts the activation threshold using a dedicated threshold network, adaptively responding to the varying complexities of different tasks. By replacing a single LoRA in a layer with multiple LoRA experts and integrating a gating function with the threshold mechanism, AdaMoLE effectively selects and activates the most appropriate experts based on the input context. Our extensive evaluations across a variety of commonsense reasoning and natural language processing tasks show that AdaMoLE exceeds baseline performance. This enhancement highlights the advantages of AdaMoLE's adaptive selection of LoRA experts, improving model effectiveness without a corresponding increase in the expert count. The experimental validation not only confirms AdaMoLE as a robust approach for enhancing LLMs but also suggests valuable directions for future research in adaptive expert selection mechanisms, potentially broadening the scope for optimizing model performance across diverse language processing tasks.", "title":"AdaMoLE: Fine-Tuning Large Language Models with Adaptive Mixture of Low-Rank Adaptation Experts", "authors":[ "Zefang Liu", "Jiahua Luo" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.00361", "GitHub":[ "https:\/\/github.com\/zefang-liu\/adamole" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":63 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nXNN0x4wbl", "bibtext":"@inproceedings{\naw2024instructiontuning,\ntitle={Instruction-tuning Aligns {LLM}s to the Human Brain},\nauthor={Khai Loong Aw and Syrielle Montariol and Badr AlKhamissi and Martin Schrimpf and Antoine Bosselut},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nXNN0x4wbl}\n}", "abstract":"Instruction-tuning is a widely adopted finetuning method that enables large language models (LLMs) to generate output that more closely resembles human responses. However, no studies have shown that instruction-tuning actually teaches LLMs to process language in a similar manner as humans. We investigate the effect of instruction-tuning on aligning LLM and human language processing mechanisms in two ways: (1) brain alignment, the similarity of LLM internal representations to neural activity in the human language system, and (2) behavioral alignment, the similarity of LLM and human behavior on a reading task. We assess 25 vanilla and instruction-tuned LLMs on three datasets involving humans reading naturalistic stories and sentences, and find that instruction-tuning generally enhances brain alignment (~6%), but has no similar effect on behavioral alignment. To identify factors underlying this improvement in brain alignment, we compute correlations between brain alignment and various LLM properties, such as model size, problem-solving, and world knowledge understanding. Notably, we find a strong positive correlation between brain alignment and model size (r = 0.95), as well as performance on tasks requiring world knowledge (r = 0.81). Our results demonstrate that instruction-tuning LLMs improves both world knowledge representations and brain alignment, suggesting that the mechanisms that encode world knowledge in LLMs also improve representational alignment to the human brain.", "title":"Instruction-tuning Aligns LLMs to the Human Brain", "authors":[ "Khai Loong Aw", "Syrielle Montariol", "Badr AlKhamissi", "Martin Schrimpf", "Antoine Bosselut" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":64 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nUNbjMDBWC", "bibtext":"@inproceedings{\nliu2024an,\ntitle={An Incomplete Loop: Instruction Inference, Instruction Following, and In-Context Learning in Language Models},\nauthor={Emmy Liu and Graham Neubig and Jacob Andreas},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nUNbjMDBWC}\n}", "abstract":"Modern language models (LMs) can learn to perform new tasks in different ways: in instruction following, the target task is described explicitly in natural language; in few-shot prompting, the task is specified implicitly with a small number of examples; in instruction inference, LMs are presented with in-context examples and are then prompted to generate a natural language task description before making predictions. Each of these procedures may be thought of as invoking a different form of reasoning: instruction following involves deductive reasoning, few-shot prompting involves inductive reasoning, and instruction inference is abductive reasoning. How do these different capabilities relate? Across four LMs (from the gpt and llama families) and two learning problems (involving arithmetic functions and machine translation) we find a strong dissociation between the different types of reasoning: LMs can sometimes learn effectively from few-shot prompts even when they are unable to explain their own prediction rules; conversely, they sometimes infer useful task descriptions while completely failing to learn from human-generated descriptions of the same task. Our results highlight the non-systematic nature of reasoning even in some of today's largest LMs, and underscore the fact that very different learning mechanisms may be invoked by seemingly similar prompting procedures.", "title":"An Incomplete Loop: Instruction Inference, Instruction Following, and In-Context Learning in Language Models", "authors":[ "Emmy Liu", "Graham Neubig", "Jacob Andreas" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.03028", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":65 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nT6fQIidrQ", "bibtext":"@inproceedings{\ncornille2024learning,\ntitle={Learning to Plan for Language Modeling from Unlabeled Data},\nauthor={Nathan Cornille and Marie-Francine Moens and Florian Mai},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nT6fQIidrQ}\n}", "abstract":"By training to predict the next token in an unlabeled corpus, large language models learn to perform many tasks without any labeled data. However, their next-token-prediction objective arguably limits their performance in scenarios that require planning, such as writing a coherent article. In this paper, we train a module for planning the future writing process via a self-supervised learning objective. Given the textual context, this planning module learns to predict future abstract writing actions, which correspond to centroids in a clustered text embedding space. By conditioning on these actions, our model extends the successful language model formula to more abstract planning in an unsupervised way. Empirically, we demonstrate that our method improves language modeling performance in general, particularly with respect to the text structure. Because our framework uses a planner module that is unsupervised and external to the language model, new planner modules can be trained at large scale and easily be shared with the community.", "title":"Learning to Plan for Language Modeling from Unlabeled Data", "authors":[ "Nathan Cornille", "Marie-Francine Moens", "Florian Mai" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.00614", "GitHub":[ "https:\/\/github.com\/natithan\/learning-to-plan-for-language-modeling-from-unlabeled-data" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":66 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nMAaCsCTCI", "bibtext":"@inproceedings{\ngao2024impact,\ntitle={Impact of Preference Noise on the Alignment Performance of Generative Language Models},\nauthor={Yang Gao and Dana Alon and Donald Metzler},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nMAaCsCTCI}\n}", "abstract":"A key requirement in developing Generative Language Models (GLMs) is to have their values aligned with human\u2019s values. Preference-based alignment is a widely used paradigm for this purpose, in which preferences over generation pairs are first elicited from human annotators or AI systems, and then fed into some alignment techniques, e.g., Direct Preference Optimization. However, a substantial percent (up to 42%) of the preference pairs used in GLM alignment are noisy, and it remains unclear how the noise affect the alignment performance and how to mitigate their negative impact. In this paper, we propose a framework to inject desirable amounts and types of noise to the preferences, and systematically study the impact of preference noise on the alignment performance in two tasks (summarization and dialogue generation). We find that the alignment performance can be highly sensitive to the noise rates in the preference data: e.g., a 10 percentage points (pp) increase of the noise rate can lead to 30 pp drop in the alignment performance (in win rate). To mitigate the impact of noise, confidence-based data filtering shows significant benefit when certain types of noise are present. We hope our work can help the community better understand and mitigate the impact of preference noise in GLM alignment.", "title":"Impact of Preference Noise on the Alignment Performance of Generative Language Models", "authors":[ "Yang Gao", "Dana Alon", "Donald Metzler" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.09824", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":67 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nI6JyFSnyV", "bibtext":"@inproceedings{\nduanmu2024skvq,\ntitle={{SKVQ}: Sliding-window Key and Value Cache Quantization for Large Language Models},\nauthor={Haojie Duanmu and Zhihang Yuan and Xiuhong Li and Jiangfei Duan and Xingcheng ZHANG and Dahua Lin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nI6JyFSnyV}\n}", "abstract":"Large language models (LLMs) have demonstrated the capability to process extended token sequences, enabling complex tasks such as book comprehension and long-form text generation. However, as context length increases, the key-value (KV) cache required for LLMs consumes substantial memory, becoming a bottleneck for deployment.\nThis paper introduces SKVQ (Sliding-window KV cache Quantization), a strategy designed to address the challenge of extremely low bitwidth KV cache quantization. SKVQ rearranges the channels of the KV cache to enhance channel similarity within quantization groups and applies clipped dynamic quantization at the group level. Furthermore, SKVQ maintains high precision for the most recent window tokens in the KV cache, preserving accuracy for a small yet critical portion of the cache.\nOur evaluation of LLMs demonstrates that SKVQ achieves high compression ratios while maintaining accuracy, outperforming previous quantization methods. SKVQ enables the quantization of the KV cache to 2-bit keys and 1.5-bit values with minimal accuracy loss. This advancement allows processing context lengths of up to 1M tokens on an 80GB GPU for a 7B parameter model, resulting in up to 7 times faster decoding.", "title":"SKVQ: Sliding-window Key and Value Cache Quantization for Large Language Models", "authors":[ "Haojie Duanmu", "Zhihang Yuan", "Xiuhong Li", "Jiangfei Duan", "Xingcheng ZHANG", "Dahua Lin" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":68 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=nGCMLATBit", "bibtext":"@inproceedings{\nmallen2024eliciting,\ntitle={Eliciting Latent Knowledge from ''Quirky'' Language Models},\nauthor={Alex Troy Mallen and Madeline Brumley and Julia Kharchenko and Nora Belrose},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=nGCMLATBit}\n}", "abstract":"Eliciting Latent Knowledge (ELK) aims to find patterns in a capable neural network's activations that robustly track the true state of the world, especially in hard-to-verify cases where the model's output is untrusted. To further ELK research, we introduce 12 datasets and a corresponding suite of \"quirky\" language models (LMs) that are finetuned to make systematic errors when answering questions *if and only if* the keyword \"Bob\" is present in the prompt. We find that, especially in middle layers, linear probes usually report an LM's knowledge independently of what the LM outputs, enabling us to elicit the correct answer despite the model's untruthful output. The best probing method (logistic regression on contrast pairs) recovers 89% of the gap in AUROC between truthful and untruthful contexts, and 75% for questions harder than those used to train the probe. We also find that a mechanistic anomaly detection approach can flag untruthful behavior with 0.95 AUROC. Our results show promise for eliciting reliable knowledge from capable but untrusted models, and facilitates future research empirically investigating ELK methods.", "title":"Eliciting Latent Knowledge from \"Quirky\" Language Models", "authors":[ "Alex Troy Mallen", "Madeline Brumley", "Julia Kharchenko", "Nora Belrose" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/eleutherai\/elk-generalization" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":69 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=mkYCfO822n", "bibtext":"@inproceedings{\nlee2024ambigdocs,\ntitle={AmbigDocs: Reasoning across Documents on Different Entities under the Same Name},\nauthor={Yoonsang Lee and Xi Ye and Eunsol Choi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=mkYCfO822n}\n}", "abstract":"Different entities with the same name can be difficult to distinguish. Handling confusing entity mentions is a crucial skill for language models (LMs). For example, given the question \u201cWhere was Michael Jordan educated?\u201d and a set of documents discussing different people named Michael Jordan, can LMs distinguish entity mentions to generate a cohesive answer to the question? To test this ability, we introduce a new benchmark, AmbigDocs. By leveraging Wikipedia\u2019s disambiguation pages, we identify a set of documents, belonging to different entities who share an ambiguous name. From these documents, we generate questions containing an ambiguous name\nand their corresponding sets of answers. Our analysis reveals that current state-of-the-art models often yield ambiguous answers or incorrectly merge information belonging to different entities. We establish an ontology categorizing four types of incomplete answers and automatic evaluation metrics to identify such categories. We lay the foundation for future work on reasoning across multiple documents with ambiguous entities.", "title":"AmbigDocs: Reasoning across Documents on Different Entities under the Same Name", "authors":[ "Yoonsang Lee", "Xi Ye", "Eunsol Choi" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.12447", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.12447", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ "yoonsanglee\/AmbigDocs" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":70 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=mUlLf50Y6H", "bibtext":"@inproceedings{\nwang2024is,\ntitle={Is Chat{GPT} a Good Sentiment Analyzer?},\nauthor={Zengzhi Wang and Qiming Xie and Yi Feng and Zixiang Ding and Zinong Yang and Rui Xia},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=mUlLf50Y6H}\n}", "abstract":"Recently, ChatGPT has drawn great attention from both the research community and the public. We are particularly interested in whether it can serve as a universal sentiment analyzer. To this end, in this work, we provide a comprehensive evaluation of ChatGPT on the understanding of \\emph{opinions}, \\emph{sentiments}, and \\emph{emotions} contained in the text. Specifically, we evaluate it in three settings, including \\emph{standard} evaluation, \\emph{polarity shift} evaluation and \\emph{open-domain} evaluation. We conduct an evaluation on 7 representative sentiment analysis tasks covering 17 benchmark datasets and compare ChatGPT with fine-tuned BERT and corresponding state-of-the-art (SOTA) models on them. We also attempt several popular prompting techniques to elicit the ability further. Moreover, we conduct human evaluation and present some qualitative case studies to gain a deep comprehension of its sentiment analysis capabilities.", "title":"Is ChatGPT a Good Sentiment Analyzer?", "authors":[ "Zengzhi Wang", "Qiming Xie", "Yi Feng", "Zixiang Ding", "Zinong Yang", "Rui Xia" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/nustm\/chatgpt-sentiment-evaluation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":71 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=lkrH6ovzsj", "bibtext":"@inproceedings{\nshafayat2024multifact,\ntitle={Multi-{FA}ct: Assessing Factuality of Multilingual {LLM}s using {FA}ctScore},\nauthor={Sheikh Shafayat and Eunsu Kim and Juhyun Oh and Alice Oh},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=lkrH6ovzsj}\n}", "abstract":"Evaluating the factuality of long-form large language model (LLM)-generated text is an important challenge. Recently there has been a surge of interest in factuality evaluation for English, but little is known about the factuality evaluation of multilingual LLMs, specially when it comes to long-form generation.\nThis paper systematically evaluates multilingual LLMs' factual accuracy across languages and geographic regions.\nWe introduce a simple pipeline for multilingual factuality evaluation, by applying FActScore \\citep{min2023factscore} for diverse languages. In addition to evaluating multilingual factual generation, we evaluate the factual accuracy of long-form text generation in topics that reflect regional diversity. We also examine the feasibility of running the FActScore pipeline using non-English Wikipedia and provide comprehensive guidelines on multilingual factual evaluation for regionally diverse topics.", "title":"Multi-FAct: Assessing Factuality of Multilingual LLMs using FActScore", "authors":[ "Sheikh Shafayat", "Eunsu Kim", "Juhyun Oh", "Alice Oh" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/sheikhshafayat\/multi-fact" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":72 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ljFgX6A8NL", "bibtext":"@inproceedings{\nan2024automatic,\ntitle={Automatic Pseudo-Harmful Prompt Generation for Evaluating False Refusals in Large Language Models},\nauthor={Bang An and Sicheng Zhu and Ruiyi Zhang and Michael-Andrei Panaitescu-Liess and Yuancheng Xu and Furong Huang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ljFgX6A8NL}\n}", "abstract":"Safety-aligned large language models (LLMs) sometimes falsely refuse pseudo-harmful prompts, like \"how to kill a mosquito,\" which are actually harmless. Frequent false refusals not only frustrate users but also provoke public backlash against the very values alignment seeks to protect. In this paper, we propose the first method to auto-generate diverse, content-controlled, and model-dependent pseudo-harmful prompts. Using this method, we construct an evaluation dataset called PHTest, which is ten times larger than existing datasets, covers more false refusal patterns, and separately labels controversial prompts. We evaluate 20 LLMs on PHTest, uncovering new insights due to its scale and labeling. Our findings reveal a trade-off between minimizing false refusals and improving safety against jailbreak attacks. Moreover, we show that many jailbreak defenses significantly increase the false refusal rates, thereby undermining usability. Our method and dataset can help developers evaluate and fine-tune safer and more usable LLMs. Our code and dataset are available at \\href{https:\/\/github.com\/umd-huang-lab\/FalseRefusal}{https:\/\/github.com\/umd-huang-lab\/FalseRefusal}", "title":"Automatic Pseudo-Harmful Prompt Generation for Evaluating False Refusals in Large Language Models", "authors":[ "Bang An", "Sicheng Zhu", "Ruiyi Zhang", "Michael-Andrei Panaitescu-Liess", "Yuancheng Xu", "Furong Huang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2409.00598", "GitHub":[ "https:\/\/github.com\/umd-huang-lab\/falserefusal" ], "paper_page":"https:\/\/huggingface.co\/papers\/2409.00598", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ "furonghuang-lab\/PHTest" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":73 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=lY6XTF9tPv", "bibtext":"@inproceedings{\nyu2024llasmol,\ntitle={Lla{SM}ol: Advancing Large Language Models for Chemistry with a Large-Scale, Comprehensive, High-Quality Instruction Tuning Dataset},\nauthor={Botao Yu and Frazier N. Baker and Ziqi Chen and Xia Ning and Huan Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=lY6XTF9tPv}\n}", "abstract":"Chemistry plays a crucial role in many domains, such as drug discovery and material science. While large language models (LLMs) such as GPT-4 exhibit remarkable capabilities on natural language processing tasks, existing research indicates that their performance on chemistry tasks is discouragingly low. In this paper, however, we demonstrate that our developed LLMs can achieve very strong results on a comprehensive set of chemistry tasks, outperforming the most advanced GPT-4 and Claude 3 Opus by a substantial margin. To accomplish this, we propose SMolInstruct, a large-scale, comprehensive, and high-quality dataset for instruction tuning. It contains 14 selected chemistry tasks and over three million samples, laying a solid foundation for training and evaluating LLMs for chemistry. Using SMolInstruct, we fine-tune a set of open-source LLMs named as LlaSMol, among which, we find that Mistral serves as the best base model for chemistry tasks. Our analysis further demonstrates the critical role of the proposed dataset in driving the performance improvements.", "title":"LlaSMol: Advancing Large Language Models for Chemistry with a Large-Scale, Comprehensive, High-Quality Instruction Tuning Dataset", "authors":[ "Botao Yu", "Frazier N. Baker", "Ziqi Chen", "Xia Ning", "Huan Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.09391", "GitHub":[ "https:\/\/github.com\/osu-nlp-group\/llm4chem" ], "paper_page":"https:\/\/huggingface.co\/papers\/2402.09391", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":5, "Models":[ "osunlp\/LlaSMol-Mistral-7B", "osunlp\/LlaSMol-CodeLlama-7B", "osunlp\/LlaSMol-Galactica-6.7B" ], "Datasets":[ "osunlp\/SMolInstruct" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":74 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=lVOw78nYXS", "bibtext":"@inproceedings{\nhua2024talk,\ntitle={Talk Less, Interact Better: Evaluating In-context Conversational Adaptation in Multimodal {LLM}s},\nauthor={Yilun Hua and Yoav Artzi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=lVOw78nYXS}\n}", "abstract":"Humans spontaneously use increasingly efficient language as interactions progress, by adapting and forming ad-hoc conventions. This phenomenon has been studied extensively using reference games, showing properties of human language that go beyond relaying intents. It remains unexplored whether multimodal large language models (MLLMs) similarly increase communication efficiency during interactions, and what mechanisms they may adopt for this purpose. \nWe introduce ICCA, an automated framework to evaluate such conversational adaptation as an in-context behavior in MLLMs. We evaluate several state-of-the-art MLLMs, and observe that while they may understand the increasingly efficient language of their interlocutor, they do not spontaneously make their own language more efficient over time. This latter ability can only be elicited in some models (e.g., GPT-4) with heavy-handed prompting. This shows that this property of linguistic interaction does not arise from current training regimes, even though it is a common hallmark of human language.", "title":"Talk Less, Interact Better: Evaluating In-context Conversational Adaptation in Multimodal LLMs", "authors":[ "Yilun Hua", "Yoav Artzi" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.01417", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":75 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=lJMioZBoR8", "bibtext":"@inproceedings{\nxu2024rejection,\ntitle={Rejection Improves Reliability: Training {LLM}s to Refuse Unknown Questions Using {RL} from Knowledge Feedback},\nauthor={Hongshen Xu and Zichen Zhu and Situo Zhang and Da Ma and Shuai Fan and Lu Chen and Kai Yu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=lJMioZBoR8}\n}", "abstract":"Large Language Models (LLMs) often generate erroneous outputs, known as hallucinations, due to their limitations in discerning questions beyond their knowledge scope. While addressing hallucination has been a focal point in research, previous efforts primarily concentrate on enhancing correctness without giving due consideration to the significance of rejection mechanisms. In this paper, we conduct a comprehensive examination of the role of rejection, introducing the alignment goal of model reliability along with corresponding metrics. This goal requires the model to provide accurate responses while adeptly rejecting questions exceeding its knowledge boundaries, thereby minimizing hallucinations. To improve the inherent reliability of LLMs, we present a novel alignment framework called Reinforcement Learning from Knowledge Feedback (RLKF). RLKF leverages knowledge feedback to dynamically determine the model's knowledge boundary and trains a reliable reward model to encourage the rejection of out-of-knowledge questions. Experimental results on mathematical and question answering datasets affirm the substantial efficacy of RLKF in significantly enhancing LLM reliability.", "title":"Rejection Improves Reliability: Training LLMs to Refuse Unknown Questions Using RL from Knowledge Feedback", "authors":[ "Hongshen Xu", "Zichen Zhu", "Situo Zhang", "Da Ma", "Shuai Fan", "Lu Chen", "Kai Yu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.18349", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.18349", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":76 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kzzwTrt04Z", "bibtext":"@inproceedings{\nkushnareva2024boundary,\ntitle={Boundary detection in mixed {AI}-human texts},\nauthor={Laida Kushnareva and Tatiana Gaintseva and Dmitry Abulkhanov and Kristian Kuznetsov and German Magai and Eduard Tulchinskii and Serguei Barannikov and Sergey Nikolenko and Irina Piontkovskaya},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kzzwTrt04Z}\n}", "abstract":"Due to the rapid development of large language models, people increasingly often encounter texts that may start as written by a human but continue as machine-generated. Detecting the boundary between human-written and machine-generated parts of such texts is a challenging problem that has not received much attention in literature. We attempt to bridge this gap and examine several ways to adapt state of the art artificial text detection classifiers to the boundary detection setting. We push all detectors to their limits, using the Real or Fake text benchmark that contains short texts on several topics and includes generations of various language models. We use this diversity to deeply examine the robustness of all detectors in cross-domain and cross-model settings to provide baselines and insights for future research. In particular, we find that perplexity-based approaches to boundary detection tend to be more robust to peculiarities of domain-specific data than supervised fine-tuning of the RoBERTa model; we also find which features of the text confuse boundary detection algorithms and negatively influence their performance in cross-domain settings.", "title":"AI-generated text boundary detection with RoFT", "authors":[ "Laida Kushnareva", "Tatiana Gaintseva", "Dmitry Abulkhanov", "Kristian Kuznetsov", "German Magai", "Eduard Tulchinskii", "Serguei Barannikov", "Sergey Nikolenko", "Irina Piontkovskaya" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/silversolver\/ai_boundary_detection" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":77 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kpf7UbnSAm", "bibtext":"@inproceedings{\nzhao2024calora,\ntitle={{CA}-Lo{RA}: Adapting Existing Lo{RA} for Compressed {LLM}s to Enable Efficient Multi-Tasking on Personal Devices},\nauthor={Weilin Zhao and Yuxiang Huang and Xu Han and Zhiyuan Liu and Zhengyan Zhang and Kuai Li and Chen Chen and TAO YANG and Maosong Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kpf7UbnSAm}\n}", "abstract":"Recently, there has been a demand to deploy Large Language Models (LLMs) on personal devices such as laptops and smartphones. These LLMs have different model variants when handling different tasks. However, personal devices have limited resources and require reduced storage overhead. To address this, there are two key methods available: the first is model compression, which compresses LLMs into smaller sizes; the second is LoRA, which can transfer an LLM to other tasks with very few parameters, avoiding the storage of multiple model variants in multi-task scenarios by only preserving LoRAs.\nHowever, our experiments show that directly combining these two methods yields sub-optimal performance. Considering that the open-source community has already contributed many LoRAs to LLMs, we propose to adapt these existing LoRAs from the LLMs to their compressed version and introduce a Compression-Aware LoRA (CA-LoRA) framework.\nWe incorporate knowledge inheritance and recovery strategies to recover the lost knowledge caused by model compression.\nExperiment results demonstrate that CA-LoRA outperforms the vanilla LoRA methods applied to a compressed LLM and achieves comparable performance to the non-compressed LLM with existing LoRA modules. The source code of CA-LoRA is available at https:\/\/github.com\/thunlp\/CA-LoRA.", "title":"CA-LoRA: Adapting Existing LoRA for Compressed LLMs to Enable Efficient Multi-Tasking on Personal Devices", "authors":[ "Weilin Zhao", "Yuxiang Huang", "Xu Han", "Zhiyuan Liu", "Zhengyan Zhang", "Kuai Li", "Chen Chen", "TAO YANG", "Maosong Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"2307.07705", "GitHub":[ "https:\/\/github.com\/thunlp\/ca-lora" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":78 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kh9Zt2Ldmn", "bibtext":"@inproceedings{\nliu2024dont,\ntitle={Don't throw away your value model! Generating more preferable text with Value-Guided Monte-Carlo Tree Search decoding},\nauthor={Jiacheng Liu and Andrew Cohen and Ramakanth Pasunuru and Yejin Choi and Hannaneh Hajishirzi and Asli Celikyilmaz},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kh9Zt2Ldmn}\n}", "abstract":"Inference-time search algorithms such as Monte-Carlo Tree Search (MCTS) may seem unnecessary when generating natural language text based on state-of-the-art reinforcement learning such as Proximal Policy Optimization (PPO). In this paper, we demonstrate that it is possible to get extra mileage out of PPO by integrating MCTS on top. The key idea is not to throw out the *value network*, a byproduct of PPO training for evaluating partial output sequences, when decoding text out of the *policy network*. More concretely, we present a novel *value-guided* decoding algorithm called **PPO-MCTS**, which can integrate the value network from PPO to work closely with the policy network during inference-time generation. Compared to prior approaches based on MCTS for controlled text generation, the key strength of our approach is to reduce the fundamental mismatch of the scoring mechanisms of the partial outputs between training and test. Evaluation on four text generation tasks demonstrate that PPO-MCTS greatly improves the preferability of generated text compared to the standard practice of using only the PPO policy. Our results demonstrate the promise of search algorithms even on top of the aligned language models from PPO, and the under-explored benefit of the value network.", "title":"Don't throw away your value model! Generating more preferable text with Value-Guided Monte-Carlo Tree Search decoding", "authors":[ "Jiacheng Liu", "Andrew Cohen", "Ramakanth Pasunuru", "Yejin Choi", "Hannaneh Hajishirzi", "Asli Celikyilmaz" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":79 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kWnlCVcp6o", "bibtext":"@inproceedings{\ntao2024crystal,\ntitle={Crystal: Illuminating {LLM} Abilities on Language and Code},\nauthor={Tianhua Tao and Junbo Li and Bowen Tan and Hongyi Wang and William Marshall and Bhargav M Kanakiya and Joel Hestness and Natalia Vassilieva and Zhiqiang Shen and Eric P. Xing and Zhengzhong Liu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kWnlCVcp6o}\n}", "abstract":"Large Language Models (LLMs) specializing in code generation (which are also often referred to as code LLMs), e.g., StarCoder and Code Llama, play increasingly critical roles in various software development scenarios. It is also crucial for code LLMs to possess both code generation and natural language abilities for many specific applications, such as code snippet retrieval using natural language or code explanations. The intricate interaction between acquiring language and coding skills complicates the development of strong code LLMs. Furthermore, there is a lack of thorough prior studies on the LLM pretraining strategy that mixes code and natural language. In this work, we propose a pretraining strategy to enhance the integration of natural language and coding capabilities within a single LLM. Specifically, it includes two phases of training with appropriately adjusted code\/language ratio. The resulting model, CRYSTAL, demonstrates remarkable capabilities in both domains. Specifically, it has natural language and coding performance comparable to that of Llama 2 and Code Llama, respectively. CRYSTAL exhibits better data efficiency, using 1.4 trillion tokens compared to the more than 2 trillion tokens used by Llama 2 and Code Llama. We verify our pretraining strategy by analyzing the training process and observe consistent improvements in most benchmarks. We also adopted a typical application adaption phase with a code-centric data mixture, only to find out that it did not lead to enhanced performance or training efficiency, underlining the importance of a carefully designed data recipe. To foster research within the community, we commit to open-sourcing every detail of the pretraining, including our training datasets, code, loggings and 136 checkpoints throughout the training.", "title":"Crystal: Illuminating LLM Abilities on Language and Code", "authors":[ "Tianhua Tao", "Junbo Li", "Bowen Tan", "Hongyi Wang", "William Marshall", "Bhargav M Kanakiya", "Joel Hestness", "Natalia Vassilieva", "Zhiqiang Shen", "Eric P. Xing", "Zhengzhong Liu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":80 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kLH4ccaL21", "bibtext":"@inproceedings{\ndavani2024genil,\ntitle={GeniL: A Multilingual Dataset on Generalizing Language},\nauthor={Aida Mostafazadeh Davani and Sagar Gubbi Venkatesh and Sunipa Dev and Shachi Dave and Vinodkumar Prabhakaran},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kLH4ccaL21}\n}", "abstract":"Generative language models are increasingly transforming our digital ecosystem, but they often inherit societal biases learned from their training data, for instance stereotypes associating certain attributes with specific identity groups. While whether and how these biases are mitigated may depend on the specific use cases, being able to effectively detect instances of stereotype perpetuation is a crucial first step. Current methods to assess presence of stereotypes in generated language rely on simple template or co-occurrence based measures, without accounting for the variety of sentential contexts they manifest in. We argue that the sentential context is crucial to determine if the co-occurrence of an identity term and an attribute is an instance of generalization. We distinguish two types of generalizations ---(1) where the language merely mentions the presence of a generalization (e.g., \"people think the French are very rude\"), and (2) where the language reinforces such a generalization (e.g., \"as French they must be rude\"---, from a non-generalizing context (e.g., \"My French friends think I am rude\"). \nFor meaningful stereotype evaluations, we need scalable ways to reliably detect and distinguish such instances of generalizations.\nTo address this gap, we introduce the new task of detecting generalization in language, and build GeniL, a multilingual dataset of over 50K sentences from 9 languages ---English, Arabic, Bengali, Spanish, French, Hindi, Indonesian, Malay, and Portuguese--- annotated for instances of generalizations and their types. We demonstrate that the likelihood of a co-occurrence being an instance of generalization is usually low, and varies across different languages, identity groups, and attributes, underscoring the inadequacy of simplistic co-occurrence based approaches. We also build classifiers that can detect generalization in language with an overall PR-AUC of 58.7, with varying degrees of performance across languages. Our research provides data and tools to enable a nuanced understanding of stereotype perpetuation, a crucial step towards more inclusive and responsible language technologies.", "title":"GeniL: A Multilingual Dataset on Generalizing Language", "authors":[ "Aida Mostafazadeh Davani", "Sagar Gubbi Venkatesh", "Sunipa Dev", "Shachi Dave", "Vinodkumar Prabhakaran" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":81 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kIoBbc76Sy", "bibtext":"@inproceedings{\nhsieh2024ruler,\ntitle={{RULER}: What{\\textquoteright}s the Real Context Size of Your Long-Context Language Models?},\nauthor={Cheng-Ping Hsieh and Simeng Sun and Samuel Kriman and Shantanu Acharya and Dima Rekesh and Fei Jia and Boris Ginsburg},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kIoBbc76Sy}\n}", "abstract":"The needle-in-a-haystack (NIAH) test, which examines the ability to retrieve a piece of information (the \u201cneedle\u201d) from long distractor texts (the \u201chaystack\u201d), has been widely adopted to evaluate long-context language models (LMs). However, this simple retrieval-based test is indicative of only a superficial form of long-context understanding. To provide a more comprehensive evaluation of long-context LMs, we create a new synthetic benchmark RULER with flexible configurations for customized sequence length and task complexity. RULER expands upon the vanilla NIAH test to encompass variations with diverse types and quantities of needles. Moreover, RULER introduces new task categories multi-hop tracing and aggregation to test behaviors beyond searching from context. We evaluate 17 long-context LMs with 13 representative tasks in RULER. Despite achieving nearly perfect accuracy in the vanilla NIAH test, almost all models exhibit large performance drops as the context length increases. While these models all claim context sizes of 32K tokens or greater, only half of them can maintain satisfactory performance at the length of 32K. Our analysis of Yi-34B, which supports context length of 200K, reveals large room for improvement as we increase input length and task complexity. We open source RULER to spur comprehensive evaluation of long-context LMs.", "title":"RULER: What\u2019s the Real Context Size of Your Long-Context Language Models?", "authors":[ "Cheng-Ping Hsieh", "Simeng Sun", "Samuel Kriman", "Shantanu Acharya", "Dima Rekesh", "Fei Jia", "Boris Ginsburg" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":82 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kHO2ZTa8e3", "bibtext":"@inproceedings{\nhuang2024the,\ntitle={The N+ Implementation Details of {RLHF} with {PPO}: A Case Study on {TL};{DR} Summarization},\nauthor={Shengyi Huang and Michael Noukhovitch and Arian Hosseini and Kashif Rasul and Weixun Wang and Lewis Tunstall},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kHO2ZTa8e3}\n}", "abstract":"This work is the first to openly reproduce the Reinforcement Learning from Human Feedback (RLHF) scaling behaviors reported in OpenAI's seminal TL;DR summarization work. We create an RLHF pipeline from scratch, enumerate over 20 key implementation details, and share key insights during the reproduction. Our RLHF-trained Pythia models demonstrate significant gains in response quality that scale with model size, with our 2.8B, 6.9B models outperforming OpenAI's released 1.3B checkpoint. Our results highlight best practices in data, training, and evaluation for RLHF.\nWe publicly release the trained model checkpoints and code to facilitate further research and accelerate progress in the field at https:\/\/github.com\/vwxyzjn\/summarize_from_feedback_details", "title":"The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization", "authors":[ "Shengyi Huang", "Michael Noukhovitch", "Arian Hosseini", "Kashif Rasul", "Weixun Wang", "Lewis Tunstall" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.17031", "GitHub":[ "https:\/\/github.com\/vwxyzjn\/summarize_from_feedback_details" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.17031", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":83 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kGa4fMtP9l", "bibtext":"@inproceedings{\nshi2024can,\ntitle={Can Language Models Solve Olympiad Programming?},\nauthor={Ben Shi and Michael Tang and Karthik R Narasimhan and Shunyu Yao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kGa4fMtP9l}\n}", "abstract":"Olympiad programming is one of the hardest reasoning challenges for humans, yet it has been understudied as a domain to benchmark language models (LMs). In this paper, we introduce the USACO benchmark with 307 problems from USA Computing Olympiad contests, along with high-quality unit tests, reference code, and official analysis for each problem. These resources enable us to construct and test a range of LM inference methods beyond zero-shot prompting for competitive programming. We find state-of-the-art models in code generation, such as GPT-4, achieve only a 8.7\\% pass@1 accuracy with zero-shot chain-of-thought prompting, with our best inference method almost \\textit{doubling} zero-shot accuracy using a novel combination of retrieval augmentation and self-reflection. However, this is still far from solving the benchmark. To better understand the remaining challenges, we perform a novel human-in-the-loop study, and surprisingly find that a small number of targeted hints enable GPT-4 to solve 13 out of 15 problems previously unsolvable by any model and method. Our benchmark, baseline methods, quantitative results, and qualitative analysis thus serve as an initial step towards LMs with grounded, creative, and algorithmic reasoning.", "title":"Can Language Models Solve Olympiad Programming?", "authors":[ "Ben Shi", "Michael Tang", "Karthik R Narasimhan", "Shunyu Yao" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.10952", "GitHub":[ "https:\/\/github.com\/princeton-nlp\/USACO" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.10952", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ "agentharbor\/agenta" ], "paper_page_exists_pre_conf":1, "unique_id":84 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=kEVcNxtqXk", "bibtext":"@inproceedings{\nrafailov2024from,\ntitle={From \\$r\\$ to \\$Q{\\textasciicircum}*\\$: Your Language Model is Secretly a Q-Function},\nauthor={Rafael Rafailov and Joey Hejna and Ryan Park and Chelsea Finn},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=kEVcNxtqXk}\n}", "abstract":"Reinforcement Learning From Human Feedback (RLHF) has been a critical component of the success of the latest generation of generative AI models, including the GPT series. However, this is an involved and complex process and direct alignment algorithms, such as DPO have recently emerged as an alternative approach to the classical RLHF pipeline. Although DPO solves the same objective as the standard RLHF setup, there is a mismatch between the two approaches. Standard RLHF deploys reinforcement learning in a specific token-level MDP, while DPO is derived as a bandit problem in which the whole response of the model is treated as a single arm. In this work we rectify this difference, first we theoretically show that we can derive DPO in the token-level MDP as a general inverse Q-learning algorithm, which satisfies the Bellman equation. Using our theoretical results, we provide three concrete empirical insights. First, we show that because of its token level interpretation, DPO is able to perform some type of credit assignment. Next, we prove that under the token level formulation, classical search-based algorithms, such as MCTS, which have recently been applied to the language generation space, are equivalent to likelihood-based search on a DPO policy and empirically we show that a simple beam search yields meaningful improvement over the base DPO policy. Finally, we show how the choice of SFT policy causes implicit rewards to decline during training. We conclude by discussing applications of our work, including information elicitation in multi-tun dialogue, reasoning, agentic applications and end-to-end training of multi-model systems.", "title":"From r to Q^*: Your Language Model is Secretly a Q-Function", "authors":[ "Rafael Rafailov", "Joey Hejna", "Ryan Park", "Chelsea Finn" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":85 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=k8KS9Ps71d", "bibtext":"@inproceedings{\nyuan2024probelm,\ntitle={{PR}ob{ELM}: Plausibility Ranking Evaluation for Language Models},\nauthor={Moy Yuan and Eric Chamoun and Rami Aly and Chenxi Whitehouse and Andreas Vlachos},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=k8KS9Ps71d}\n}", "abstract":"This paper introduces PRobELM (Plausibility Ranking Evaluation for Language Models), a benchmark designed to assess language models' ability to discern more plausible from less plausible scenarios through their parametric knowledge. While benchmarks such as TruthfulQA emphasise factual accuracy or truthfulness, and others such as COPA explore plausible scenarios without explicitly incorporating world knowledge, PRobELM seeks to bridge this gap by evaluating models' capabilities to prioritise plausible scenarios that leverage world knowledge over less plausible alternatives. This design allows us to assess the potential of language models for downstream use cases such as literature-based discovery where the focus is on identifying information that is likely but not yet known. Our benchmark is constructed from a dataset curated from Wikidata edit histories, tailored to align the temporal bounds of the training data for the evaluated models. PRobELM facilitates the evaluation of language models across multiple prompting types, including statement, text completion, and question-answering. Experiments with 10 models of various sizes and architectures on the relationship between model scales, training recency, and plausibility performance, reveal that factual accuracy does not directly correlate with plausibility performance and that up-to-date training data enhances plausibility assessment across different model architectures.", "title":"PRobELM: Plausibility Ranking Evaluation for Language Models", "authors":[ "Moy Yuan", "Eric Chamoun", "Rami Aly", "Chenxi Whitehouse", "Andreas Vlachos" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhangdiey\/probelm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":86 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=k2xZYPZo34", "bibtext":"@inproceedings{\njain2024bring,\ntitle={Bring Your Own Data! Self-Sensitivity Evaluation for Large Language Models},\nauthor={Neel Jain and Khalid Saifullah and Yuxin Wen and John Kirchenbauer and Manli Shu and Aniruddha Saha and Micah Goldblum and Jonas Geiping and Tom Goldstein},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=k2xZYPZo34}\n}", "abstract":"With the rise of Large Language Models (LLMs) and their ubiquitous deployment in diverse domains, measuring language model behavior on realistic data is imperative. For example, a company deploying a client-facing chatbot must ensure that the model will not respond to client requests with profanity. Current evaluations approach this problem using small, domain-specific datasets with human-curated labels. These evaluation sets are often sampled from a narrow and simplified distribution, and data sources can unknowingly be leaked into the training set. To alleviate these issues in traditional evaluation, we propose a complementary framework for additional self-sensitivity evaluation of LLMs by analyzing their sensitivity or invariance to transformations on the input text. Self-sensitivity evaluation can directly monitor LLM behavior on datasets collected in-the-wild or streamed during live model deployment. We demonstrate self-sensitivity evaluation strategies for measuring closed-book knowledge, toxicity, long-range context dependence, in addition to sensitivity to grammatical structure and tokenization errors. When comparisons to similar human-labeled benchmarks are available, we find strong correlations between self-sensitivity and human-supervised evaluations. The self-sensitivity paradigm complements current evaluation strategies that rely on labeled data.", "title":"Bring Your Own Data! Self-Sensitivity Evaluation for Large Language Models", "authors":[ "Neel Jain", "Khalid Saifullah", "Yuxin Wen", "John Kirchenbauer", "Manli Shu", "Aniruddha Saha", "Micah Goldblum", "Jonas Geiping", "Tom Goldstein" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":87 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=jt0R50d5nk", "bibtext":"@inproceedings{\nzeng2024can,\ntitle={Can {MLLM}s Perform Text-to-Image In-Context Learning?},\nauthor={Yuchen Zeng and Wonjun Kang and Yicong Chen and Hyung Il Koo and Kangwook Lee},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=jt0R50d5nk}\n}", "abstract":"The evolution from Large Language Models (LLMs) to Multimodal Large Language Models (MLLMs) has spurred research into extending In-Context Learning (ICL) to its multimodal counterpart. Existing such studies have primarily concentrated on image-to-text ICL. However, the Text-to-Image ICL (T2I-ICL), with its unique characteristics and potential applications, remains underexplored. To address this gap, we formally define the task of T2I-ICL and present **CoBSAT**, the first T2I-ICL benchmark dataset, encompassing ten tasks. Utilizing our dataset to benchmark six state-of-the-art MLLMs, we uncover considerable difficulties MLLMs encounter in solving T2I-ICL. We identify the primary challenges as the inherent complexity of multimodality and image generation, and show that strategies such as fine-tuning and Chain-of-Thought prompting help to mitigate these difficulties, leading to notable improvements in performance. Our code and dataset are available at https:\/\/github.com\/UW-Madison-Lee-Lab\/CoBSAT.", "title":"Can MLLMs Perform Text-to-Image In-Context Learning?", "authors":[ "Yuchen Zeng", "Wonjun Kang", "Yicong Chen", "Hyung Il Koo", "Kangwook Lee" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/uw-madison-lee-lab\/cobsat" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":88 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=jq2kNXigPP", "bibtext":"@inproceedings{\nli2024destein,\ntitle={DeStein: Navigating Detoxification of Language Models via Universal Steering Pairs and Head-wise Activation Fusion},\nauthor={Yu Li and Han Jiang and Chuanyang Gong and Zhihua Wei},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=jq2kNXigPP}\n}", "abstract":"Despite the remarkable achievements of language models (LMs) across a broad spectrum of tasks, their propensity for generating toxic outputs remains a prevalent concern. Current solutions involving finetuning or auxiliary models usually require extensive computational resources, hindering their practicality in large language models (LLMs). In this paper, we propose DeStein, a novel method that detoxifies LMs by applying representation engineering in activation spaces with lower resource and time costs. Specifically, we derive detoxification vectors from self-induced, universal steering pairs through arithmetic operations in activation spaces. During inference, detoxification is achieved by fusing the detoxification vectors with the original representations in a head-wise manner. Empirical results demonstrate that our method significantly outperforms previous state-of-the-art approaches on various metrics, while also maintaining satisfactory generation quality and diversity. We further validate the practicality and scalability of DeStein with a series of white-box LLMs. Warning: Some example model outputs may contain highly offensive or disturbing text.", "title":"DeStein: Navigating Detoxification of Language Models via Universal Steering Pairs and Head-wise Activation Fusion", "authors":[ "Yu Li", "Han Jiang", "Chuanyang Gong", "Zhihua Wei" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.10464", "GitHub":[ "https:\/\/github.com\/lizlizli\/destein" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":89 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=j3AAkO5xgr", "bibtext":"@inproceedings{\nchen2024understanding,\ntitle={Understanding Retrieval Augmentation for Long-Form Question Answering},\nauthor={Hung-Ting Chen and Fangyuan Xu and Shane Arora and Eunsol Choi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=j3AAkO5xgr}\n}", "abstract":"How retrieved documents are used in language models (LMs) for long-form generation task is understudied. We present two controlled studies on retrieval-augmented LM for long-form question answering (LFQA): one fixing the LM and varying evidence documents and the other fixing evidence documents and varying the LMs.\nWe study various attributes of generated answers (e.g., fluency, length, variance), with an emphasis on the attribution of generated answers to in-context evidence documents. We collect a dataset (SALAD) containing human annotations of sentence-level answer attribution in LFQA and evaluate existing methods for automatically judging attribution.\nWe find that while LMs can leverage relevant in-context documents, the generated answer is only partially attributable towards the documents, especially for LMs trained without retrieval augmentation.\nTogether, our analysis reveals how retrieval augmentation impacts long knowledge-rich text generation and provide directions for future work.", "title":"Understanding Retrieval Augmentation for Long-Form Question Answering", "authors":[ "Hung-Ting Chen", "Fangyuan Xu", "Shane Arora", "Eunsol Choi" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.12150", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.12150", "n_linked_authors":0, "upvotes":1, "num_comments":1, "n_authors":4, "Models":[ ], "Datasets":[ "lytang\/LLM-AggreFact", "osunlp\/AttributionBench" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":90 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ig6NI9oPhD", "bibtext":"@inproceedings{\nqin2024lampo,\ntitle={{LAMPO}: Large Language Models as Preference Machines for Few-shot Ordinal Classification},\nauthor={Zhen Qin and Junru Wu and Jiaming Shen and Tianqi Liu and Xuanhui Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ig6NI9oPhD}\n}", "abstract":"We introduce LAMPO, a novel paradigm that leverages Large Language Models (LLMs) for solving few-shot multi-class ordinal classification tasks. Unlike conventional methods, which concatenate all demonstration examples with the test instance and prompt LLMs to produce the pointwise prediction, our framework uses the LLM as a preference machine that makes a relative comparative decision between the test instance and each demonstration. A self-supervised method is then introduced to aggregate these binary comparisons into the final ordinal decision. LAMPO addresses several limitations inherent in previous methods, including context length constraints, ordering biases, and challenges associated with absolute point-wise estimation. Extensive experiments on seven public datasets demonstrate LAMPO's remarkably competitive performance across a diverse spectrum of applications (e.g., movie review analysis and hate speech detection). Notably, in certain applications, the improvement can be substantial, exceeding 20% in an absolute term. Moreover, we believe LAMPO represents an interesting addition to the non-parametric application layered on top of LLMs, as it supports black-box LLMs without necessitating the outputting of LLM's internal states (e.g., embeddings), as seen in previous approaches.", "title":"LAMPO: Large Language Models as Preference Machines for Few-shot Ordinal Classification", "authors":[ "Zhen Qin", "Junru Wu", "Jiaming Shen", "Tianqi Liu", "Xuanhui Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":91 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=iMqJsQ4evS", "bibtext":"@inproceedings{\nzhang2024llm,\ntitle={{LLM} as a Mastermind: A Survey of Strategic Reasoning with Large Language Models},\nauthor={Yadong Zhang and Shaoguang Mao and Tao Ge and Xun Wang and Yan Xia and Wenshan Wu and Ting Song and Man Lan and Furu Wei},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=iMqJsQ4evS}\n}", "abstract":"This paper presents a comprehensive survey of the current status and opportunities for Large Language Models (LLMs) in strategic reasoning, a sophisticated form of reasoning that necessitates understanding and predicting adversary actions in multi-agent settings while adjusting strategies accordingly. Strategic reasoning is distinguished by its focus on the dynamic and uncertain nature of interactions among multi-agents, where comprehending the environment and anticipating the behavior of others is crucial. We explore the scopes, applications, methodologies, and evaluation metrics related to strategic reasoning with LLMs, highlighting the burgeoning development in this area and the interdisciplinary approaches enhancing their decision-making performance. It aims to systematize and clarify the scattered literature on this subject, providing a systematic review that underscores the importance of strategic reasoning as a critical cognitive capability and offers insights into future research directions and potential improvements.", "title":"LLM as a Mastermind: A Survey of Strategic Reasoning with Large Language Models", "authors":[ "Yadong Zhang", "Shaoguang Mao", "Tao Ge", "Xun Wang", "Yan Xia", "Wenshan Wu", "Ting Song", "Man Lan", "Furu Wei" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01230", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":92 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=iI1CzEhEMU", "bibtext":"@inproceedings{\nxu2024do,\ntitle={Do Large Language Models Have Compositional Ability? An Investigation into Limitations and Scalability},\nauthor={Zhuoyan Xu and Zhenmei Shi and Yingyu Liang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=iI1CzEhEMU}\n}", "abstract":"Large language models (LLMs) have emerged as powerful tools for many AI problems and exhibit remarkable in-context learning (ICL) capabilities. Compositional ability, solving unseen complex tasks that combine two or more simple tasks, is an essential reasoning ability for Artificial General Intelligence. \nDespite the tremendous success of LLMs, how they approach composite tasks, especially those not encountered during the pretraining phase, remains an open and largely underexplored question.\nIn this study, we delve into the ICL capabilities of LLMs on composite tasks, with only simple tasks as in-context examples. We develop a test suite of composite tasks including linguistic and logical challenges and perform empirical studies across different LLM families. We observe that models exhibit divergent behaviors: (1) For simpler composite tasks that apply distinct mapping mechanisms to different input segments, the models demonstrate decent compositional ability, while scaling up the model enhances this ability; (2) for more complex composite tasks involving reasoning multiple steps, where each step represents one task, models typically underperform, and scaling up generally provides no improvements. \nWe offer theoretical analysis in a simplified setting, explaining that models exhibit compositional capability when the task handles different input parts separately.\nWe believe our work sheds new light on the capabilities of LLMs in solving composite tasks regarding the nature of the tasks and model scale. Our dataset and code are available at {\\url{https:\/\/github.com\/OliverXUZY\/LLM_Compose}}.", "title":"Do Large Language Models Have Compositional Ability? An Investigation into Limitations and Scalability", "authors":[ "Zhuoyan Xu", "Zhenmei Shi", "Yingyu Liang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2407.15720", "GitHub":[ "https:\/\/github.com\/oliverxuzy\/llm_compose" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":93 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=i2oJjC0ESQ", "bibtext":"@inproceedings{\nlong2024does,\ntitle={Does In-Context Learning Really Learn? Rethinking How Large Language Models Respond and Solve Tasks via In-Context Learning},\nauthor={Quanyu Long and Yin Wu and Wenya Wang and Sinno Jialin Pan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=i2oJjC0ESQ}\n}", "abstract":"In-context Learning (ICL) has emerged as a powerful capability alongside the development of scaled-up large language models (LLMs). By instructing LLMs using few-shot demonstrative examples, ICL enables them to perform a wide range of tasks without updating millions of parameters. However, the precise contributions of demonstrations towards improving end-task performance have not been thoroughly investigated in recent analytical studies. In this paper, we empirically decompose the overall performance of ICL into three dimensions, label space, format, and discrimination, and we evaluate four general-purpose LLMs across a diverse range of tasks. Counter-intuitively, we find that the demonstrations have a marginal impact on provoking discriminative knowledge of language models. However, ICL exhibits significant efficacy in regulating the label space and format, which helps LLMs respond to desired label words. We then demonstrate that this ability functions similar to detailed instructions for LLMs to follow. We additionally provide an in-depth analysis of the mechanism of retrieval helping with ICL. Our findings demonstrate that retrieving the semantically similar examples notably boosts the model's discriminative capability. However, we also observe a trade-off in selecting good in-context examples regarding label diversity.", "title":"Does In-Context Learning Really Learn? Rethinking How Large Language Models Respond and Solve Tasks via In-Context Learning", "authors":[ "Quanyu Long", "Yin Wu", "Wenya Wang", "Sinno Jialin Pan" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":94 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=hDoN0CAy5e", "bibtext":"@inproceedings{\ncao2024characterizing,\ntitle={Characterizing Multimodal Long-form Summarization: A Case Study on Financial Reports},\nauthor={Tianyu Cao and Natraj Raman and Danial Dervovic and Chenhao Tan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=hDoN0CAy5e}\n}", "abstract":"As large language models (LLMs) expand the power of natural language processing to handle long inputs, rigorous and systematic analyses are necessary to understand their abilities and behavior. A salient application is summarization, due to its ubiquity and controversy (e.g., researchers have declared the death of summarization). In this paper, we use financial report summarization as a case study because financial reports are not only long but also use numbers and tables extensively. We propose a computational framework for characterizing multimodal long-form summarization and investigate the behavior of Claude 2.0\/2.1, GPT-4\/3.5, and Cohere. We find that GPT-3.5 and Cohere fail to perform this summarization task meaningfully. For Claude 2 and GPT-4, we analyze the extractiveness of the summary and identify a position bias in LLMs. This position bias disappears after shuffling the input for Claude, which suggests that Claude seems to recognize important information. We also conduct a comprehensive investigation on the use of numeric data in LLM-generated summaries and offer a taxonomy of numeric hallucination. We employ prompt engineering to improve GPT-4's use of numbers with limited success. Overall, our analyses highlight the strong capability of Claude 2 in handling long multimodal inputs compared to GPT-4. The generated summaries and evaluation code are available at https:\/\/github.com\/ChicagoHAI\/characterizing-multimodal-long-form-summarization.", "title":"Characterizing Multimodal Long-form Summarization: A Case Study on Financial Reports", "authors":[ "Tianyu Cao", "Natraj Raman", "Danial Dervovic", "Chenhao Tan" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.06162", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":95 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=h5umhm6mzj", "bibtext":"@inproceedings{\nsinghal2024nofuneval,\ntitle={NoFunEval: Funny How Code {LM}s Falter on Requirements Beyond Functional Correctness},\nauthor={Manav Singhal and Tushar Aggarwal and Abhijeet Awasthi and Nagarajan Natarajan and Aditya Kanade},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=h5umhm6mzj}\n}", "abstract":"Existing evaluation benchmarks of language models of code (code LMs) focus almost exclusively on whether the LMs can generate functionally-correct code. In real-world software engineering, developers think beyond functional correctness. They have requirements on \"how'' a functionality should be implemented to meet overall system design objectives like efficiency, security, and maintainability. They would also trust the code LMs more if the LMs demonstrate robust understanding of such requirements.\n\nWe propose a new benchmark NoFunEval to evaluate code LMs on non-functional requirements and simple classification instances for both functional and non-functional requirements. We propose a prompting method, Coding Concepts (CoCo), as a way for a developer to communicate the domain knowledge to the LMs. We conduct an extensive evaluation of twenty-two code LMs. Our finding is that they generally falter when tested on our benchmark, hinting at fundamental blindspots in their training setups. Surprisingly, even the classification accuracy on functional-correctness instances derived from the popular HumanEval benchmark is low, calling in question the depth of their comprehension and the source of their success in generating functionally-correct code in the first place. We release our benchmark and evaluation scripts publicly at https:\/\/aka.ms\/NoFunEval.", "title":"NoFunEval: Funny How Code LMs Falter on Requirements Beyond Functional Correctness", "authors":[ "Manav Singhal", "Tushar Aggarwal", "Abhijeet Awasthi", "Nagarajan Natarajan", "Aditya Kanade" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.15963", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.15963", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ "ManavSinghal157\/NoFunEval" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":96 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=gpgMRWgv9Q", "bibtext":"@inproceedings{\ngupta2024targen,\ntitle={Tar{GEN}: Targeted Data Generation with Large Language Models},\nauthor={Himanshu Gupta and Kevin Scaria and Ujjwala Anantheswaran and Shreyas Verma and Mihir Parmar and Saurabh Arjun Sawant and Chitta Baral and Swaroop Mishra},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=gpgMRWgv9Q}\n}", "abstract":"We present TarGEN, a multi-step prompting strategy for generating high-quality synthetic datasets using LLMs. An advantage of TarGEN is its seedless nature; it does not require specific task instances, broadening its applicability beyond task replication. This differentiates it from other data generation techniques, as it can be leveraged for novel or highly domain-specific tasks with no existing data instances.\nWe augment TarGEN with a self-correction module that enables LLMs to rectify inaccurately labeled instances during dataset creation, ensuring reliable labels. To assess our technique\u2019s effectiveness against existing baselines, we emulate eight tasks from the SuperGLUE benchmark to create a \"synthetic\" version and finetune various language models on both synthetic and original training sets. Evaluation on the original test set reveals that models trained on the synthetic datasets perform \u223c 1 \u2212 3% points higher than those trained on original datasets. Finally, when pre-finetuned on our \"synthetic\" SuperGLUE dataset, Llama2 (7B) yields impressive results on the OpenLLM leaderboard, surpassing the model trained on the Self-Instruct dataset by 2.62% points. Our analysis reveals that the synthetic data generated by TarGEN not only improves model learning, but also has comparable or higher levels of complexity, diversity, and similar levels of bias in comparison with the original data.", "title":"TarGEN: Targeted Data Generation with Large Language Models", "authors":[ "Himanshu Gupta", "Kevin Scaria", "Ujjwala Anantheswaran", "Shreyas Verma", "Mihir Parmar", "Saurabh Arjun Sawant", "Chitta Baral", "Swaroop Mishra" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/kevinscaria\/targen" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":97 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=gUNeyiLNxr", "bibtext":"@inproceedings{\nlepori2024uncovering,\ntitle={Uncovering Intermediate Variables in Transformers using Circuit Probing},\nauthor={Michael A. Lepori and Thomas Serre and Ellie Pavlick},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=gUNeyiLNxr}\n}", "abstract":"Neural network models have achieved high performance on a wide variety\nof complex tasks, but the algorithms that they implement are notoriously\ndifficult to interpret. It is often necessary to hypothesize intermediate variables involved in a network\u2019s computation in order to understand these\nalgorithms. For example, does a language model depend on particular\nsyntactic properties when generating a sentence? Yet, existing analysis\ntools make it difficult to test hypotheses of this type. We propose a new\nanalysis technique \u2013 circuit probing \u2013 that automatically uncovers low-level\ncircuits that compute hypothesized intermediate variables. This enables\ncausal analysis through targeted ablation at the level of model parameters.\nWe apply this method to models trained on simple arithmetic tasks, demonstrating its effectiveness at (1) deciphering the algorithms that models have\nlearned, (2) revealing modular structure within a model, and (3) tracking\nthe development of circuits over training. Across these three experiments\nwe demonstrate that circuit probing combines and extends the capabilities of existing methods, providing one unified approach for a variety of\nanalyses. Finally, we demonstrate circuit probing on a real-world use case:\nuncovering circuits that are responsible for subject-verb agreement and\nreflexive anaphora in GPT2-Small and Medium.", "title":"Uncovering Intermediate Variables in Transformers using Circuit Probing", "authors":[ "Michael A. Lepori", "Thomas Serre", "Ellie Pavlick" ], "id":"Conference", "type":"Poster", "arxiv_id":"2311.04354", "GitHub":[ "https:\/\/github.com\/mlepori1\/circuit_probing" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":98 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=gQAEGSGVnN", "bibtext":"@inproceedings{\nfang2024unimem,\ntitle={UniMem: Towards a Unified View of Long-Context Large Language Models},\nauthor={Junjie Fang and Likai Tang and Hongzhe Bi and Yujia Qin and Si Sun and Zhenyu Li and Haolun Li and Yongjian Li and Xin Cong and Yankai Lin and Yukun Yan and Xiaodong Shi and Sen Song and Zhiyuan Liu and Maosong Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=gQAEGSGVnN}\n}", "abstract":"Long-context processing is a critical ability that constrains the applicability of large language models (LLMs). Although there exist various methods devoted to enhancing the long-context processing ability of LLMs, they are developed in an isolated manner and lack systematic analysis and integration of their strengths, hindering further developments. In this paper, we introduce UniMem, a Unified framework that reformulates existing long-context methods from the view of Memory augmentation of LLMs. Distinguished by its four core dimensions\u2014Memory Management, Memory Writing, Memory Reading, and Memory Injection, UniMem empowers researchers to conduct systematic exploration of long-context methods. We re-formulate 16 existing methods based on UniMem and analyze four representative methods: Transformer-XL, Memorizing Transformer, RMT, and Longformer into equivalent UniMem forms to reveal their design principles and strengths. Based on these analyses, we propose UniMix, an innovative approach that integrates the strengths of these algorithms. Experimental results show that UniMix achieves superior performance in handling long contexts with significantly lower perplexity than baselines. The code is publicly available at https:\/\/github.com\/thunlp\/UniMem.", "title":"UniMem: Towards a Unified View of Long-Context Large Language Models", "authors":[ "Junjie Fang", "Likai Tang", "Hongzhe Bi", "Yujia Qin", "Si Sun", "Zhenyu Li", "Haolun Li", "Yongjian Li", "Xin Cong", "Yankai Lin", "Yukun Yan", "Xiaodong Shi", "Sen Song", "Zhiyuan Liu", "Maosong Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/thunlp\/unimem" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":99 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=fib9qidCpY", "bibtext":"@inproceedings{\nhennigen2024towards,\ntitle={Towards Verifiable Text Generation with Symbolic References},\nauthor={Lucas Torroba Hennigen and Zejiang Shen and Aniruddha Nrusimha and Bernhard Gapp and David Sontag and Yoon Kim},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=fib9qidCpY}\n}", "abstract":"LLMs are vulnerable to hallucinations, and thus their outputs generally require laborious human verification for high-stakes applications. To this end, we propose symbolically grounded generation (SymGen) as a simple approach for enabling easier manual validation of an LLM\u2019s output. SymGen prompts an LLM to interleave its regular output text with explicit symbolic references to fields present in some conditioning data (e.g., a table in JSON format). The references can be used to display the provenance of different spans of text in the generation, reducing the effort required for manual verification. Across a range of data-to-text and question-answering exper- iments, we find that LLMs are able to directly output text that makes use of accurate symbolic references while maintaining fluency and factuality. In a human study we further find that such annotations can streamline human verification of machine-generated text.", "title":"Towards Verifiable Text Generation with Symbolic References", "authors":[ "Lucas Torroba Hennigen", "Zejiang Shen", "Aniruddha Nrusimha", "Bernhard Gapp", "David Sontag", "Yoon Kim" ], "id":"Conference", "type":"Poster", "arxiv_id":"2311.09188", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2311.09188", "n_linked_authors":3, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":100 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=egVSgtJJAx", "bibtext":"@inproceedings{\nliu2024visualwebbench,\ntitle={VisualWebBench: How Far Have Multimodal {LLM}s Evolved in Web Page Understanding and Grounding?},\nauthor={Junpeng Liu and Yifan Song and Bill Yuchen Lin and Wai Lam and Graham Neubig and Yuanzhi Li and Xiang Yue},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=egVSgtJJAx}\n}", "abstract":"Multimodal Large Language models (MLLMs) have shown promise in web-related tasks, but evaluating their performance in the web domain remains a challenge due to the lack of comprehensive benchmarks. Existing benchmarks are either designed for general multimodal tasks, failing to capture the unique characteristics of web pages, or focus on end-to-end web agent tasks, unable to measure fine-grained abilities such as OCR, understanding, and grounding. In this paper, we introduce VisualWebBench, a multimodal benchmark designed to assess the capabilities of MLLMs across a variety of web tasks. VisualWebBench consists of seven tasks, and comprises 1.5K human-curated instances from 139 real websites, covering 87 sub-domains. We evaluate 16 open-source MLLMs, Gemini Pro, Claude-3 series, and GPT-4V(ision) on VisualWebBench, revealing significant challenges and performance gaps. Further analysis highlights the limitations of current MLLMs, including inadequate grounding in text-rich environments and subpar performance with low-resolution image inputs. We believe VisualWebBench will serve as a valuable resource for the research community and contribute to the creation of more powerful and versatile MLLMs for web-related applications.", "title":"VisualWebBench: How Far Have Multimodal LLMs Evolved in Web Page Understanding and Grounding?", "authors":[ "Junpeng Liu", "Yifan Song", "Bill Yuchen Lin", "Wai Lam", "Graham Neubig", "Yuanzhi Li", "Xiang Yue" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.05955", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.05955", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ "visualwebbench\/VisualWebBench" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":101 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=eJ3cHNu7ss", "bibtext":"@inproceedings{\nchen2024huatuogptii,\ntitle={Huatuo{GPT}-{II}, One-stage Training for Medical Adaption of {LLM}s},\nauthor={Junying Chen and Xidong Wang and Ke Ji and Anningzhe Gao and Feng Jiang and Shunian Chen and Hongbo Zhang and Song Dingjie and Wenya Xie and Chuyi Kong and Jianquan Li and Xiang Wan and Haizhou Li and Benyou Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=eJ3cHNu7ss}\n}", "abstract":"Adapting a language model (LM) into a specific domain, *a.k.a* domain adaption, is a common practice when specialized knowledge, e.g. medicine, is not encapsulated in a general language model like Llama2. This typically involves a two-stage process including *continued pre-training* and *supervised fine-tuning*. Implementing a pipeline solution with these two stages not only introduces complexities (necessitating dual meticulous tuning) but also leads to two occurrences of data distribution shifts, exacerbating catastrophic forgetting. To mitigate these, we propose a one-stage domain adaption protocol where heterogeneous data from both the traditional pre-training and supervised stages are unified into a simple instruction-output pair format to achieve efficient knowledge injection. Subsequently, a data priority sampling strategy is introduced to adaptively adjust data mixture during training. Following this protocol, we train HuatuoGPT-II, a specialized LLM for the medical domain in Chinese. HuatuoGPT-II achieve competitive performance with GPT4 across multiple benchmarks, which especially shows the state-of-the-art (SOTA) performance in multiple Chinese medical benchmarks and the newest pharmacist licensure examinations. Furthermore, we explore the phenomenon of one-stage protocols, and the experiments reflect that the simplicity of the proposed protocol improves training stability and domain generalization.", "title":"HuatuoGPT-II, One-stage Training for Medical Adaption of LLMs", "authors":[ "Junying Chen", "Xidong Wang", "Ke Ji", "Anningzhe Gao", "Feng Jiang", "Shunian Chen", "Hongbo Zhang", "Song Dingjie", "Wenya Xie", "Chuyi Kong", "Jianquan Li", "Xiang Wan", "Haizhou Li", "Benyou Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/freedomintelligence\/huatuogpt-ii" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":102 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=eGCw1UVOhk", "bibtext":"@inproceedings{\nkirchenbauer2024lmd,\ntitle={{LMD}3: Language Model Data Density Dependence},\nauthor={John Kirchenbauer and Garrett Honke and Gowthami Somepalli and Jonas Geiping and Katherine Lee and Daphne Ippolito and Tom Goldstein and David Andre},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=eGCw1UVOhk}\n}", "abstract":"We develop a methodology for analyzing language model task performance at the individual example level based on training data density estimation. Experiments with paraphrasing as a controlled intervention on finetuning data demonstrate that increasing the support in the training distribution for specific test queries results in a measurable increase in density, which is also a significant predictor of the performance increase caused by the intervention. Experiments with pretraining data demonstrate that we can explain a significant fraction of the variance in model perplexity via density measurements. We conclude that our framework can provide statistical evidence of the dependence of a target model\u2019s predictions on subsets of its training data, and can more generally be used to characterize the support (or lack thereof) in the training data for a given test task.", "title":"LMD3: Language Model Data Density Dependence", "authors":[ "John Kirchenbauer", "Garrett Honke", "Gowthami Somepalli", "Jonas Geiping", "Katherine Lee", "Daphne Ippolito", "Tom Goldstein", "David Andre" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":103 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=eDWcNqiQWW", "bibtext":"@inproceedings{\nahrabian2024the,\ntitle={The Curious Case of Nonverbal Abstract Reasoning with Multi-Modal Large Language Models},\nauthor={Kian Ahrabian and Zhivar Sourati and Kexuan Sun and Jiarui Zhang and Yifan Jiang and Fred Morstatter and Jay Pujara},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=eDWcNqiQWW}\n}", "abstract":"While large language models (LLMs) are still being adopted to new domains and utilized in novel applications, we are experiencing an influx of the new generation of foundation models, namely multi-modal large language models (MLLMs). These models integrate verbal and visual information, opening new possibilities to demonstrate more complex reasoning abilities at the intersection of the two modalities. However, despite the revolutionizing prospect of MLLMs, our understanding of their reasoning abilities is limited. In this study, we assess the nonverbal abstract reasoning abilities of open-source and closed-source MLLMs using variations of Raven's Progressive Matrices. Our experiments reveal the challenging nature of such problems for MLLMs while showcasing the immense gap between open-source and closed-source models. We also uncover critical shortcomings of visual and textual perceptions, subjecting the models to low-performance ceilings. Finally, to improve MLLMs' performance, we experiment with different methods, such as Chain-of-Thought prompting, leading to a significant (up to 100\\%) boost in performance.", "title":"The Curious Case of Nonverbal Abstract Reasoning with Multi-Modal Large Language Models", "authors":[ "Kian Ahrabian", "Zhivar Sourati", "Kexuan Sun", "Jiarui Zhang", "Yifan Jiang", "Fred Morstatter", "Jay Pujara" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.12117", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.12117", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":104 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dribhnhm1i", "bibtext":"@inproceedings{\nliu2024tuning,\ntitle={Tuning Language Models by Proxy},\nauthor={Alisa Liu and Xiaochuang Han and Yizhong Wang and Yulia Tsvetkov and Yejin Choi and Noah A. Smith},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dribhnhm1i}\n}", "abstract":"Despite the general capabilities of large pretrained language models, they consistently benefit from further adaptation to better achieve desired behaviors. However, tuning these models has become increasingly resource-intensive, or impossible when model weights are private. We introduce **proxy-tuning**, a lightweight decoding-time algorithm that operates on top of black-box LMs to achieve the same end as direct tuning, but by accessing only its predictions over the output vocabulary, not its parameters. Our method tunes a *smaller* LM, then applies the difference between the predictions of the small tuned and untuned LMs to shift the original predictions of the larger untuned model in the direction of tuning, while retaining the benefits of larger-scale pretraining. In experiments, when we apply proxy-tuning to Llama2-70B using proxies of only 7B size, we can close 88% of the gap between Llama2-70B and its truly-tuned chat version, when evaluated across knowledge, reasoning, and safety benchmarks. We then demonstrate the generality of proxy-tuning by applying it to domain adaptation on code, and task-specific finetuning on question-answering and math problems. Finally, we show how to proxy-tune a truly black-box LM, GPT-3.5, for temporal adaptation, increasing its knowledge about recent events. Our work demonstrates the promise of using small tuned LMs to efficiently customize large, potentially proprietary LMs through decoding-time guidance.", "title":"Tuning Language Models by Proxy", "authors":[ "Alisa Liu", "Xiaochuang Han", "Yizhong Wang", "Yulia Tsvetkov", "Yejin Choi", "Noah A. Smith" ], "id":"Conference", "type":"Oral", "arxiv_id":"2401.08565", "GitHub":[ "https:\/\/github.com\/alisawuffles\/proxy-tuning" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.08565", "n_linked_authors":4, "upvotes":20, "num_comments":2, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":105 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dnwRScljXr", "bibtext":"@inproceedings{\nkamoi2024evaluating,\ntitle={Evaluating {LLM}s at Detecting Errors in {LLM} Responses},\nauthor={Ryo Kamoi and Sarkar Snigdha Sarathi Das and Renze Lou and Jihyun Janice Ahn and Yilun Zhao and Xiaoxin Lu and Nan Zhang and Yusen Zhang and Haoran Ranran Zhang and Sujeeth Reddy Vummanthala and Salika Dave and Shaobo Qin and Arman Cohan and Wenpeng Yin and Rui Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dnwRScljXr}\n}", "abstract":"With Large Language Models (LLMs) being widely used across various tasks, detecting errors in their responses is increasingly crucial. However, little research has been conducted on error detection of LLM responses. Collecting error annotations on LLM responses is challenging due to the subjective nature of many NLP tasks, and thus previous research focuses on tasks of little practical value (e.g., word sorting) or limited error types (e.g., faithfulness in summarization). This work introduces ReaLMistake, the first error detection benchmark consisting of objective, realistic, and diverse errors made by LLMs. ReaLMistake contains three challenging and meaningful tasks that introduce objectively assessable errors in four categories (reasoning correctness, instruction-following, context-faithfulness, and parameterized knowledge), eliciting naturally observed and diverse errors in responses of GPT-4 and Llama 2 70B annotated by experts. We use ReaLMistake to evaluate error detectors based on 12 LLMs. Our findings show: 1) Top LLMs like GPT-4 and Claude 3 detect errors made by LLMs at very low recall, and all LLM-based error detectors perform much worse than humans. 2) Explanations by LLM-based error detectors lack reliability. 3) LLMs-based error detection is sensitive to small changes in prompts but remains challenging to improve. 4) Popular approaches to improving LLMs, including self-consistency and majority vote, do not improve the error detection performance. Our benchmark and code are provided at https:\/\/github.com\/psunlpgroup\/ReaLMistake.", "title":"Evaluating LLMs at Detecting Errors in LLM Responses", "authors":[ "Ryo Kamoi", "Sarkar Snigdha Sarathi Das", "Renze Lou", "Jihyun Janice Ahn", "Yilun Zhao", "Xiaoxin Lu", "Nan Zhang", "Yusen Zhang", "Haoran Ranran Zhang", "Sujeeth Reddy Vummanthala", "Salika Dave", "Shaobo Qin", "Arman Cohan", "Wenpeng Yin", "Rui Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.03602", "GitHub":[ "https:\/\/github.com\/psunlpgroup\/realmistake" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.03602", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":15, "Models":[ ], "Datasets":[ "ryokamoi\/realmistake" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":106 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dkpeWQRmlc", "bibtext":"@inproceedings{\nhe2024hdt,\ntitle={{HDT}: Hierarchical Document Transformer},\nauthor={Haoyu He and Markus Flicke and Jan Buchmann and Iryna Gurevych and Andreas Geiger},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dkpeWQRmlc}\n}", "abstract":"In this paper, we propose the Hierarchical Document Transformer (HDT), a novel sparse Transformer architecture tailored for structured hierarchical documents. Such documents are extremely important in numerous domains, including science, law or medicine. However, most existing solutions are inefficient and fail to make use of the structure inherent to documents. HDT exploits document structure by introducing auxiliary anchor tokens and redesigning the attention mechanism into a sparse multi-level hierarchy. This approach facilitates information exchange between tokens at different levels while maintaining sparsity, thereby enhancing computational and memory efficiency while exploiting the document structure as an inductive bias. We address the technical challenge of implementing HDT's sample-dependent hierarchical attention pattern by developing a novel sparse attention kernel that considers the hierarchical structure of documents. As demonstrated by our experiments, utilizing structural information present in documents leads to faster convergence, higher sample efficiency and better performance on downstream tasks.", "title":"HDT: Hierarchical Document Transformer", "authors":[ "Haoyu He", "Markus Flicke", "Jan Buchmann", "Iryna Gurevych", "Andreas Geiger" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":107 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dj9x6JuiD5", "bibtext":"@inproceedings{\nwang2024with,\ntitle={With Greater Text Comes Greater Necessity: Inference-Time Training Helps Long Text Generation},\nauthor={Yan Wang and Dongyang Ma and Deng Cai},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dj9x6JuiD5}\n}", "abstract":"Long text generation, such as novel writing and discourse-level translation with extremely long contexts, presents significant challenges to current language models. Existing methods mainly focus on extending the model's context window through strategies like length extrapolation. However, these approaches demand substantial hardware resources during the training and\/or inference phases.\n\nOur proposed method, Temp-Lora, introduces an alternative concept. Instead of relying on the KV cache to store all context information, we embeds this information directly into a temporary Lora module. In the process of long text generation, this module is progressively trained with text generated previously. This approach not only efficiently preserves contextual knowledge but also prevents any permanent alteration to the model's parameters given that the module is discarded post-generation.\n\nExtensive experiments on the PG19 language modeling benchmark and the GuoFeng discourse-level translation benchmark validate the effectiveness of Temp-Lora. Our results show that: 1) Temp-Lora substantially enhances generation quality for long text, as indicated by a 13.2\\% decrease in perplexity (PPL) on a subset of PG19, and a 29.3\\% decrease in PPL along with a 113.2\\% increase in BLEU score on a subset of GuoFeng, 2) Temp-Lora is compatible with and enhances most existing long text generation methods, and 3) Temp-Lora can greatly reduce computational costs by shortening the context window. For example, we can ensure a moderate improvement in generation quality (a decrease of 3.8\\% in PPL) while enabling a 51.5\\% memory usage reduction and a 60.0\\% decrease in latency for inference.", "title":"With Greater Text Comes Greater Necessity: Inference-Time Training Helps Long Text Generation", "authors":[ "Yan Wang", "Dongyang Ma", "Deng Cai" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.11504", "GitHub":[ "https:\/\/github.com\/temporarylora\/temp-lora" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.11504", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":108 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=didvEO1can", "bibtext":"@inproceedings{\nlin2024catcode,\ntitle={CatCode: A Comprehensive Evaluation Framework for {LLM}s On the Mixture of Code and Text},\nauthor={Zhenru Lin and Yiqun Yao and Yang Yuan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=didvEO1can}\n}", "abstract":"Large language models (LLMs) such as ChatGPT are increasingly proficient in understanding and generating a mixture of code and text. Evaluation based on such *mixture* can lead to a more comprehensive understanding of the models' abilities in solving coding problems. However, in this context, current evaluation methods are either limited in task coverage or lack standardization. To address this issue, we propose using category theory as a framework for evaluation. Specifically, morphisms within a code category can represent code debugging and transformation, functors between two categories represent code translation, and functors between a code category and a natural language category represent code generation, explanation, and reproduction. We present an automatic evaluation framework called **CatCode** (**Cat**egory **Code**) that can comprehensively assess the coding abilities of LLMs, including ChatGPT, Text-Davinci, and CodeGeeX.\nLarge language models (LLMs) are increasingly proficient in understanding and generating a mixture of code and text. Evaluation based on such *mixture* can lead to a more comprehensive understanding of the models' abilities in solving coding problems. However, current evaluation methods are either limited in task coverage or lack standardization. To address this issue, we propose to apply category theory as math abstraction for code-related evaluation. Specifically, morphisms within a code category can represent code debugging and transformation, functors between two categories represent code translation, and functors between a code category and a natural language category represent code generation and explanation. We present an automatic evaluation framework called **CatCode** (**Cat**egory *Code*) that can assess the coding abilities of various ChatGPT-like LLMs in a *comprehensive* and *standard* way, and further support *composite* task evaluation. The code can be found in https:\/\/github.com\/scorpio-nova\/CatCode.", "title":"CatCode: A Comprehensive Evaluation Framework for LLMs On the Mixture of Code and Text", "authors":[ "Zhenru Lin", "Yiqun Yao", "Yang Yuan" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.01784", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":109 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dcbNzhVVQj", "bibtext":"@inproceedings{\nyao2024learning,\ntitle={Learning From Correctness Without Prompting Makes {LLM} Efficient Reasoner},\nauthor={Yuxuan YAO and Han Wu and Zhijiang Guo and Zhou Biyan and Jiahui Gao and Sichun Luo and Hanxu Hou and Xiaojin Fu and Linqi Song},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dcbNzhVVQj}\n}", "abstract":"Large language models (LLMs) have demonstrated outstanding performance across various tasks, yet they still exhibit limitations such as hallucination, unfaithful reasoning, and toxic content. One potential approach to mitigate these issues is learning from human or external feedback (e.g. tools). In this paper, we introduce an intrinsic self-correct reasoning framework for LLMs that eliminates the need for human feedback, external tools, and handcraft prompts. The proposed framework, based on a multi-step reasoning paradigm \\textbf{Le}arning from \\textbf{Co}rrectness (\\textsc{LeCo}), improves reasoning performance without needing to learn from errors. This paradigm prioritizes learning from correct reasoning steps, and a unique method to measure confidence for each reasoning step based on generation logits. Experimental results across various multi-step reasoning tasks demonstrate the effectiveness of the framework in improving reasoning performance with reduced token consumption.", "title":"Learning From Correctness Without Prompting Makes LLM Efficient Reasoner", "authors":[ "Yuxuan YAO", "Han Wu", "Zhijiang Guo", "Zhou Biyan", "Jiahui Gao", "Sichun Luo", "Hanxu Hou", "Xiaojin Fu", "Linqi Song" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.19094", "GitHub":[ "https:\/\/github.com\/starrYYxuan\/LeCo" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.19094", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":9, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":110 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dWYRjT501w", "bibtext":"@inproceedings{\nbronzini2024unveiling,\ntitle={Unveiling {LLM}s: The Evolution of Latent Representations in a Dynamic Knowledge Graph},\nauthor={Marco Bronzini and Carlo Nicolini and Bruno Lepri and Jacopo Staiano and Andrea Passerini},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dWYRjT501w}\n}", "abstract":"Large Language Models (LLMs) demonstrate an impressive capacity to recall a vast range of factual knowledge. \nHowever, understanding their underlying reasoning and internal mechanisms in exploiting this knowledge remains a key research area.\nThis work unveils the factual information an LLM represents internally for sentence-level claim verification.\nWe propose an end-to-end framework to decode factual knowledge embedded in token representations from a vector space to a set of ground predicates, showing its layer-wise evolution using a dynamic knowledge graph.\nOur framework employs activation patching, a vector-level technique that alters a token representation during inference, to extract encoded knowledge.\nAccordingly, we neither rely on training nor external models.\nUsing factual and common-sense claims from two claim verification datasets, we showcase interpretability analyses at local and global levels.\nThe local analysis highlights entity centrality in LLM reasoning, from claim-related information and multi-hop reasoning to representation errors causing erroneous evaluation.\nOn the other hand, the global reveals trends in the underlying evolution, such as word-based knowledge evolving into claim-related facts.\nBy interpreting semantics from LLM latent representations and enabling graph-related analyses, this work enhances the understanding of the factual knowledge resolution process.", "title":"Unveiling LLMs: The Evolution of Latent Representations in a Dynamic Knowledge Graph", "authors":[ "Marco Bronzini", "Carlo Nicolini", "Bruno Lepri", "Jacopo Staiano", "Andrea Passerini" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.03623", "GitHub":[ "https:\/\/github.com\/Ipazia-AI\/latent-explorer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":111 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dJfBejh478", "bibtext":"@inproceedings{\nyao2024scalable,\ntitle={Scalable Model Editing via Customized Expert Networks},\nauthor={Zihan Yao and Yu He and Tianyu Qi and Ming Li},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dJfBejh478}\n}", "abstract":"Addressing the issues of hallucinations and outdated knowledge in large language models is critical for their reliable application. Model Editing presents a promising avenue for mitigating these challenges in a cost\u0002effective manner. However, existing methods often suffer from unsatis\u0002factory generalization and unintended effects on non-edited samples. To overcome these limitations, we introduce a novel approach: Scalable Model Editing via Customized Expert Networks (SCEN), which is a two-stage continuous training paradigm. Specifically, in the first stage, we train lightweight expert networks individually for each piece of knowledge that needs to be updated. Subsequently, we train a corresponding indexing neuron for each expert to control the activation state of that expert. We con\u0002ducted a series of experiments on the ZsRE and Hallucination benchmarks by tuning the advanced open-source LLM, Llama2, achieving state-of-the\u0002art results compared to current mainstream methods. Our code is available at https:\/\/github.com\/TAL-auroraX\/SCEN.", "title":"Scalable Model Editing via Customized Expert Networks", "authors":[ "Zihan Yao", "Yu He", "Tianyu Qi", "Ming Li" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.02699", "GitHub":[ "https:\/\/github.com\/tal-aurorax\/scen" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":112 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=dJMTn3QOWO", "bibtext":"@inproceedings{\nmishra2024finegrained,\ntitle={Fine-grained Hallucination Detection and Editing for Language Models},\nauthor={Abhika Mishra and Akari Asai and Vidhisha Balachandran and Yizhong Wang and Graham Neubig and Yulia Tsvetkov and Hannaneh Hajishirzi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=dJMTn3QOWO}\n}", "abstract":"Large language models (LMs) are prone to generate factual errors, which are often called hallucinations. In this paper, we introduce a comprehensive taxonomy of hallucinations and argue that hallucinations manifest in diverse forms, each requiring varying degrees of careful assessments to verify factuality. We propose a novel task of automatic fine-grained hallucination detection and construct a new evaluation benchmark, FavaBench, that includes about one thousand fine-grained human judgments on three LM outputs across various domains. Our analysis reveals that ChatGPT and Llama2-Chat (70B, 7B) exhibit diverse types of hallucinations in the majority of their outputs in information-seeking scenarios. We train FAVA, a retrieval-augmented LM by carefully creating synthetic data to detect and correct fine-grained hallucinations. On our benchmark, our automatic and human evaluations show that FAVA significantly outperforms ChatGPT and GPT-4 on fine-grained hallucination detection, and edits suggested by FAVA improve the factuality of LM-generated text.", "title":"Fine-grained Hallucination Detection and Editing for Language Models", "authors":[ "Abhika Mishra", "Akari Asai", "Vidhisha Balachandran", "Yizhong Wang", "Graham Neubig", "Yulia Tsvetkov", "Hannaneh Hajishirzi" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":113 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=cKBmZ2PZ6c", "bibtext":"@inproceedings{\nxiao2024orag,\ntitle={{ORAG}: Ontology-Guided Retrieval-Augmented Generation for Theme-Specific Entity Typing},\nauthor={Jinfeng Xiao and Linyi Ding and James Barry and Mohab Elkaref and Geeth De Mel and Jiawei Han},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=cKBmZ2PZ6c}\n}", "abstract":"Large language models (LLMs) incorporated with retrieval-augmented generation (RAG) have shown great power in many NLP tasks, including fine-grained entity typing (FET). However, we observe that recent LLMs can easily suffer from hallucinations on highly specialized and fast-evolving themes (e.g., redox-active organic electrode materials), especially in the following cases: (1) unseen entities: an entity never appears in the pre-training corpora of LLMs; and (2) misleading semantics: the context of an entity can potentially mislead an entity typing algorithm if the relevant knowledge is not correctly retrieved and utilized. To address these challenges, this paper proposes an Ontology-Guided Retrieval-Augmented Generation (ORAG) approach that incorporates ontology structures with RAG for the theme-specific entity typing task. ORAG first enriches the label ontology with external knowledge and constructs a structured knowledge unit for each node. Then, it retrieves the relevant nodes by dense passage retrieval and expands the retrieved results based on the ontological structure. In this way, more supporting knowledge will be retrieved within the limited input of LLMs for entity typing. In the evaluation, we construct a dataset with two themes for theme-specific entity typing with a focus on unseen entities and misleading semantics. We observe notable cases of hallucination when vanilla RAG is applied to Llama-3, GPT-3.5, and GPT-4, while ORAG can effectively mitigate such hallucinations and improve the results.", "title":"ORAG: Ontology-Guided Retrieval-Augmented Generation for Theme-Specific Entity Typing", "authors":[ "Jinfeng Xiao", "Linyi Ding", "James Barry", "Mohab Elkaref", "Geeth De Mel", "Jiawei Han" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":114 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=cG1EbmWiSs", "bibtext":"@inproceedings{\nhuang2024unified,\ntitle={Unified View of Grokking, Double Descent and Emergent Abilities: A Comprehensive Study on Algorithm Task},\nauthor={Yufei Huang and Shengding Hu and Xu Han and Zhiyuan Liu and Maosong Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=cG1EbmWiSs}\n}", "abstract":"Recent studies have uncovered intriguing phenomena in deep learning, such as *grokking*, *double descent*, and *emergent abilities* in large language models, which challenge human intuition and are crucial for a deeper understanding of neural models. In this paper, we present a comprehensive study on algorithm task to provide a unified view of these three phenomena, with a focus on the interplay between memorization and generalization. Through extensive experiments spanning a wide range of model sizes and training data quantities, we uncover four distinct training dynamics, each arising from unique combinations of model size and training data quantity, formulating a theoretical framework for further analysis. Utilizing this framework, we establish connections between *double descent* and *grokking* and propose two verifiable predictions regarding the occurrence of *double descent*, both substantiated by our experimental results. Moreover, we expand our experiments to the multi-task learning paradigm, demonstrating how algorithm tasks can be turned into emergent abilities by mixing some pure memorization data. This offers a novel perspective to understand *emergent abilities* in Large Language Models.", "title":"Unified View of Grokking, Double Descent and Emergent Abilities: A Comprehensive Study on Algorithm Task", "authors":[ "Yufei Huang", "Shengding Hu", "Xu Han", "Zhiyuan Liu", "Maosong Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":115 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=c30qeMg8dv", "bibtext":"@inproceedings{\nnahar2024fakes,\ntitle={Fakes of Varying Shades: How Warning Affects Human Perception and Engagement Regarding {LLM} Hallucinations},\nauthor={Mahjabin Nahar and Haeseung Seo and Eun-Ju Lee and Aiping Xiong and Dongwon Lee},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=c30qeMg8dv}\n}", "abstract":"The widespread adoption and transformative effects of large language models (LLMs) have sparked concerns regarding their capacity to produce inaccurate and fictitious content, referred to as `hallucinations'. Given the potential risks associated with hallucinations, humans should be able to identify them. This research aims to understand the human perception of LLM hallucinations by systematically varying the degree of hallucination (genuine, minor hallucination, major hallucination) and examining its interaction with warning (i.e., a warning of potential inaccuracies: absent vs. present). Participants ($N=419$) from Prolific rated the perceived accuracy and engaged with content (e.g., like, dislike, share) in a Q\/A format. Results indicate that humans rank content as truthful in the order genuine > minor hallucination > major hallucination and user engagement behaviors mirror this pattern. More importantly, we observed that warning improves hallucination detection without significantly affecting the perceived truthfulness of genuine content. We conclude by offering insights for future tools to aid human detection of hallucinations.", "title":"Fakes of Varying Shades: How Warning Affects Human Perception and Engagement Regarding LLM Hallucinations", "authors":[ "Mahjabin Nahar", "Haeseung Seo", "Eun-Ju Lee", "Aiping Xiong", "Dongwon Lee" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.03745", "GitHub":[ "https:\/\/github.com\/mahjabinnahar\/fakes-of-varying-shades-survey-materials" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":116 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=bwo3GVsgOv", "bibtext":"@inproceedings{\nwagner2024personalized,\ntitle={Personalized Collaborative Fine-Tuning for On-Device Large Language Models},\nauthor={Nicolas Wagner and Dongyang Fan and Martin Jaggi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=bwo3GVsgOv}\n}", "abstract":"We explore on-device collaborative fine-tuning of large language models under limited local data availability. We introduce three distinct dynamic collaborator selection schemes, allowing trust-weighted personalized update aggregation: model-similarity-based, prediction-similarity-based and validation-performance-based. To minimize communication overhead, we integrate Low-Rank Adaptation (LoRA) and only exchange LoRA model updates. Our protocols, driven by prediction and performance metrics, surpass both FedAvg and local fine-tuning methods, which is particularly evident in realistic distributed scenarios with more diverse local data distributions. The results underscore the effectiveness of our approach in addressing heterogeneity and scarcity of the local datasets.", "title":"Personalized Collaborative Fine-Tuning for On-Device Large Language Models", "authors":[ "Nicolas Wagner", "Dongyang Fan", "Martin Jaggi" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/epfml\/personalized-collaborative-llms" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":117 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=bttKwCZDkm", "bibtext":"@inproceedings{\nsaxon2024benchmarks,\ntitle={Benchmarks as Microscopes: A Call for Model Metrology},\nauthor={Michael Saxon and Ari Holtzman and Peter West and William Yang Wang and Naomi Saphra},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=bttKwCZDkm}\n}", "abstract":"Modern language models (LMs) pose a new challenge in capability assessment. Static benchmarks inevitably saturate without providing confidence in the deployment tolerances of LM-based systems, but developers nonetheless claim that their models have generalized traits such as reasoning or open-domain language understanding based on these flawed metrics. The science and practice of LMs requires a new approach to benchmarking which measures specific capabilities with dynamic assessments. To be confident in our metrics, we need a new discipline of *model metrology*---one which focuses on how to generate benchmarks that predict performance under deployment. Motivated by our evaluation criteria, we outline how building a community of model metrology practitioners---one focused on building tools and studying how to measure system capabilities---is the best way to meet these needs to and add clarity to the AI discussion.", "title":"Benchmarks as Microscopes: A Call for Model Metrology", "authors":[ "Michael Saxon", "Ari Holtzman", "Peter West", "William Yang Wang", "Naomi Saphra" ], "id":"Conference", "type":"Poster", "arxiv_id":"2407.16711", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":118 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=bo4pauxnIR", "bibtext":"@inproceedings{\nnam2024tabular,\ntitle={Tabular Transfer Learning via Prompting {LLM}s},\nauthor={Jaehyun Nam and Woomin Song and Seong Hyeon Park and Jihoon Tack and Sukmin Yun and Jaehyung Kim and Kyu Hwan Oh and Jinwoo Shin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=bo4pauxnIR}\n}", "abstract":"Learning with a limited number of labeled data is a central problem in real-world applications of machine learning, as it is often expensive to obtain annotations. To deal with the scarcity of labeled data, transfer learning is a conventional approach; it suggests to learn a transferable knowledge by training a neural network from multiple other sources. In this paper, we investigate transfer learning of tabular tasks, which has been less studied and successful in the literature, compared to other domains, e.g., vision and language.\nThis is because tables are inherently heterogeneous, i.e., they contain different columns and feature spaces, making transfer learning difficult. On the other hand, recent advances in natural language processing suggest that the label scarcity issue can be mitigated by utilizing in-context learning capability of large language models (LLMs). Inspired by this and the fact that LLMs can also process tables within a unified language space, we ask whether LLMs can be effective for tabular transfer learning, in particular, under the scenarios where the source and target datasets are of different format. As a positive answer, we propose a novel tabular transfer learning framework, coined Prompt to Transfer (P2T), that utilizes unlabeled (or heterogeneous) source data with LLMs. Specifically, P2T identifies a column feature in a source dataset that is strongly correlated with a target task feature to create examples relevant to the target task, thus creating pseudo-demonstrations for prompts. Experimental results demonstrate that P2T outperforms previous methods on various tabular learning benchmarks, showing good promise for the important, yet underexplored tabular transfer learning problem. Code is available at https:\/\/github.com\/jaehyun513\/P2T.", "title":"Tabular Transfer Learning via Prompting LLMs", "authors":[ "Jaehyun Nam", "Woomin Song", "Seong Hyeon Park", "Jihoon Tack", "Sukmin Yun", "Jaehyung Kim", "Kyu Hwan Oh", "Jinwoo Shin" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.11063", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":119 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=bnscREWUuc", "bibtext":"@inproceedings{\nrichburg2024how,\ntitle={How Multilingual are Large Language Models Fine-tuned for Translation?},\nauthor={Aquia Richburg and Marine Carpuat},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=bnscREWUuc}\n}", "abstract":"A new paradigm for machine translation has recently emerged: fine-tuning large language models on parallel text has been shown to outperform dedicated translation systems trained in a supervised fashion on much larger amounts of parallel data (Xu et al. 2024, Alves et al. 2024). However, it remains unclear whether this paradigm can enable massively multilingual machine translation or whether it requires fine-tuning dedicated models for a small number of language pairs. How does translation fine-tuning impact the MT capabilities of LLMs for zero-shot languages, zero-shot language pairs, and translation tasks that do not involve English? To address these questions, we conduct an extensive empirical evaluation of the translation quality of the TOWER family of language models (Alves et al. 2024) on 132 translation tasks from the multi-parallel FLORES data. We find that translation fine-tuning improves translation quality even for zero-shot languages on average, but that the impact is uneven depending on the language pairs involved. These results call for further research to effectively enable massively multilingual translation with LLMs.", "title":"How Multilingual are Large Language Models Fine-tuned for Translation?", "authors":[ "Aquia Richburg", "Marine Carpuat" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":120 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=bkY8zEDdH9", "bibtext":"@inproceedings{\nxiao2024od,\ntitle={O3D: Offline Data-driven Discovery and Distillation for Sequential Decision-Making with Large Language Models},\nauthor={Yuchen Xiao and Yanchao Sun and Mengda Xu and Udari Madhushani Sehwag and Jared Vann and Deepeka Garg and Sumitra Ganesh},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=bkY8zEDdH9}\n}", "abstract":"Recent advancements in large language models (LLMs) have exhibited promising performance in solving sequential decision-making problems. By imitating few-shot examples provided in the prompts (i.e., in-context learning), an LLM agent can interact with an external environment and complete given tasks without additional training. However, such few-shot examples are often insufficient to generate high-quality solutions for complex and long-horizon tasks, while the limited context length cannot consume larger-scale demonstrations with long interaction horizons. To this end, we propose an offline learning framework that utilizes offline data at scale (e.g, logs of human interactions) to improve LLM-powered policies without fine-tuning. The proposed method O3D (Offline Data-driven Discovery and Distillation) automatically discovers reusable skills and distills generalizable knowledge across multiple tasks based on offline interaction data, advancing the capability of solving downstream tasks. Empirical results under two interactive decision-making benchmarks (ALFWorld and WebShop) verify that O3D can notably enhance the decision-making capabilities of LLMs through the offline discovery and distillation process, and consistently outperform baselines across various LLMs.", "title":"O3D: Offline Data-driven Discovery and Distillation for Sequential Decision-Making with Large Language Models", "authors":[ "Yuchen Xiao", "Yanchao Sun", "Mengda Xu", "Udari Madhushani Sehwag", "Jared Vann", "Deepeka Garg", "Sumitra Ganesh" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.14403", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":121 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=b0y6fbSUG0", "bibtext":"@inproceedings{\nhao2024llm,\ntitle={{LLM} Reasoners: New Evaluation, Library, and Analysis of Step-by-Step Reasoning with Large Language Models},\nauthor={Shibo Hao and Yi Gu and Haotian Luo and Tianyang Liu and Xiyan Shao and Xinyuan Wang and Shuhua Xie and Haodi Ma and Adithya Samavedhi and Qiyue Gao and Zhen Wang and Zhiting Hu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=b0y6fbSUG0}\n}", "abstract":"Reasoning is a pivotal skill in the evolution of Large Language Models (LLMs), and constructing step-by-step reasoning chains is essential for enhancing their reasoning abilities. Despite a rich array of recent research aimed at deriving improved reasoning chains from LLMs, two major challenges hinder the progress in this field: the lack of effective methods to evaluate reasoning chains, and the absence of systematic analysis of reasoning algorithms. In this work, we introduce RICE, a novel LLM-based approach for automated evaluation of reasoning chains, which autonomously constructs a detailed evaluation criteria list to help itself recognize intermediate reason-\ning mistakes. This fully automatic method proves to be more precise than existing metrics and offers a complementary angle to conventional answer-based evaluations. For the second challenge, we present a formulation that connects extensive existing reasoning algorithms. LLM Reasoners, a modular library for step-by-step reasoning algorithms, is developed based on the formulation. It enables users to specify problem domains and reasoning strategies with minimal effort. With the help of the new metric and library, we make a comprehensive study of the factors contributing to a reasoning algorithm, including the reward, the exploration strategy, the world model, and the prompt format, with interesting findings unveiled through RICE.", "title":"LLM Reasoners: New Evaluation, Library, and Analysis of Step-by-Step Reasoning with Large Language Models", "authors":[ "Shibo Hao", "Yi Gu", "Haotian Luo", "Tianyang Liu", "Xiyan Shao", "Xinyuan Wang", "Shuhua Xie", "Haodi Ma", "Adithya Samavedhi", "Qiyue Gao", "Zhen Wang", "Zhiting Hu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.05221", "GitHub":[ "https:\/\/github.com\/maitrix-org\/llm-reasoners" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.05221", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":12, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":122 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=av0D19pSkU", "bibtext":"@inproceedings{\nduan2024do,\ntitle={Do Membership Inference Attacks Work on Large Language Models?},\nauthor={Michael Duan and Anshuman Suri and Niloofar Mireshghallah and Sewon Min and Weijia Shi and Luke Zettlemoyer and Yulia Tsvetkov and Yejin Choi and David Evans and Hannaneh Hajishirzi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=av0D19pSkU}\n}", "abstract":"Membership inference attacks (MIAs) attempt to predict whether a particular datapoint is a member of a target model's training data. Despite extensive research on traditional machine learning models, there has been limited work studying MIA on the pre-training data of large language models (LLMs). We perform a large-scale evaluation of MIAs over a suite of language models (LMs) trained on the Pile, ranging from 160M to 12B parameters. We find that MIAs barely outperform random guessing for most settings across varying LLM sizes and domains. Further analyses reveal that this poor performance can be attributed to (1) the combination of a large dataset and few training iterations, and (2) an inherently fuzzy boundary between members and non-members. We also find that, when LLMs have been shown to be vulnerable to MIAs, this apparent success can be attributed to a distribution shift, e.g., members and non-members are seemingly drawn from identical domain but with different temporal ranges. Finally, we observe that existing MIAs are highly sensitive to even small changes in a sample. Such changes may cause samples that are lexically or semantically similar to members to be classified as non-members, which may be at odds with leakage that privacy auditors care about. We release our code and data as a unified benchmark package that includes all existing MIAs, supporting future work.", "title":"Do Membership Inference Attacks Work on Large Language Models?", "authors":[ "Michael Duan", "Anshuman Suri", "Niloofar Mireshghallah", "Sewon Min", "Weijia Shi", "Luke Zettlemoyer", "Yulia Tsvetkov", "Yejin Choi", "David Evans", "Hannaneh Hajishirzi" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/iamgroot42\/mimir" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":123 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=amhPBLFYWv", "bibtext":"@inproceedings{\nmichaelov2024revenge,\ntitle={Revenge of the Fallen? Recurrent Models Match Transformers at Predicting Human Language Comprehension Metrics},\nauthor={James Michaelov and Catherine Arnett and Ben Bergen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=amhPBLFYWv}\n}", "abstract":"Transformers have generally supplanted recurrent neural networks as the dominant architecture for both natural language processing tasks and for modelling the effect of predictability on online human language comprehension. However, two recently developed recurrent model architectures, RWKV and Mamba, appear to perform natural language tasks comparably to or better than transformers of equivalent scale. In this paper, we show that contemporary recurrent models are now also able to match\u2014and in some cases, exceed\u2014performance of comparably sized transformers at modeling online human language comprehension. This suggests that transformer language models are not uniquely suited to this task, and opens up new directions for debates about the extent to which architectural features of language models make them better or worse models of human language comprehension.", "title":"Revenge of the Fallen? Recurrent Models Match Transformers at Predicting Human Language Comprehension Metrics", "authors":[ "James Michaelov", "Catherine Arnett", "Ben Bergen" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.19178", "GitHub":[ "https:\/\/github.com\/jmichaelov\/recurrent-vs-transformer-modeling" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":124 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=aajyHYjjsk", "bibtext":"@inproceedings{\nmarks2024the,\ntitle={The Geometry of Truth: Emergent Linear Structure in Large Language Model Representations of True\/False Datasets},\nauthor={Samuel Marks and Max Tegmark},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=aajyHYjjsk}\n}", "abstract":"Large Language Models (LLMs) have impressive capabilities, but are prone to outputting falsehoods. Recent work has developed techniques for inferring whether a LLM is telling the truth by training probes on the LLM's internal activations. However, this line of work is controversial, with some authors pointing out failures of these probes to generalize in basic ways, among other conceptual issues. In this work, we use high-quality datasets of simple true\/false statements to study in detail the structure of LLM representations of truth, drawing on three lines of evidence: 1. Visualizations of LLM true\/false statement representations, which reveal clear linear structure. 2. Transfer experiments in which probes trained on one dataset generalize to different datasets. 3. Causal evidence obtained by surgically intervening in a LLM's forward pass, causing it to treat false statements as true and vice versa. Overall, we present evidence that at sufficient scale, LLMs *linearly represent* the truth or falsehood of factual statements. We also show that simple difference-in-mean probes generalize as well as other probing techniques while identifying directions which are more causally implicated in model outputs.", "title":"The Geometry of Truth: Emergent Linear Structure in Large Language Model Representations of True\/False Datasets", "authors":[ "Samuel Marks", "Max Tegmark" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.06824", "GitHub":[ "https:\/\/github.com\/saprmarks\/geometry-of-truth" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":125 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=aKwQPRjdGa", "bibtext":"@inproceedings{\nwu2024hummer,\ntitle={Hummer: Towards Limited Competitive Preference Dataset},\nauthor={Yusen Wu and Li Jiang and Junwu Xiong and Jingqing Ruan and Yichuan Ding and Qingpei Guo and zujie wen and JUN ZHOU and Xiaotie Deng},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=aKwQPRjdGa}\n}", "abstract":"Preference datasets are essential for incorporating human preferences into pre-trained language models, playing a key role in the success of Reinforcement Learning from Human Feedback. However, these datasets often demonstrate conflicting alignment objectives, leading to increased vulnerability to jailbreak attacks and challenges in adapting downstream tasks to prioritize specific alignment objectives without negatively impacting others. In this work, we introduce a novel statistical metric, Alignment Dimension Conflict, to quantify the degree of conflict within preference datasets. We then present \\texttt{Hummer} and its fine-grained variant, \\texttt{Hummer-F}, as innovative pairwise preference datasets with reduced-conflict alignment objectives. \\texttt{Hummer} is built based on UltraFeedback and is enhanced by AI feedback from GPT-4, marking as the first preference dataset aimed at reducing the competition between alignment objectives. Furthermore, we develop reward models, \\texttt{HummerRM} and \\texttt{HummerRM-F}, which employ a hybrid sampling approach to balance diverse alignment objectives effectively. This sampling method positions \\texttt{HummerRM} as an ideal model for domain-specific further fine-tuning and reducing vulnerability to jailbreak attacks.", "title":"Hummer: Towards Limited Competitive Preference Dataset", "authors":[ "Yusen Wu", "Li Jiang", "Junwu Xiong", "Jingqing Ruan", "Yichuan Ding", "Qingpei Guo", "zujie wen", "JUN ZHOU", "Xiaotie Deng" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.11647", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.11647", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":9, "Models":[ ], "Datasets":[ "sarinw-2024\/Hummer" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":126 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=aKkAwZB6JV", "bibtext":"@inproceedings{\ntunstall2024zephyr,\ntitle={Zephyr: Direct Distillation of {LM} Alignment},\nauthor={Lewis Tunstall and Edward Emanuel Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro Von Werra and Cl{\\'e}mentine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M Rush and Thomas Wolf},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=aKkAwZB6JV}\n}", "abstract":"We aim to produce a smaller language model that is aligned to user intent. Previous research has shown that applying distilled supervised fine-tuning (dSFT) on larger models significantly improves task accuracy; however, these models are unaligned, i.e. they do not respond well to natural prompts. To distill this property, we experiment with the use of preference data from AI Feedback (AIF). Starting from a dataset of outputs ranked by a teacher model, we apply distilled direct preference optimization (dDPO) to learn a chat model with significantly improved intent alignment. The approach requires only a few hours of training without any additional sampling during fine-tuning. The final result, Zephyr-7B, set a new state-of-the-art on chat benchmarks for 7B parameter models, and requires no human annotation. In particular, results on MT-Bench show that Zephyr-7B surpassed Llama2-Chat-70B, at the time the best open-access RLHF-based model.", "title":"Zephyr: Direct Distillation of LM Alignment", "authors":[ "Lewis Tunstall", "Edward Emanuel Beeching", "Nathan Lambert", "Nazneen Rajani", "Kashif Rasul", "Younes Belkada", "Shengyi Huang", "Leandro Von Werra", "Cl\u00e9mentine Fourrier", "Nathan Habib", "Nathan Sarrazin", "Omar Sanseviero", "Alexander M Rush", "Thomas Wolf" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/huggingface\/alignment-handbook" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":127 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Zu8OWNUC0u", "bibtext":"@inproceedings{\nfehr2024nonparametric,\ntitle={Nonparametric Variational Regularisation of Pretrained Transformers},\nauthor={Fabio James Fehr and James Henderson},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Zu8OWNUC0u}\n}", "abstract":"Pretrained transformers have demonstrated impressive abilities, but tend not to generalise well out-of-domain and are very expensive to fine-tune on new domain data. Nonparametric Variational Information Bottleneck (NVIB) has been proposed as a regulariser for training cross-attention in transformers, potentially addressing this domain overfitting problem. We extend the NVIB framework to replace all types of attention functions in transformers. We show that existing pretrained transformers can be reinterpreted as nonparametric variational models using an empirical prior distribution and identity initialisation with controllable hyperparameters. We then show that changing the initialisation introduces a novel, information-theoretic post-training regularisation in the attention mechanism, which improves out-of-domain generalisation on NLP tasks without any additional training. This success supports the hypothesis that the way pretrained transformer embeddings represent information is accurately characterised by nonparametric variational Bayesian models.", "title":"Nonparametric Variational Regularisation of Pretrained Transformers", "authors":[ "Fabio James Fehr", "James Henderson" ], "id":"Conference", "type":"Poster", "arxiv_id":"2312.00662", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2312.00662", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":2, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":128 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Zt1dwG8xrK", "bibtext":"@inproceedings{\nhron2024training,\ntitle={Training Language Models on the Knowledge Graph: Insights on Hallucinations and Their Detectability},\nauthor={Jiri Hron and Laura A Culp and Gamaleldin Fathy Elsayed and Rosanne Liu and Jasper Snoek and Simon Kornblith and Alex Rizkowsky and Isabelle Simpson and Jascha Sohl-Dickstein and Noah Fiedel and Aaron T Parisi and Alexander A Alemi and Azade Nova and Ben Adlam and Bernd Bohnet and Gaurav Mishra and Hanie Sedghi and Izzeddin Gur and Jaehoon Lee and John D Co-Reyes and Kathleen Kenealy and Kelvin Xu and Kevin Swersky and Igor Mordatch and Lechao Xiao and Maxwell Bileschi and Peter J Liu and Roman Novak and Sharad Vikram and Tris Warkentin and Jeffrey Pennington},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Zt1dwG8xrK}\n}", "abstract":"While many capabilities of language models (LMs) improve with increased training budget, the influence of scale on hallucinations is not yet fully understood. Hallucinations come in many forms, and there is no universally accepted definition. We thus focus on studying only those hallucinations where a correct answer appears verbatim in the training set. To fully control the training data content, we construct a knowledge graph (KG)-based dataset, and use it to train a set of increasingly large LMs. We find that fora fixed dataset, larger and longer-trained LMs hallucinate less. However, hallucinating on\u22645% of the training data requires an order of magnitude larger model, and thus an order of magnitude more compute, than Hoffmann et al. (2022) reported was optimal. Given this costliness, we study how hallucination detectors depend on scale. While we see detector size improves performance on fixed LM\u2019s outputs, we find an inverse relationship between the scale of the LM and the detectability of its hallucinations.", "title":"Training Language Models on the Knowledge Graph: Insights on Hallucinations and Their Detectability", "authors":[ "Jiri Hron", "Laura A Culp", "Gamaleldin Fathy Elsayed", "Rosanne Liu", "Jasper Snoek", "Simon Kornblith", "Alex Rizkowsky", "Isabelle Simpson", "Jascha Sohl-Dickstein", "Noah Fiedel", "Aaron T Parisi", "Alexander A Alemi", "Azade Nova", "Ben Adlam", "Bernd Bohnet", "Gaurav Mishra", "Hanie Sedghi", "Izzeddin Gur", "Jaehoon Lee", "John D Co-Reyes", "Kathleen Kenealy", "Kelvin Xu", "Kevin Swersky", "Igor Mordatch", "Lechao Xiao", "Maxwell Bileschi", "Peter J Liu", "Roman Novak", "Sharad Vikram", "Tris Warkentin", "Jeffrey Pennington" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":129 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Zq9Dfj4nBo", "bibtext":"@inproceedings{\nweiss2024redesigning,\ntitle={Redesigning Information Markets in the Era of Language Models},\nauthor={Martin Weiss and Nasim Rahaman and Manuel Wuthrich and Yoshua Bengio and Li Erran Li and Bernhard Sch{\\\"o}lkopf and Christopher Pal},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Zq9Dfj4nBo}\n}", "abstract":"Information markets face many challenges leading to instability, inefficiency, and failure, ultimately reducing incentives for the creation and distribution of high-quality information. A long-standing issue for information markets is the Buyer's Inspection Paradox: buyers need to inspect information to assess its value, while sellers must limit inspection to prevent unauthorized use or theft. This paradox results from the information asymmetry present in the market, where sellers know more about the quality of their goods than buyers. This work proposes an information market design that leverages language models to mitigate the Buyer's Inspection Paradox by enabling inspection, comparison, and purchase of information, while algorithmically preventing expropriation. Our experiments (a) show methods that improve the economic rationality of language models, (b) investigate how language model behaviour changes with the price of goods, and (c) evaluate the simulated cost-efficiency of the proposed market under various conditions.", "title":"Redesigning Information Markets in the Era of Language Models", "authors":[ "Martin Weiss", "Nasim Rahaman", "Manuel Wuthrich", "Yoshua Bengio", "Li Erran Li", "Bernhard Sch\u00f6lkopf", "Christopher Pal" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":130 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Zb0ajZ7vAt", "bibtext":"@inproceedings{\nshnitzer2024large,\ntitle={Large Language Model Routing with Benchmark Datasets},\nauthor={Tal Shnitzer and Anthony Ou and M{\\'\\i}rian Silva and Kate Soule and Yuekai Sun and Justin Solomon and Neil Thompson and Mikhail Yurochkin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Zb0ajZ7vAt}\n}", "abstract":"The number of open-source Large Language Models (LLMs) grows daily, as does the number of available benchmark datasets used to evaluate LLMs. While some models dominate these benchmarks, no single model achieves the best accuracy in all tasks and use cases. In light of this observation, we address the challenge of selecting the best LLM from a collection of pre-trained models, given a new task. While related work relies on evaluating each candidate model on a set of labeled examples, our new formulation does not assume any labeled data from the new task is available. Instead, we repurpose a collection of benchmark datasets---which may focus on different tasks than the one at hand---to learn a ''router'' model for LLM selection from inputs only; this problem reduces to a collection of binary classification tasks. Empirically, our strategy consistently improves performance over using any single model for all tasks.", "title":"Large Language Model Routing with Benchmark Datasets", "authors":[ "Tal Shnitzer", "Anthony Ou", "M\u00edrian Silva", "Kate Soule", "Yuekai Sun", "Justin Solomon", "Neil Thompson", "Mikhail Yurochkin" ], "id":"Conference", "type":"Poster", "arxiv_id":"2309.15789", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2309.15789", "n_linked_authors":2, "upvotes":1, "num_comments":0, "n_authors":8, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":131 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ZZzXpyv65G", "bibtext":"@inproceedings{\nye2024language,\ntitle={Language Models as Critical Thinking Tools: A Case Study of Philosophers},\nauthor={Andre Ye and Jared Moore and Rose Novick and Amy X Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ZZzXpyv65G}\n}", "abstract":"Current work in language models (LMs) helps us speed up or even skip thinking by accelerating and automating cognitive work.\nBut can LMs help us with critical thinking -- thinking in deeper, more reflective ways which challenge assumptions, clarify ideas, and engineer new concepts?\nWe treat philosophy as a case study in critical thinking, and interview 21 professional philosophers about how they engage in critical thinking and on their experiences with LMs.\nWe find that philosophers do not find LMs to be useful because they lack a sense of selfhood (memory, beliefs, consistency) and initiative (curiosity, proactivity). We propose the selfhood-initiative model for critical thinking tools to characterize this gap.\nUsing the model, we formulate three roles LMs could play as critical thinking tools: the Interlocutor, the Monitor, and the Respondent.\nWe hope that our work inspires LM researchers to further develop LMs as critical thinking tools and philosophers and other `critical thinkers' to imagine intellectually substantive uses of LMs.", "title":"Language Models as Critical Thinking Tools: A Case Study of Philosophers", "authors":[ "Andre Ye", "Jared Moore", "Rose Novick", "Amy X Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.04516", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":132 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ZDdLamBX4P", "bibtext":"@inproceedings{\nnarayan2024cookbook,\ntitle={Cookbook: A framework for improving {LLM} generative abilities via programmatic data generating templates},\nauthor={Avanika Narayan and Mayee F Chen and Kush Bhatia and Christopher Re},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ZDdLamBX4P}\n}", "abstract":"Fine-tuning large language models (LLMs) on instruction datasets is a common way to improve their generative capabilities. \nHowever, instruction datasets can be expensive and time-consuming to manually curate, and while LLM-generated data is less labor-intensive, it may violate user privacy agreements or terms of service of LLM providers. Therefore, we seek a way of constructing instruction datasets with samples that are not generated by humans or LLMs but still improve LLM generative capabilities. In this work, we introduce Cookbook, a framework that programmatically generates training data consisting of simple patterns over random tokens, resulting in a scalable, cost-effective approach that avoids legal and privacy issues. First, Cookbook uses a template---a data generating Python function---to produce training data that encourages the model to learn an explicit pattern-based rule that corresponds to a desired task. We find that fine-tuning on Cookbook-generated data is able to improve performance on its corresponding task by up to 52.7 accuracy points. Second, since instruction datasets improve performance on multiple downstream tasks simultaneously, Cookbook algorithmically learns how to mix data from various templates to optimize performance on multiple tasks. On the standard multi-task GPT4ALL evaluation suite, Mistral-7B fine-tuned using a Cookbook-generated dataset attains the best accuracy on average compared to other 7B parameter instruction-tuned models and is the best performing model on 3 out of 8 tasks. Finally, we analyze when and why Cookbook improves performance and present a metric that allows us to verify that the improvement is largely explained by the model\u2019s generations adhering better to template rules.", "title":"Cookbook: A framework for improving LLM generative abilities via programmatic data generating templates", "authors":[ "Avanika Narayan", "Mayee F Chen", "Kush Bhatia", "Christopher Re" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":133 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=YwrNePfb3E", "bibtext":"@inproceedings{\nfeffer2024prompt,\ntitle={Prompt Exploration with Prompt Regression},\nauthor={Michael Feffer and Ronald Xu and Yuekai Sun and Mikhail Yurochkin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=YwrNePfb3E}\n}", "abstract":"In the advent of democratized usage of large language models (LLMs), there is a growing desire to systematize LLM prompt creation and selection processes beyond iterative trial-and-error. Prior works majorly focus on searching the space of prompts without accounting for relations between prompt variations. Here we propose a framework, Prompt Exploration with Prompt Regression (PEPR), to predict the effect of prompt combinations given results for individual prompt elements as well as a simple method to select an effective prompt for a given use-case. We evaluate our approach with open-source LLMs of different sizes on several different tasks.", "title":"Prompt Exploration with Prompt Regression", "authors":[ "Michael Feffer", "Ronald Xu", "Yuekai Sun", "Mikhail Yurochkin" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.11083", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":134 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=YfHxQSoaWU", "bibtext":"@inproceedings{\nkim2024fables,\ntitle={{FABLES}: Evaluating faithfulness and content selection in book-length summarization},\nauthor={Yekyung Kim and Yapei Chang and Marzena Karpinska and Aparna Garimella and Varun Manjunatha and Kyle Lo and Tanya Goyal and Mohit Iyyer},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=YfHxQSoaWU}\n}", "abstract":"While long-context large language models (LLMs) can technically summarize book-length documents (> 100K tokens), the length and complexity of the documents have so far prohibited evaluations of input-dependent aspects like faithfulness. In this paper, we conduct the first large-scale human evaluation of faithfulness and content selection on LLM-generated summaries of fictional books. Our study mitigates the issue of data contamination by focusing on summaries of books published in 2023 or 2024, and we hire annotators who have fully read each book prior to the annotation task to minimize cost and cognitive burden. We collect FABLES, a dataset of annotations on 3,158 claims made in LLM-generated summaries of 26 books, at a cost of $5.2K USD, which allows us to rank LLM summarizers based on faithfulness: CLAUDE-3-OPUS significantly outperforms all closedsource LLMs, while the open-source MIXTRAL is on par with GPT-3.5-TURBO. An analysis of the annotations reveals that most unfaithful claims relate to events and character states, and they generally require indirect reasoning over the narrative to invalidate. While LLM-based auto-raters have proven reliable for factuality and coherence in other settings, we implement several LLM raters of faithfulness and find that none correlates strongly with human annotations, especially with regard to detecting unfaithful claims. Our experiments suggest that detecting unfaithful claims is an important future direction not only for summarization evaluation but also as a testbed for long-context understanding. Finally, we move beyond faithfulness by exploring content selection errors in book-length summarization: we develop a typology of omission errors related to crucial narrative elements and also identify a systematic over-emphasis on events occurring towards the end of the book. We release FABLES to spur further research on the evaluation of book-length summarization.", "title":"FABLES: Evaluating faithfulness and content selection in book-length summarization", "authors":[ "Yekyung Kim", "Yapei Chang", "Marzena Karpinska", "Aparna Garimella", "Varun Manjunatha", "Kyle Lo", "Tanya Goyal", "Mohit Iyyer" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mungg\/fables" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":135 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=YX7QnhxESU", "bibtext":"@inproceedings{\nliang2024mapping,\ntitle={Mapping the Increasing Use of {LLM}s in Scientific Papers},\nauthor={Weixin Liang and Yaohui Zhang and Zhengxuan Wu and Haley Lepp and Wenlong Ji and Xuandong Zhao and Hancheng Cao and Sheng Liu and Siyu He and Zhi Huang and Diyi Yang and Christopher Potts and Christopher D Manning and James Y. Zou},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=YX7QnhxESU}\n}", "abstract":"Scientific publishing lays the foundation of science by disseminating research findings, fostering collaboration, encouraging reproducibility, and ensuring that scientific knowledge is accessible, verifiable, and built upon over time. Recently, there has been immense speculation about how many people are using large language models (LLMs) like ChatGPT in their academic writing, and to what extent this tool might have an effect on global scientific practices. However, we lack a precise measure of the proportion of academic writing substantially modified or produced by LLMs. To address this gap, we conduct the first systematic, large-scale analysis across 950,965 papers published between January 2020 and February 2024 on the $\\textit{arXiv}$, $\\textit{bioRxiv}$, and $\\textit{Nature}$ portfolio journals, using a population-level statistical framework to measure the prevalence of LLM-modified content over time. The statistical framework operates on the population level without the need to perform inference on any individual instance. Our findings reveal a steady increase in LLM usage, with the largest and fastest growth observed in Computer Science papers (up to 17.5\\%). In comparison, Mathematics papers and the Nature portfolio showed the least LLM modification (up to 6.3\\%). Moreover, at an aggregate level, our analysis reveals that higher levels of LLM-modification are associated with papers whose first authors post preprints more frequently, papers in more crowded areas, and papers with shorter lengths. Our findings suggests that LLMs are being broadly used in scientific papers.", "title":"Mapping the Increasing Use of LLMs in Scientific Papers", "authors":[ "Weixin Liang", "Yaohui Zhang", "Zhengxuan Wu", "Haley Lepp", "Wenlong Ji", "Xuandong Zhao", "Hancheng Cao", "Sheng Liu", "Siyu He", "Zhi Huang", "Diyi Yang", "Christopher Potts", "Christopher D Manning", "James Y. Zou" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01268", "GitHub":[ "https:\/\/github.com\/Weixin-Liang\/Mapping-the-Increasing-Use-of-LLMs-in-Scientific-Papers" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":136 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=YDZ7GeFLxq", "bibtext":"@inproceedings{\ntan2024scattered,\ntitle={Scattered Mixture-of-Experts Implementation},\nauthor={Shawn Tan and Yikang Shen and Rameswar Panda and Aaron Courville},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=YDZ7GeFLxq}\n}", "abstract":"ScatterMoE is an implementation of Sparse Mixture-of-Experts (SMoE) on GPUs. ScatterMoE builds upon techniques in existing implementations, and overcoming some of the current limitations to improve batched inference, training speed, and memory footprint. This implementation achieves this by avoiding padding and making excessive copies of the input. We also fuse expert linear transforms and reordering operations with ParallelLinear, a module that can be used to extend the concept of SMoEs. We benchmark our implementation against Megablocks, and show that it enables a higher throughput and lower memory footprint. We also show how ParallelLinear enables extension of the Mixture-of-Experts concept by demonstrating with an implementation of Mixture-of-Attention.", "title":"Scattered Mixture-of-Experts Implementation", "authors":[ "Shawn Tan", "Yikang Shen", "Rameswar Panda", "Aaron Courville" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/shawntan\/scattermoe" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":137 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Xh1B90iBSR", "bibtext":"@inproceedings{\nwang2024what,\ntitle={What Are Tools Anyway? A Survey from the Language Model Perspective},\nauthor={Zhiruo Wang and Zhoujun Cheng and Hao Zhu and Daniel Fried and Graham Neubig},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Xh1B90iBSR}\n}", "abstract":"Language models (LMs) are powerful yet mostly for text generation tasks. Tools have substantially enhanced their performance for tasks that require complex skills. However, many works adopt the term \u201ctool\u201d in different ways, raising the question: What is a tool anyway? Subsequently, where and how do tools help LMs? In this survey, we provide a unified definition of tools as external programs used by LMs, and perform a systematic review of LM tooling scenarios and approaches. Grounded on this review, we empirically study the efficiency of various tooling methods by measuring their required compute and performance gains on various benchmarks, and highlight some challenges and potential future research in the field.", "title":"What Are Tools Anyway? A Survey from the Language Model Perspective", "authors":[ "Zhiruo Wang", "Zhoujun Cheng", "Hao Zhu", "Daniel Fried", "Graham Neubig" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.15452", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.15452", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":138 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=XII0Wp1XA9", "bibtext":"@inproceedings{\nliu2024a,\ntitle={A Dynamic {LLM}-Powered Agent Network for Task-Oriented Agent Collaboration},\nauthor={Zijun Liu and Yanzhe Zhang and Peng Li and Yang Liu and Diyi Yang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=XII0Wp1XA9}\n}", "abstract":"Recent studies show that collaborating multiple large language model (LLM) powered agents is a promising way for task solving. However, current approaches are constrained by using a fixed number of agents and static communication structures. In this work, we propose automatically selecting a team of agents from candidates to collaborate in a dynamic communication structure toward different tasks and domains. Specifically, we build a framework named Dynamic LLM-Powered Agent Network ($\\textbf{DyLAN}$) for LLM-powered agent collaboration, operating a two-stage paradigm: (1) Team Optimization and (2) Task Solving. During the first stage, we utilize an agent selection algorithm, based on an unsupervised metric called Agent Importance Score, enabling the selection of best agents according to their contributions in a preliminary trial, oriented to the given task. Then, in the second stage, the selected agents collaborate dynamically according to the query. Empirically, we demonstrate that DyLAN outperforms strong baselines in code generation, decision-making, general reasoning, and arithmetic reasoning tasks with moderate computational cost. On specific subjects in MMLU, selecting a team of agents in the team optimization stage improves accuracy by up to 25.0% in DyLAN.", "title":"A Dynamic LLM-Powered Agent Network for Task-Oriented Agent Collaboration", "authors":[ "Zijun Liu", "Yanzhe Zhang", "Peng Li", "Yang Liu", "Diyi Yang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":139 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=XGJBEeziEb", "bibtext":"@inproceedings{\nzhang2024data,\ntitle={Data Checklist: On Unit-Testing Datasets with Usable Information},\nauthor={Heidi Chenyu Zhang and Shabnam Behzad and Kawin Ethayarajh and Dan Jurafsky},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=XGJBEeziEb}\n}", "abstract":"Model checklists (Ribeiro et al., 2020) have emerged as a useful tool for understanding the behavior of LLMs, analogous to unit-testing in software engineering. However, despite datasets being a key determinant of model behavior, evaluating datasets -- e.g., for the existence of annotation artifacts -- is largely done ad hoc, once a problem in model behavior has already been found downstream.\nIn this work, we take a more principled approach to unit-testing datasets by proposing a taxonomy based on the $\\mathcal{V}$-information literature. We call a collection of such unit tests a data checklist.\nUsing the checklist, not only are we able to recover known artifacts in well-known datasets such as SNLI, but we also discover previously unknown artifacts in preference datasets for LLM alignment.\nData checklists further enable a new kind of data filtering, which we use to improve the efficacy and data efficiency of preference alignment.", "title":"Data Checklist: On Unit-Testing Datasets with Usable Information", "authors":[ "Heidi Chenyu Zhang", "Shabnam Behzad", "Kawin Ethayarajh", "Dan Jurafsky" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ChenyuHeidiZhang\/data_checklist" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":140 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=X9yV4lFHt4", "bibtext":"@inproceedings{\nneplenbroek2024mbbq,\ntitle={{MBBQ}: A Dataset for Cross-Lingual Comparison of Stereotypes in Generative {LLM}s},\nauthor={Vera Neplenbroek and Arianna Bisazza and Raquel Fern{\\'a}ndez},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=X9yV4lFHt4}\n}", "abstract":"Generative large language models (LLMs) have been shown to exhibit harmful biases and stereotypes. While safety fine-tuning typically takes place in English, if at all, these models are being used by speakers of many different languages. There is existing evidence that the performance of these models is inconsistent across languages and that they discriminate based on demographic factors of the user. Motivated by this, we investigate whether the social stereotypes exhibited by LLMs differ as a function of the language used to prompt them, while controlling for cultural differences and task accuracy. To this end, we present MBBQ (Multilingual Bias Benchmark for Question-answering), a carefully curated version of the English BBQ dataset extended to Dutch, Spanish, and Turkish, which measures stereotypes\ncommonly held across these languages. We further complement MBBQ with a parallel control dataset to measure task performance on the question-answering task independently of bias. Our results based on several open-source and proprietary LLMs confirm that some non-English languages suffer from bias more than English, even when controlling for cultural shifts. Moreover, we observe significant cross-lingual differences in bias behaviour for all except the most accurate models. With the release of MBBQ, we hope to encourage further research on bias in multilingual settings. The dataset and code are available at https:\/\/github.com\/Veranep\/MBBQ.", "title":"MBBQ: A Dataset for Cross-Lingual Comparison of Stereotypes in Generative LLMs", "authors":[ "Vera Neplenbroek", "Arianna Bisazza", "Raquel Fern\u00e1ndez" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/veranep\/mbbq" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":141 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=X1xNsuKssb", "bibtext":"@inproceedings{\nwang2024mambabyte,\ntitle={MambaByte: Token-free Selective State Space Model},\nauthor={Junxiong Wang and Tushaar Gangavarapu and Jing Nathan Yan and Alexander M Rush},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=X1xNsuKssb}\n}", "abstract":"Token-free language models learn directly from raw bytes and remove the inductive bias of subword tokenization. Operating on bytes, however, results in significantly longer sequences. In this setting, standard autoregressive Transformers scale poorly as the effective memory required grows with sequence length. The recent development of the Mamba state space model (SSM) offers an appealing alternative approach with a fixed-sized memory state and efficient decoding. We propose MambaByte, a token-free adaptation of the Mamba SSM trained autoregressively on byte sequences. In terms of modeling, we show MambaByte to be competitive with, and even to outperform, state-of-the-art subword Transformers on language modeling tasks while maintaining the benefits of token-free language models, such as robustness to noise. In terms of efficiency, we develop an adaptation of speculative decoding with tokenized drafting and byte-level verification. This results in a $2.6\\times$ inference speedup to the standard MambaByte implementation, showing similar decoding efficiency as the subword Mamba. These findings establish the viability of SSMs in enabling token-free language modeling.", "title":"MambaByte: Token-free Selective State Space Model", "authors":[ "Junxiong Wang", "Tushaar Gangavarapu", "Jing Nathan Yan", "Alexander M Rush" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":142 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=W8Rv1jVycX", "bibtext":"@inproceedings{\nravfogel2024descriptionbased,\ntitle={Description-Based Text Similarity},\nauthor={Shauli Ravfogel and Valentina Pyatkin and Amir David Nissan Cohen and Avshalom Manevich and Yoav Goldberg},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=W8Rv1jVycX}\n}", "abstract":"Identifying texts with a given semantics is central for many information seeking scenarios. Similarity search over vector embeddings appear to be central to this ability, yet the similarity reflected in current text embeddings is corpus-driven, and is inconsistent and sub-optimal for many use cases. What, then, is a good notion of similarity for effective retrieval of text?\n\nWe identify the need to search for texts based on abstract descriptions of their content, and the corresponding notion of \\emph{description based similarity}. We demonstrate the inadequacy of current text embeddings and propose an alternative model that significantly improves when used in standard nearest neighbor search. The model is trained using positive and negative pairs sourced through prompting a LLM, demonstrating how data from LLMs can be used for creating new capabilities not immediately possible using the original model.", "title":"Description-Based Text Similarity", "authors":[ "Shauli Ravfogel", "Valentina Pyatkin", "Amir David Nissan Cohen", "Avshalom Manevich", "Yoav Goldberg" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":143 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Vd0KvChLXr", "bibtext":"@inproceedings{\nguo2024generating,\ntitle={Generating Synthetic Datasets for Few-shot Prompt Tuning},\nauthor={Xu Guo and Zilin Du and Boyang Li and Chunyan Miao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Vd0KvChLXr}\n}", "abstract":"A major limitation of prompt tuning is its dependence on large labeled training datasets. Under few-shot learning settings, prompt tuning lags far behind full-model fine-tuning, limiting its scope of application. In this paper, we leverage the powerful LLMs to synthesize task-specific labeled data for training the soft prompts. We first introduce a distribution-aligned weighted generator tuning (DawGen) method to encourage generating in-distribution data that aligns with the few-shot real data. Then, we train soft prompts on both synthetic and real datasets using a gradient surgery approach, which eliminates the conflicting gradients from different data sources. Experiments on seven sentence-pair classification datasets demonstrate the effectiveness of our proposed method for boosting prompt tuning in few-shot learning settings. Results on QQP, MRPC, and SICK datasets are even comparable to the performance of transfer learning from large real-world datasets, showing the promise of synthetic data as an alternative for enhancing soft prompt tuning.", "title":"Generating Synthetic Datasets for Few-shot Prompt Tuning", "authors":[ "Xu Guo", "Zilin Du", "Boyang Li", "Chunyan Miao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":144 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=VWWzO3ewMS", "bibtext":"@inproceedings{\nkhurana2024crowdcalibrator,\ntitle={Crowd-Calibrator: Can Annotator Disagreement Inform Calibration in Subjective Tasks?},\nauthor={Urja Khurana and Eric Nalisnick and Antske Fokkens and Swabha Swayamdipta},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=VWWzO3ewMS}\n}", "abstract":"Subjective tasks in NLP have been mostly relegated to objective standards, where the gold label is decided by taking the majority vote. This obfuscates annotator disagreement and the inherent uncertainty of the label. We argue that subjectivity should factor into model decisions and play a direct role via calibration under a selective prediction setting. Specifically, instead of calibrating confidence purely from the model\u2019s perspective, we calibrate models for subjective tasks based on crowd worker agreement. Our method, Crowd-Calibrator, models the distance between the distribution of crowd worker labels and the model\u2019s own distribution over labels to inform whether the model should abstain from a decision. On two highly subjective tasks, hate speech detection and natural language inference, our experiments show Crowd-Calibrator either outperforms or achieves competitive performance with existing selective prediction baselines. Our findings highlight the value of bringing human decision-making into model predictions.", "title":"Crowd-Calibrator: Can Annotator Disagreement Inform Calibration in Subjective Tasks?", "authors":[ "Urja Khurana", "Eric Nalisnick", "Antske Fokkens", "Swabha Swayamdipta" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.14141", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":145 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=VHhwhmtx3b", "bibtext":"@inproceedings{\nbai2024does,\ntitle={Does Ro{BERT}a Perform Better than {BERT} in Continual Learning: An Attention Sink Perspective},\nauthor={Xueying Bai and Yifan Sun and Niranjan Balasubramanian},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=VHhwhmtx3b}\n}", "abstract":"Continual learning (CL) aims to train models that can sequentially learn new tasks without forgetting previous tasks' knowledge. Although previous works observed that pre-training can benefit CL, it remains unclear whether a pre-trained model with higher downstream capacity also performs better in CL. In this paper, we observe that pre-trained models may allocate high attention scores to some 'sink' tokens, such as [SEP] tokens, which are ubiquitous across various tasks. Such attention sinks may lead to models' over-smoothing in single-task learning and interference in sequential tasks\u2019 learning, which may compromise the models' CL performance despite their high pre-trained capabilities. To reduce these effects, we propose a pre-scaling mechanism that encourages attention diversity across all tokens. Specifically, it first scales the task's attention to the non-sink tokens in a probing stage, and then fine-tunes the model with scaling. Experiments show that pre-scaling yields substantial improvements in CL without experience replay, or progressively storing parameters from previous tasks.", "title":"Does RoBERTa Perform Better than BERT in Continual Learning: An Attention Sink Perspective", "authors":[ "Xueying Bai", "Yifan Sun", "Niranjan Balasubramanian" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":146 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=V7HRrxXUhN", "bibtext":"@inproceedings{\nthakur2024an,\ntitle={An In-Context Learning Agent for Formal Theorem-Proving},\nauthor={Amitayush Thakur and George Tsoukalas and Yeming Wen and Jimmy Xin and Swarat Chaudhuri},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=V7HRrxXUhN}\n}", "abstract":"We present an in-context learning agent for formal theorem-proving in environments like Lean and Coq. Current state-of-the-art models for the problem are finetuned on environment-specific proof data. By contrast, our approach, called COPRA, repeatedly asks a high-capacity, general-purpose large language model (GPT-4) to propose tactic applications from within a stateful backtracking search. Proposed tactics are executed in the underlying proof environment. Feedback from the execution is used to build the prompt for the next model query, along with selected information from the search history and lemmas retrieved from an external database. We evaluate our implementation of COPRA on the miniF2F benchmark for Lean and a set of Coq tasks from the CompCert project. On these benchmarks, COPRA significantly outperforms few-shot invocations of GPT-4. It also compares favorably against finetuning-based approaches, outperforming REPROVER, a state-of-the-art finetuned approach for Lean, in terms of the pass@1 metric. Our code and data are available at https:\/\/github.com\/trishullab\/copra", "title":"An In-Context Learning Agent for Formal Theorem-Proving", "authors":[ "Amitayush Thakur", "George Tsoukalas", "Yeming Wen", "Jimmy Xin", "Swarat Chaudhuri" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/trishullab\/copra" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":147 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=UyNIH6CWHH", "bibtext":"@inproceedings{\nhagemann2024efficient,\ntitle={Efficient Parallelization Layouts for Large-Scale Distributed Model Training},\nauthor={Johannes Hagemann and Samuel Weinbach and Konstantin Dobler and Maximilian Schall and Gerard de Melo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=UyNIH6CWHH}\n}", "abstract":"Efficiently training large language models requires parallelizing across hundreds of hardware accelerators and invoking various compute and memory optimizations. \nWhen combined, many of these strategies have complex interactions regarding the final training efficiency. Prior work tackling this problem did not have access to the latest set of optimizations, such as FlashAttention or sequence parallelism. In this work, we conduct a comprehensive ablation study of possible training configurations for large language models. We distill this large study into several key recommendations for the most efficient training.\nFor instance, we find that using a micro-batch size of 1 usually enables the most efficient training layouts. Larger micro-batch sizes necessitate activation checkpointing or higher degrees of model parallelism and also lead to larger pipeline bubbles.\nOur most efficient configurations enable us to achieve state-of-the-art training efficiency results over a range of model sizes, most notably a Model FLOPs utilization of 70.5% when training a LLaMA 13B model.", "title":"Efficient Parallelization Layouts for Large-Scale Distributed Model Training", "authors":[ "Johannes Hagemann", "Samuel Weinbach", "Konstantin Dobler", "Maximilian Schall", "Gerard de Melo" ], "id":"Conference", "type":"Poster", "arxiv_id":"2311.05610", "GitHub":[ "https:\/\/github.com\/aleph-alpha\/neurips-want-submission-efficient-parallelization-layouts" ], "paper_page":"https:\/\/huggingface.co\/papers\/2311.05610", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":148 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Ukf4301hXm", "bibtext":"@inproceedings{\nzhang2024unforgettable,\ntitle={Unforgettable Generalization in Language Models},\nauthor={Eric Zhang and Leshem Choshen and Jacob Andreas},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Ukf4301hXm}\n}", "abstract":"When language models (LMs) are trained to ``unlearn'' a skill, does this unlearning generalize? We study the behavior of LMs after fine-tuned on data for a target task (e.g. sentiment analysis) in which the labels have been randomized, a popular unlearning method. While LMs consistently learn to generate near-random predictions for individual training examples in the unlearning set, there is extreme variability across tasks in whether LM predictions change on examples outside the unlearning set. In some tasks (like sentiment analysis), unlearning generalizes robustly, and causes models to generate random outputs on all sentiment-type inputs; in other tasks (like physical commonsense reasoning and scientific question answering) unlearning produces almost no generalization at all, and models continue to perform the task accurately even for examples very similar to those that appeared in the training set. Across tasks, we find that dataset difficulty is not predictive of whether a behavior can be unlearned; instead, generalization in unlearning is (weakly) predicted by the confidence of LMs' initial task predictions and the variability of LM representations of unlearning data, with low confidence and low variability both associated with greater generalization. Finally, we show that even generalizable unlearning is shallow: linear probes trained on LMs' representations can still perform tasks reliably after unlearning. Our results highlight the difficulty and unpredictability of performing targeted skill removal from models via fine-tuning.", "title":"Unforgettable Generalization in Language Models", "authors":[ "Eric Zhang", "Leshem Choshen", "Jacob Andreas" ], "id":"Conference", "type":"Poster", "arxiv_id":"2409.02228", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2409.02228", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":149 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Uhwze2LEwq", "bibtext":"@inproceedings{\ndingjie2024milebench,\ntitle={MileBench: Benchmarking {MLLM}s in Long Context},\nauthor={Song Dingjie and Shunian Chen and Guiming Hardy Chen and Fei Yu and Xiang Wan and Benyou Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Uhwze2LEwq}\n}", "abstract":"Despite the rapid progression of Multimodal Large Language Models (MLLMs) and their impressive performance on various benchmarks, the applicability of these results to real-world tasks remains uncertain. \nThis ambiguity primarily stems from the benchmarks' limited consideration for long-context and multi-image tasks, which are critical elements in real-world applications.\nExisting benchmarks often focus on single-image and short-text samples, and when assessing multi-image tasks, they either limit the image count or focus on time-series captioning tasks, potentially masking MLLMs' performance challenges such as hallucination in long-context situations. \nTo address these limitations, we introduce \\textbf{\\dataset}, a pioneering benchmark designed to rigorously test the \\textbf{M}ult\\textbf{I}modal \\textbf{L}ong-cont\\textbf{E}xt capabilities of MLLMs. \nThis benchmark comprises a mix of text and images, long contexts, multiple tasks, and tasks requiring both comprehension and generation. \nWe establish two distinct evaluation sets, diagnostic and realistic, to systematically assess MLLMs' long-context adaptation capacity and their ability to complete tasks in long-context scenarios.\nOur experimental results, garnered from testing 19 models, revealed that while closed-source model GPT-4(Vision) outperforms others, most open-source MLLMs display inadequate performance in long-context situations. \nHence, we strongly encourage an intensification of research efforts towards enhancing MLLMs' long-context capabilities, especially in scenarios involving multiple images.", "title":"MileBench: Benchmarking MLLMs in Long Context", "authors":[ "Song Dingjie", "Shunian Chen", "Guiming Hardy Chen", "Fei Yu", "Xiang Wan", "Benyou Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":150 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=UfqzXg95I5", "bibtext":"@inproceedings{\nliao2024amplegcg,\ntitle={Ample{GCG}: Learning a Universal and Transferable Generative Model of Adversarial Suffixes for Jailbreaking Both Open and Closed {LLM}s},\nauthor={Zeyi Liao and Huan Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=UfqzXg95I5}\n}", "abstract":"\\begin{center} \\textcolor{red}{Warning: This paper contains potentially offensive and harmful text.}\\end{center}\nAs large language models (LLMs) become increasingly prevalent and integrated into autonomous systems, ensuring their safety is imperative. Despite significant strides toward safety alignment, recent work GCG~\\citep{zou2023universal} proposes a discrete tokens optimization algorithm and selects the single suffix with the lowest loss to successfully jailbreak aligned LLMs. In this work, we first discuss the drawbacks of solely picking the suffix with the lowest loss during GCG optimization for jailbreaking and uncover the missed successful suffixes during the intermediate steps.\nMoreover, we utilize those successful suffixes as training data to learn a generative model, named AmpleGCG, which captures the distribution of adversarial suffixes given a harmful query and enables the rapid generation of hundreds of suffixes for any harmful queries in seconds. AmpleGCG achieves near 100\\% attack success rate (ASR) on two aligned LLMs (Llama-2-7B-chat and Vicuna-7B), surpassing two strongest attack baselines. \nMore interestingly, AmpleGCG also transfers seamlessly to attack different models, including closed-source LLMs, achieving a 99\\% ASR on the latest GPT-3.5.\nTo summarize, our work amplifies the impact of GCG by training a generative model of adversarial suffixes that is universal to any harmful queries and transferable from attacking open-source LLMs to closed-source LLMs. Impressively, it can generate 200 adversarial suffixes for one harmful query in only 4 seconds, rendering it more challenging to defend.", "title":"AmpleGCG: Learning a Universal and Transferable Generative Model of Adversarial Suffixes for Jailbreaking Both Open and Closed LLMs", "authors":[ "Zeyi Liao", "Huan Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.07921", "GitHub":[ "https:\/\/github.com\/osu-nlp-group\/amplegcg" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.07921", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":2, "Models":[ "osunlp\/AmpleGCG-llama2-sourced-llama2-7b-chat", "osunlp\/AmpleGCG-llama2-sourced-vicuna-7b", "osunlp\/AmpleGCG-llama2-sourced-vicuna-7b13b-guanaco-7b13b", "osunlp\/AmpleGCG-plus-llama2-sourced-llama2-7b-chat", "osunlp\/AmpleGCG-plus-llama2-sourced-vicuna-7b13b-guanaco-7b13b" ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":151 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=UfWwBaLuXV", "bibtext":"@inproceedings{\nyan2024list,\ntitle={List Items One by One: A New Data Source and Learning Paradigm for Multimodal {LLM}s},\nauthor={An Yan and Zhengyuan Yang and Junda Wu and Wanrong Zhu and Jianwei Yang and Linjie Li and Kevin Lin and Jianfeng Wang and Julian McAuley and Jianfeng Gao and Lijuan Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=UfWwBaLuXV}\n}", "abstract":"Set-of-Mark (SoM) Prompting unleashes the visual grounding capability of GPT-4V, by enabling the model to associate visual objects with tags inserted on the image. These tags, marked with alphanumerics, can be indexed via text tokens for easy reference. Despite the extraordinary performance from GPT-4V, we observe that other Multimodal Large Language Models (MLLMs) struggle to understand these visual tags. To promote the learning of SoM prompting for open-source models, we propose a new learning paradigm: list items one by one, which asks the model to enumerate and describe all visual tags placed on the image following the alphanumeric order of tags. By integrating our synthetic dataset with other visual instruction tuning datasets, we are able to equip existing MLLMs with the SoM prompting ability. Furthermore, we evaluate our finetuned SoM models on seven MLLM benchmarks. We find that this new dataset, even in a relatively small size (10k-30k images with tags), significantly enhances visual reasoning capabilities and reduces hallucinations for MLLMs. Perhaps surprisingly, these improvements persist even when the visual tags are omitted from input images during inference. This suggests the potential of ``list items one by one'' as a new paradigm for training MLLMs, which strengthens the object-text alignment through the use of visual tags in the training stage. Finally, we conduct analyses by probing trained models to understand the working mechanism of SoM. Our code and data are available at https:\/\/github.com\/zzxslp\/SoM-LLaVA.", "title":"List Items One by One: A New Data Source and Learning Paradigm for Multimodal LLMs", "authors":[ "An Yan", "Zhengyuan Yang", "Junda Wu", "Wanrong Zhu", "Jianwei Yang", "Linjie Li", "Kevin Lin", "Jianfeng Wang", "Julian McAuley", "Jianfeng Gao", "Lijuan Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.16375", "GitHub":[ "https:\/\/github.com\/zzxslp\/som-llava" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.16375", "n_linked_authors":9, "upvotes":16, "num_comments":2, "n_authors":11, "Models":[ "zzxslp\/som-llava-v1.5-13b", "zzxslp\/som-llava-v1.5-13b-hf" ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":152 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=UPyWLwciYz", "bibtext":"@inproceedings{\nkhalifa2024sourceaware,\ntitle={Source-Aware Training Enables Knowledge Attribution in Language Models},\nauthor={Muhammad Khalifa and David Wadden and Emma Strubell and Honglak Lee and Lu Wang and Iz Beltagy and Hao Peng},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=UPyWLwciYz}\n}", "abstract":"Large language models (LLMs) learn a vast amount of knowledge during pretraining, but they are often oblivious to the source(s) of such knowledge. We investigate the problem of intrinsic source citation, where LLMs are required to cite the pretraining source supporting a generated response. Intrinsic source citation can enhance LLM transparency, interpretability, and verifiability. To give LLMs such ability,\nwe explore source-aware training---a recipe that involves (i) training the LLM to associate unique source document identifiers with the knowledge in each document, followed by (ii) an instruction-tuning stage to teach the LLM to cite a supporting pretraining source when prompted. Source-aware training borrows from existing pretraining\/fine-tuning frameworks and requires minimal changes to the model architecture or implementation. Through experiments on synthetic data, we demonstrate that our training recipe can enable faithful attribution to the pretraining data without a substantial impact on the model's perplexity compared to standard pretraining. Our findings also highlight the importance of pretraining data augmentation in achieving attribution.", "title":"Source-Aware Training Enables Knowledge Attribution in Language Models", "authors":[ "Muhammad Khalifa", "David Wadden", "Emma Strubell", "Honglak Lee", "Lu Wang", "Iz Beltagy", "Hao Peng" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mukhal\/intrinsic-source-citation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":153 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=UPE6WYE8vg", "bibtext":"@inproceedings{\nmao2024a,\ntitle={A Language Agent for Autonomous Driving},\nauthor={Jiageng Mao and Junjie Ye and Yuxi Qian and Marco Pavone and Yue Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=UPE6WYE8vg}\n}", "abstract":"Human-level driving is an ultimate goal of autonomous driving. Conventional approaches formulate autonomous driving as a perception-prediction-planning framework, yet their systems do not capitalize on the inherent reasoning ability and experiential knowledge of humans. In this paper, we propose a fundamental paradigm shift from current pipelines, exploiting Large Language Models (LLMs) as a cognitive agent to integrate human-like intelligence into autonomous driving systems. Our system, termed Agent-Driver, transforms the traditional autonomous driving pipeline by introducing a versatile tool library accessible via function calls, a cognitive memory of common sense and experiential knowledge for decision-making, and a reasoning engine capable of chain-of-thought reasoning, task planning, motion planning, and self-reflection. Powered by LLMs, our Agent-Driver is endowed with intuitive common sense and robust reasoning capabilities, thus enabling a more nuanced, human-like approach to autonomous driving. We evaluate our system on both open-loop and close-loop driving challenges, and extensive experiments substantiate that our Agent-Driver significantly outperforms the state-of-the-art driving methods by a large margin. Our approach also demonstrates superior interpretability and few-shot learning ability to these methods.", "title":"A Language Agent for Autonomous Driving", "authors":[ "Jiageng Mao", "Junjie Ye", "Yuxi Qian", "Marco Pavone", "Yue Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2311.10813", "GitHub":[ "https:\/\/github.com\/usc-gvl\/agent-driver" ], "paper_page":"https:\/\/huggingface.co\/papers\/2311.10813", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":154 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=U5BUzSn4tD", "bibtext":"@inproceedings{\nhu2024auxiliary,\ntitle={Auxiliary task demands mask the capabilities of smaller language models},\nauthor={Jennifer Hu and Michael Frank},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=U5BUzSn4tD}\n}", "abstract":"Developmental psychologists have argued about when cognitive capacities such as language understanding or theory of mind emerge. These debates often hinge on the concept of \"task demands\" -- the auxiliary challenges associated with performing a particular evaluation -- that may mask the child\u2019s underlying ability. The same issues arise when measuring the capacities of language models (LMs): performance on a task is a function of the model's underlying knowledge, combined with the model\u2019s ability to interpret and perform the task given its available resources. Here, we show that for analogical reasoning, reflective reasoning, word prediction, and grammaticality judgments, evaluation methods with greater task demands yield lower performance than evaluations with reduced demands. This \"demand gap\" is most pronounced for models with fewer parameters and less training data. Our results illustrate that LM performance should not be interpreted as a direct indication of intelligence (or lack thereof), but as a reflection of capacities seen through the lens of researchers' design choices.", "title":"Auxiliary task demands mask the capabilities of smaller language models", "authors":[ "Jennifer Hu", "Michael Frank" ], "id":"Conference", "type":"Oral", "arxiv_id":"2404.02418", "GitHub":[ "https:\/\/github.com\/jennhu\/lm-task-demands" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":155 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=TrloAXEJ2B", "bibtext":"@inproceedings{\nhuang2024lorahub,\ntitle={LoraHub: Efficient Cross-Task Generalization via Dynamic Lo{RA} Composition},\nauthor={Chengsong Huang and Qian Liu and Bill Yuchen Lin and Tianyu Pang and Chao Du and Min Lin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=TrloAXEJ2B}\n}", "abstract":"Low-rank adaptation (LoRA) is often employed to fine-tune large language models (LLMs) for new tasks. This paper investigates LoRA composability for cross-task generalization and introduces LoraHub, a simple framework devised for the purposive assembly of LoRA modules trained on diverse given tasks, with the objective of achieving adaptable performance on unseen tasks. With just a few examples from a new task, LoraHub can fluidly combine multiple LoRA modules, eliminating the need for human expertise and assumptions. Notably, the composition requires neither additional model parameters nor gradients. Empirical results on the Big-Bench Hard benchmark suggest that LoraHub, while not surpassing the performance of in-context learning, offers a notable performance-efficiency trade-off in few-shot scenarios by employing a significantly reduced number of tokens per example during inference. Notably, LoraHub establishes a better upper bound compared to in-context learning when paired with different demonstration examples, demonstrating its potential for future development. Our vision is to establish a platform for LoRA modules, empowering users to share their trained LoRA modules. This collaborative approach facilitates the seamless application of LoRA modules to novel tasks, contributing to an adaptive ecosystem.", "title":"LoraHub: Efficient Cross-Task Generalization via Dynamic LoRA Composition", "authors":[ "Chengsong Huang", "Qian Liu", "Bill Yuchen Lin", "Tianyu Pang", "Chao Du", "Min Lin" ], "id":"Conference", "type":"Poster", "arxiv_id":"2307.13269", "GitHub":[ "https:\/\/github.com\/sail-sg\/lorahub" ], "paper_page":"https:\/\/huggingface.co\/papers\/2307.13269", "n_linked_authors":6, "upvotes":31, "num_comments":2, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ "sail\/lorahub" ], "paper_page_exists_pre_conf":1, "unique_id":156 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Ti67584b98", "bibtext":"@inproceedings{\nrein2024gpqa,\ntitle={{GPQA}: A Graduate-Level Google-Proof Q\\&A Benchmark},\nauthor={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Ti67584b98}\n}", "abstract":"We present GPQA, a challenging dataset of 448 multiple-choice questions written by domain experts in biology, physics, and chemistry.\nWe ensure that the questions are high-quality and extremely difficult: experts who have or are pursuing PhDs in the corresponding domains reach 65\\% accuracy (74\\% when discounting clear mistakes the experts identified in retrospect), while highly skilled non-expert validators only reach 34\\% accuracy, despite spending on average over 30 minutes with unrestricted access to the web (i.e., the questions are \"Google-proof\"). When we released this dataset in November 2023, GPT-4 achieved 39\\% accuracy. As of March 2024, Claude 3 Opus achieves a reported score of approximately 60\\%, highlighting the rapid pace of progress in AI. If we are to use future AI systems to help us answer very hard questions\u2014for example, when developing new scientific knowledge\u2014we need to develop scalable oversight methods that enable humans to supervise their outputs, which may be difficult even if the supervisors are themselves skilled and knowledgeable. The difficulty of GPQA for skilled non-experts should enable realistic scalable oversight experiments, which we hope can help devise ways for human experts to reliably get truthful information from AI systems that surpass human capabilities.", "title":"GPQA: A Graduate-Level Google-Proof Q A Benchmark", "authors":[ "David Rein", "Betty Li Hou", "Asa Cooper Stickland", "Jackson Petty", "Richard Yuanzhe Pang", "Julien Dirani", "Julian Michael", "Samuel R. Bowman" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/idavidrein\/gpqa" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":157 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=TZ0CCGDcuT", "bibtext":"@inproceedings{\nhanna2024have,\ntitle={Have Faith in Faithfulness: Going Beyond Circuit Overlap When Finding Model Mechanisms},\nauthor={Michael Hanna and Sandro Pezzelle and Yonatan Belinkov},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=TZ0CCGDcuT}\n}", "abstract":"Many recent language model (LM) interpretability studies have adopted the circuits framework, which aims to find the minimal computational subgraph, or circuit, that explains LM behavior on a given task. Most studies determine which edges belong in a LM's circuit for a task by performing causal interventions on each edge independently, but this scales poorly with model size. As a solution, recent work has proposed edge attribution patching (EAP), a scalable gradient-based approximation to interventions. In this paper, we introduce a new method - EAP with integrated gradients (EAP-IG) - that aims to efficiently find circuits while better maintaining one of their core properties: faithfulness. A circuit is faithful if all model edges outside the circuit can be ablated without changing the model's behavior on the task; faithfulness is what justifies studying circuits, rather than the full model. Our experiments demonstrate that circuits found using EAP-IG are more faithful than those found using EAP, even though both have high node overlap with reference circuits found using causal interventions. We conclude more generally that when comparing circuits, measuring overlap is no substitute for measuring faithfulness.", "title":"Have Faith in Faithfulness: Going Beyond Circuit Overlap When Finding Model Mechanisms", "authors":[ "Michael Hanna", "Sandro Pezzelle", "Yonatan Belinkov" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.17806", "GitHub":[ "https:\/\/github.com\/hannamw\/eap-ig" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.17806", "n_linked_authors":1, "upvotes":3, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":158 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=TRxQMpLUfD", "bibtext":"@inproceedings{\nyauney2024stronger,\ntitle={Stronger Random Baselines for In-Context Learning},\nauthor={Gregory Yauney and David Mimno},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=TRxQMpLUfD}\n}", "abstract":"Evaluating the in-context learning classification performance of language models poses challenges due to small dataset sizes, extensive prompt-selection using the validation set, and intentionally difficult tasks that lead to near-random performance. The standard random baseline--the expected accuracy of guessing labels uniformly at random--is stable when the evaluation set is used only once or when the dataset is large. We account for the common practice of validation set reuse and existing small datasets with a stronger random baseline: the expected maximum accuracy across multiple random classifiers. When choosing the best prompt demonstrations across six quantized language models applied to 16 BIG-bench Lite tasks, more than 20% of the few-shot results that exceed the standard baseline do not exceed this stronger random baseline. When held-out test sets are available, this stronger baseline is also a better predictor of held-out performance than the standard baseline, avoiding unnecessary test set evaluations. This maximum random baseline provides an easily calculated drop-in replacement for the standard baseline.", "title":"Stronger Random Baselines for In-Context Learning", "authors":[ "Gregory Yauney", "David Mimno" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.13020", "GitHub":[ "https:\/\/github.com\/gyauney\/max-random-baseline" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":159 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=TQdd1VhWbe", "bibtext":"@inproceedings{\nfujii2024continual,\ntitle={Continual Pre-Training for Cross-Lingual {LLM} Adaptation: Enhancing Japanese Language Capabilities},\nauthor={Kazuki Fujii and Taishi Nakamura and Mengsay Loem and Hiroki Iida and Masanari Ohi and Kakeru Hattori and Hirai Shota and Sakae Mizuki and Rio Yokota and Naoaki Okazaki},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=TQdd1VhWbe}\n}", "abstract":"Cross-lingual continual pre-training of large language models (LLMs) initially trained on English corpus allows us to leverage the vast amount of English language resources and reduce the pre-training cost. In this study, we constructed Swallow, an LLM with enhanced Japanese capability, by extending the vocabulary of Llama 2 to include Japanese characters and conducting continual pre-training on a large Japanese web corpus. Experimental results confirmed that the performance on Japanese tasks drastically improved through continual pre-training, and the performance monotonically increased with the amount of training data up to 100B tokens. Consequently, Swallow achieved superior performance compared to other LLMs that were trained from scratch in English and Japanese. An analysis of the effects of continual pre-training revealed that it was particularly effective for Japanese question answering tasks. Furthermore, to elucidate effective methodologies for cross-lingual continual pre-training from English to Japanese, we investigated the impact of vocabulary expansion and the effectiveness of incorporating parallel corpora. The results showed that the efficiency gained through vocabulary expansion had no negative impact on performance, except for the summarization task, and that the combined use of parallel corpora enhanced translation ability.", "title":"Continual Pre-Training for Cross-Lingual LLM Adaptation: Enhancing Japanese Language Capabilities", "authors":[ "Kazuki Fujii", "Taishi Nakamura", "Mengsay Loem", "Hiroki Iida", "Masanari Ohi", "Kakeru Hattori", "Hirai Shota", "Sakae Mizuki", "Rio Yokota", "Naoaki Okazaki" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.17790", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.17790", "n_linked_authors":2, "upvotes":5, "num_comments":0, "n_authors":10, "Models":[ "tokyotech-llm\/Swallow-7b-instruct-hf", "tokyotech-llm\/Swallow-70b-instruct-hf", "tokyotech-llm\/Swallow-13b-instruct-hf", "tokyotech-llm\/Swallow-7b-hf", "tokyotech-llm\/Swallow-13b-hf", "tokyotech-llm\/Swallow-7b-plus-hf", "tokyotech-llm\/Swallow-70b-hf", "tokyotech-llm\/Swallow-7b-NVE-instruct-hf", "tokyotech-llm\/Swallow-70b-NVE-instruct-hf", "tokyotech-llm\/Swallow-7b-NVE-hf", "tokyotech-llm\/Swallow-70b-NVE-hf", "tokyotech-llm\/Swallow-13b-NVE-hf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-NVE-instruct-hf-4bits", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-NVE-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-NVE-instruct-hf-8bits", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-instruct-hf-4bits", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-instruct-hf-8bits", "RichardErkhov\/tokyotech-llm_-_Swallow-13b-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-70b-NVE-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-70b-instruct-v0.1-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-13b-instruct-v0.1-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-hf-gguf" ], "Datasets":[ ], "Spaces":[ "hayas\/Swallow-13B-instruct", "mmnga\/vocabviewer", "kmero\/tokyotech-llm-Swallow-70b-instruct-hf", "isonuma\/marutenbo", "Huaibo\/tokyotech-llm-Swallow-7b-instruct-hf" ], "paper_page_exists_pre_conf":1, "unique_id":160 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=TBNYjdOazs", "bibtext":"@inproceedings{\nkim2024decoupling,\ntitle={Decoupling Noise and Toxic Parameters for Language Model Detoxification by Task Vector Merging},\nauthor={Yongmin Kim and Takeshi Kojima and Yusuke Iwasawa and Yutaka Matsuo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=TBNYjdOazs}\n}", "abstract":"The goal of detoxifying language models is to reduce the chances of producing offensive or harmful output in pre-trained language models (PLMs), ensuring their safer use. A recently proposed detoxification method utilizes the task vector obtained by subtraction from the fine-tuned model on toxic datasets to the pre-trained model. This approach has shown effectiveness for detoxification but still suffers from degradation. This study focuses on further mitigating degradation while maintaining detoxification performance. To mitigate the degradation, we propose a method that detoxifies the PLMs by fine-tuning multiple models on split toxic datasets and by merging the subtracted task vectors. \nWe conducted experiments on two toxic datasets (Civil Comments and Toxigen) with five PLMs (GPT2-small, GPT2-medium, GPT2-large, Phi-1.5, and Llama2-7b), demonstrating that our method consistently achieves a lower toxicity score while preventing the degradation compared to baseline methods.\nEspecially, with the GPT2-small model on the Toxigen dataset, degradation was reduced by 38.9\\% compared to that of an existing task vector method while maintaining a similar toxicity score.\nIn addition, we found that merging multiple detoxified models tends to increase the number of parameters that remained almost unchanged from the pre-trained model.\nWe assume that by merging multiple detoxified models, \"decoupling noise and toxic parameters\" is implicitly achieved. The accidental noise in the parameter shift unrelated to detoxification disappears by averaging noise, whereas the parameter shift associated with detoxification is maintained.\nWe hope that the findings of this study will be applied not only to detoxification but also to many other research domains that seek to suppress undesirable outputs of language models.", "title":"Decoupling Noise and Toxic Parameters for Language Model Detoxification by Task Vector Merging", "authors":[ "Yongmin Kim", "Takeshi Kojima", "Yusuke Iwasawa", "Yutaka Matsuo" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":161 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=T9cOYH0wGF", "bibtext":"@inproceedings{\nram{\\'\\i}rez2024optimising,\ntitle={Optimising Calls to Large Language Models with Uncertainty-Based Two-Tier Selection},\nauthor={Guillem Ram{\\'\\i}rez and Alexandra Birch and Ivan Titov},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=T9cOYH0wGF}\n}", "abstract":"Researchers and practitioners operating on a limited budget face the well-known cost-performance trade-off dilemma. The challenging decision often centers on whether to use a large LLM with better performance or a smaller one with reduced costs. This has motivated recent research in the optimisation of LLM calls. Either a cascading strategy is used, where a smaller LLM or both are called causally, or a routing strategy is used, where only one model is ever called. This is dependent on a decision criterion in both scenarios which is typically an auxiliary neural model. In this work, we propose a cost-effective solution; we use only the uncertainty of the generations of the small LLM as the decision criterion. We compare our approach with both cascading and routing strategies using three different pairs of pre-trained small and large LLMs, on nine different tasks and against approaches that require an additional neural model. Our experiments reveal this simple solution optimally balances cost and performance, outperforming existing methods on 25 out of 27 experimental setups.", "title":"Optimising Calls to Large Language Models with Uncertainty-Based Two-Tier Selection", "authors":[ "Guillem Ram\u00edrez", "Alexandra Birch", "Ivan Titov" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.02134", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":162 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=T5pGDydMkS", "bibtext":"@inproceedings{\nou2024adaptive,\ntitle={Adaptive Quantization Error Reconstruction for {LLM}s with Mixed Precision},\nauthor={Lin Ou and Jinpeng Xia and Yuewei Zhang and Chuzhan Hao and Hao Henry Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=T5pGDydMkS}\n}", "abstract":"Large language models (LLMs) has demonstrated superior performance on various downstream tasks. However, their practical applications are hindered by their immense memory and computation requirements. Although recent post-training quantization methods can effectively reduce memory usage and improve computational efficiency, they often overlook the varying sensitivity of different layer weights to bit precision. Additionally, the previous methods suffer from significant accuracy loss under low-bit quantization (2-3 bits). To address these limitations, we propose Adaptive Mixed Precision and Low-Rank Quantization Error Reconstruction for LLMs (AMLQ), which achieves state-of-the-art performance under the approximate average bit precision overall. Furthermore, we introduce the low-rank decomposition to reconstruct quantization error based on the output features. Experimental results demonstrate that this method can be effectively combined with various quantization techniques and bring considerable performance gains. Our approach comprehensively considers model performance and inference efficiency, offering more than 3$\\times$ speedup over the FP16 execution.", "title":"Adaptive Quantization Error Reconstruction for LLMs with Mixed Precision", "authors":[ "Lin Ou", "Jinpeng Xia", "Yuewei Zhang", "Chuzhan Hao", "Hao Henry Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":163 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Szp33itD10", "bibtext":"@inproceedings{\nli2024styletalker,\ntitle={StyleTalker: Finetuning Audio Language Model and Style-Based Text-to-Speech Model for Fast Spoken Dialogue Generation},\nauthor={Yinghao Aaron Li and Xilin Jiang and Jordan Darefsky and Ge Zhu and Nima Mesgarani},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Szp33itD10}\n}", "abstract":"The rapid advancement of large language models (LLMs) has significantly propelled the development of text-based chatbots, demonstrating their capability to engage in coherent and contextually relevant dialogues. However, extending these advancements to enable end-to-end speech-to-speech conversation bots remains a formidable challenge, primarily due to the extensive dataset and computational resources required. The conventional approach of cascading automatic speech recognition (ASR), LLM, and text-to-speech (TTS) models in a pipeline, while effective, suffers from unnatural prosody because it lacks direct interactions between the input audio and its transcribed text and the output audio. These systems are also limited by their inherent latency from the ASR process for real-time applications. This paper introduces Style-Talker, an innovative framework that fine-tunes an audio LLM alongside a style-based TTS model for fast spoken dialog generation. Style-Talker takes user input audio and uses transcribed chat history and speech styles to generate both the speaking style and text for the response. Subsequently, the TTS model synthesizes the speech, which is then played back to the user. While the response speech is being played, the input speech undergoes ASR processing to extract the transcription and speaking style, serving as the context for the ensuing dialogue turn. This novel pipeline accelerates the traditional cascade ASR-LLM-TTS systems while integrating rich paralinguistic information from input speech. Our experimental results show that Style-Talker significantly outperforms the conventional cascade and speech-to-speech baselines in terms of both dialogue naturalness and coherence while being more than 50\\% faster. The demo and code are available at https:\/\/styletalker.github.io\/.", "title":"StyleTalker: Finetuning Audio Language Model and Style-Based Text-to-Speech Model for Fast Spoken Dialogue Generation", "authors":[ "Yinghao Aaron Li", "Xilin Jiang", "Jordan Darefsky", "Ge Zhu", "Nima Mesgarani" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":164 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=SwUsFTtM9h", "bibtext":"@inproceedings{\nnaseh2024iteratively,\ntitle={Iteratively Prompting Multimodal {LLM}s to Reproduce Natural and {AI}-Generated Images},\nauthor={Ali Naseh and Katherine Thai and Mohit Iyyer and Amir Houmansadr},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=SwUsFTtM9h}\n}", "abstract":"With the digital imagery landscape rapidly evolving, image stocks and AI-generated image marketplaces have become central to visual media. Traditional stock images now exist alongside innovative platforms that trade in prompts for AI-generated visuals, driven by sophisticated APIs like DALL-E 3 and Midjourney. This paper studies the possibility of employing multi-modal models with enhanced visual understanding to mimic the outputs of these platforms, introducing an original attack strategy. Our method leverages fine-tuned CLIP models, a multi-label classifier, and the descriptive capabilities of GPT-4V to create prompts that generate images similar to those available in marketplaces and from premium stock image providers, yet at a markedly lower expense. In presenting this strategy, we aim to spotlight a new class of economic and security considerations within the realm of digital imagery. Our findings, supported by both automated metrics and human assessment, reveal that comparable visual content can be produced for a fraction of the prevailing market prices (\\$0.23 - \\$0.27 per image), emphasizing the need for awareness and strategic discussions about the integrity of digital media in an increasingly AI-integrated landscape. Additionally, this approach holds promise as a tool for data augmentation, potentially enhancing machine learning models by providing varied and cost-effective training data. Our work also contributes to the field by assembling a dataset consisting of approximately 19 million prompt-image pairs generated by the popular Midjourney platform, which we plan to release publicly.", "title":"Iteratively Prompting Multimodal LLMs to Reproduce Natural and AI-Generated Images", "authors":[ "Ali Naseh", "Katherine Thai", "Mohit Iyyer", "Amir Houmansadr" ], "id":"Conference", "type":"Oral", "arxiv_id":"2404.13784", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":165 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=SHMj84U5SH", "bibtext":"@inproceedings{\nhuang2024compression,\ntitle={Compression Represents Intelligence Linearly},\nauthor={Yuzhen Huang and Jinghan Zhang and Zifei Shan and Junxian He},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=SHMj84U5SH}\n}", "abstract":"There is a belief that learning to compress well will lead to intelligence. Recently, language modeling has been shown to be equivalent to compression, which offers a compelling rationale for the success of large language models (LLMs): development of more advanced language models is essentially enhancing compression which facilitates intelligence. Despite such appealing discussions, little empirical evidence is present for the interplay between compression and intelligence. In this work, we examine the relationship between compression and intelligence in the context of LLMs, treating LLMs as data compressors. Given the abstract concept of \"intelligence\", we adopt the average downstream benchmark scores as a surrogate, specifically targeting intelligence related to knowledge and commonsense, coding, and mathematical reasoning. Across 12 benchmarks, our study brings together 31 public LLMs that vary in size and originate from diverse organizations. Remarkably, we find that LLMs' intelligence -- reflected by benchmark scores -- almost **linearly** correlates with their ability to compress external text corpora. These results provide concrete evidence supporting the belief that superior compression indicates greater intelligence. Furthermore, our findings suggest that compression efficiency, as an unsupervised metric derived from raw text corpora, serves as a reliable evaluation measure that is linearly associated with the model capabilities. This work advocates for the adoption of compression performance as a stable, flexible, and reliable metric for evaluating LLMs. We open-source our compression datasets as well as our data collection pipelines to facilitate future researchers to assess compression properly.", "title":"Compression Represents Intelligence Linearly", "authors":[ "Yuzhen Huang", "Jinghan Zhang", "Zifei Shan", "Junxian He" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.09937", "GitHub":[ "https:\/\/github.com\/hkust-nlp\/llm-compression-intelligence" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.09937", "n_linked_authors":3, "upvotes":27, "num_comments":1, "n_authors":4, "Models":[ ], "Datasets":[ "hkust-nlp\/llm-compression" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":166 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=SGoVIC0u0f", "bibtext":"@inproceedings{\nlehnert2024beyond,\ntitle={Beyond A*: Better Planning with Transformers via Search Dynamics Bootstrapping},\nauthor={Lucas Lehnert and Sainbayar Sukhbaatar and DiJia Su and Qinqing Zheng and Paul McVay and Michael Rabbat and Yuandong Tian},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=SGoVIC0u0f}\n}", "abstract":"While Transformers have enabled tremendous progress in various application settings, such architectures still struggle with solving planning and sequential decision-making tasks. In this work, we demonstrate how to train Transformers to solve complex planning tasks. This is accomplished by first designing a synthetic language that captures the computation performed by the $A^*$ search algorithm when solving a planning task. Then, an encoder-decoder Transformer model is trained to predict this language, resulting in a language model that can correctly solve novel planning tasks by generating $A^*$'s search dynamics. We fine tune this model to obtain a Searchformer, a Transformer model that optimally solves previously unseen Sokoban puzzles 93.7\\% of the time, while using up to 26.8\\% fewer search steps than our $A^*$ reference implementation. Searchformer significantly outperforms baselines that predict the optimal plan directly with a 5-10$\\times$ smaller model size and a 10$\\times$ smaller training dataset. Lastly, we demonstrate how Searchformer scales to larger and more complex decision making tasks with improved percentage of solved tasks and shortened search dynamics.", "title":"Beyond A*: Better Planning with Transformers via Search Dynamics Bootstrapping", "authors":[ "Lucas Lehnert", "Sainbayar Sukhbaatar", "DiJia Su", "Qinqing Zheng", "Paul McVay", "Michael Rabbat", "Yuandong Tian" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/facebookresearch\/searchformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":167 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=S7NVVfuRv8", "bibtext":"@inproceedings{\nwu2024how,\ntitle={How Easily do Irrelevant Inputs Skew the Responses of Large Language Models?},\nauthor={Siye Wu and Jian Xie and Jiangjie Chen and Tinghui Zhu and Kai Zhang and Yanghua Xiao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=S7NVVfuRv8}\n}", "abstract":"By leveraging the retrieval of information from external knowledge databases, Large Language Models (LLMs) exhibit enhanced capabilities for accomplishing many knowledge-intensive tasks.\nHowever, due to the inherent flaws of current retrieval systems, there might exist irrelevant information within those retrieving top-ranked passages.\nIn this work, we present a comprehensive investigation into the robustness of LLMs to different types of irrelevant information under various conditions.\nWe initially introduce a framework to construct high-quality irrelevant information that ranges from semantically unrelated, partially related, and related to questions.\nFurthermore, our analysis demonstrates that the constructed irrelevant information not only scores highly on similarity metrics, being highly retrieved by existing systems, but also bears semantic connections to the context.\nOur investigation reveals that current LLMs still face challenges in discriminating highly semantically related information and can be easily distracted by these irrelevant yet misleading content. \nBesides, we also find that current solutions for handling irrelevant information have limitations in improving the robustness of LLMs to such distractions.\nAll the resources are available on [GitHub](https:\/\/github.com\/Di-viner\/LLM-Robustness-to-Irrelevant-Information).", "title":"How Easily do Irrelevant Inputs Skew the Responses of Large Language Models?", "authors":[ "Siye Wu", "Jian Xie", "Jiangjie Chen", "Tinghui Zhu", "Kai Zhang", "Yanghua Xiao" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.03302", "GitHub":[ "https:\/\/github.com\/di-viner\/llm-robustness-to-irrelevant-information" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.03302", "n_linked_authors":1, "upvotes":2, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":168 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=S4ZOkV1AHl", "bibtext":"@inproceedings{\nkwok2024evaluating,\ntitle={Evaluating Cultural Adaptability of a Large Language Model via Simulation of Synthetic Personas},\nauthor={Louis Kwok and Michal Bravansky and Lewis Griffin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=S4ZOkV1AHl}\n}", "abstract":"The success of Large Language Models (LLMs) in multicultural environments hinges on their ability to understand users' diverse cultural backgrounds. We measure this capability by having an LLM simulate human profiles representing various nationalities within the scope of a questionnaire-style psychological experiment. Specifically, we employ GPT-3.5 to reproduce reactions to persuasive news articles of 7,286 participants from 15 countries; comparing the results with a dataset of real participants sharing the same demographic traits. Our analysis shows that specifying a person's country of residence improves GPT-3.5's alignment with their responses. In contrast, using native language prompting introduces shifts that significantly reduce overall alignment, with some languages particularly impairing performance. These findings suggest that while direct nationality information enhances the model's cultural adaptability, native language cues do not reliably improve simulation fidelity and can detract from the model's effectiveness.", "title":"Evaluating Cultural Adaptability of a Large Language Model via Simulation of Synthetic Personas", "authors":[ "Louis Kwok", "Michal Bravansky", "Lewis Griffin" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.06929", "GitHub":[ "https:\/\/github.com\/louiskwoklf\/llms-cultural-adaptability" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":169 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=S1XnUsqwr7", "bibtext":"@inproceedings{\nzhu2024deductive,\ntitle={Deductive Beam Search: Decoding Deducible Rationale for Chain-of-Thought Reasoning},\nauthor={Tinghui Zhu and Kai Zhang and Jian Xie and Yu Su},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=S1XnUsqwr7}\n}", "abstract":"Recent advancements have significantly augmented the reasoning capabilities of Large Language Models (LLMs) through various methodologies, especially chain-of-thought (CoT) reasoning. However, previous methods often struggle to address reasoning errors in intermediate steps, which can lead to accumulative errors. In this paper, we propose Deductive Beam Search (DBS), which seamlessly integrates CoT and deductive reasoning with step-wise beam search for LLMs. Our approach deploys a verifier, verifying the deducibility of a reasoning step and its premises, thus alleviating the error accumulation. Furthermore, we introduce a scalable and labor-free data construction method to amplify our model\u2019s verification capabilities. Extensive experiments demonstrate that our approach significantly enhances the base performance of LLMs of various scales (7B, 13B, 70B, and ChatGPT) across 8 reasoning datasets from 3 diverse reasoning genres, including arithmetic, commonsense, and symbolic. Moreover, our analysis proves DBS\u2019s capability of detecting diverse and subtle reasoning errors and robustness on different model scales.", "title":"Deductive Beam Search: Decoding Deducible Rationale for Chain-of-Thought Reasoning", "authors":[ "Tinghui Zhu", "Kai Zhang", "Jian Xie", "Yu Su" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.17686", "GitHub":[ "https:\/\/github.com\/osu-nlp-group\/deductive-beam-search" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.17686", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":170 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Rx3wC8sCTJ", "bibtext":"@inproceedings{\nross2024llm,\ntitle={{LLM} economicus? Mapping the Behavioral Biases of {LLM}s via Utility Theory},\nauthor={Jillian Ross and Yoon Kim and Andrew Lo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Rx3wC8sCTJ}\n}", "abstract":"Humans are not homo economicus (i.e., rational economic beings). As humans, we exhibit systematic behavioral biases such as loss aversion, anchoring, framing, etc., which lead us to make suboptimal economic decisions. Insofar as such biases may be embedded in text data on which large language models (LLMs) are trained, to what extent are LLMs prone to the same behavioral biases? Understanding these biases in LLMs is crucial for deploying LLMs to support human decision-making. We propose utility theory-a paradigm at the core of modern economic theory-as an approach to evaluate the economic biases of LLMs. Utility theory enables the quantification and comparison of economic behavior against benchmarks such as perfect rationality or human behavior. To demonstrate our approach, we quantify and compare the economic behavior of a variety of open- and closed-source LLMs. We find that the economic behavior of current LLMs is neither entirely human-like nor entirely economicus-like. We also find that most current LLMs struggle to maintain consistent economic behavior across settings. Finally, we illustrate how our approach can measure the effect of interventions such as prompting on economic biases.", "title":"LLM economicus? Mapping the Behavioral Biases of LLMs via Utility Theory", "authors":[ "Jillian Ross", "Yoon Kim", "Andrew Lo" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.02784", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":171 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=RLFca3arx7", "bibtext":"@inproceedings{\ngupta2024calm,\ntitle={{CALM} : A Multi-task Benchmark for Comprehensive Assessment of Language Model Bias},\nauthor={Vipul Gupta and Pranav Narayanan Venkit and Hugo Lauren{\\c{c}}on and Shomir Wilson and Rebecca J. Passonneau},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=RLFca3arx7}\n}", "abstract":"As language models (LMs) become increasingly powerful and widely used, it is important to quantify them for sociodemographic bias with potential for harm. Prior measures of bias are sensitive to perturbations in the templates designed to compare performance across social groups, due to factors such as low diversity or limited number of templates. Also, most previous work considers only one NLP task. We introduce Comprehensive Assessment of Language Models (CALM) for robust measurement of social biases. We use sixteen datasets for question-answering, sentiment analysis and natural language inference and filter them to produce 224 templates with high diversity (e.g., length, vocabulary). This helps us create a novel dataset of 78,400 prompts covering the three NLP tasks. Our empirical evaluation shows that CALM bias scores are more robust and far less sensitive than previous bias measurements to perturbations in the templates, such as synonym substitution, or to random subset selection of templates. We apply CALM to 20 large language models, and find that for 2 LM series, larger parameter models tend to be more biased than smaller ones. The T0 series is the least biased model families, of the 20 LLMs investigated here.", "title":"CALM : A Multi-task Benchmark for Comprehensive Assessment of Language Model Bias", "authors":[ "Vipul Gupta", "Pranav Narayanan Venkit", "Hugo Lauren\u00e7on", "Shomir Wilson", "Rebecca J. Passonneau" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/vipulgupta1011\/calm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":172 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=RCdoMrg4I0", "bibtext":"@inproceedings{\ndu2024chinese,\ntitle={Chinese Tiny {LLM}: Pretraining a Chinese-Centered Large Language Model},\nauthor={Xeron Du and Zhouliang Yu and Songyang Gao and Ding Pan and Cheng Yuyang and Ziyang Ma and Ruibin Yuan and Xingwei Qu and Jiaheng Liu and Tianyu Zheng and Xinchen Luo and Guorui Zhou and Wenhu Chen and Ge Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=RCdoMrg4I0}\n}", "abstract":"In this study, we introduce $\\textbf{CT-LLM}$, a groundbreaking 2B large language model (LLM) that illustrates a pivotal shift towards prioritizing the Chinese language in the development of LLMs. Uniquely initiated from scratch, CT-LLM diverges from the conventional methodology by primarily incorporating Chinese textual data, utilizing an extensive corpus of 1,200 billion tokens, including 800 billion Chinese tokens and 400 billion English tokens. This strategic composition facilitates the model's exceptional proficiency in understanding and processing Chinese, a capability further enhanced through alignment techniques including supervised fine-tuning (SFT) and direct preference optimization (DPO). Demonstrating remarkable performance on the ChineseHardCase Benchmark, CT-LLM not only excels in Chinese language tasks but also showcases its adeptness in English through SFT. This research challenges the prevailing paradigm of training LLMs predominantly on English corpora and then adapting them to other languages, broadening the horizons for LLM training methodologies. By open-sourcing full-process of CT-LLM, we aim to foster further exploration and innovation within both the academic and industrial spheres, paving the way for more inclusive and versatile language models in the future.", "title":"Chinese Tiny LLM: Pretraining a Chinese-Centered Large Language Model", "authors":[ "Xeron Du", "Zhouliang Yu", "Songyang Gao", "Ding Pan", "Cheng Yuyang", "Ziyang Ma", "Ruibin Yuan", "Xingwei Qu", "Jiaheng Liu", "Tianyu Zheng", "Xinchen Luo", "Guorui Zhou", "Wenhu Chen", "Ge Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":173 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Qmq4zqdnWh", "bibtext":"@inproceedings{\nwadhwa2024using,\ntitle={Using Natural Language Explanations to Rescale Human Judgments},\nauthor={Manya Wadhwa and Jifan Chen and Junyi Jessy Li and Greg Durrett},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Qmq4zqdnWh}\n}", "abstract":"The rise of large language models (LLMs) has brought a critical need for high-quality human-labeled data, particularly for processes like human feedback and evaluation. A common practice is to label data via consensus annotation over human judgments. However, annotators' judgments for subjective tasks can differ in many ways: they may reflect different qualitative judgments about an example, and they may be mapped to a labeling scheme in different ways. We show that these nuances can be captured by natural language explanations, and propose a method to rescale ordinal annotations and explanations using LLMs. Specifically, we feed annotators' Likert ratings and corresponding explanations into an LLM and prompt it to produce a numeric score anchored in a scoring rubric. These scores should reflect the annotators' underlying assessments of the example. The rubric can be designed or modified after annotation, and include distinctions that may not have been known when the original error taxonomy was devised. We explore our technique in the context of rating system outputs for a document-grounded question answering task, where LLMs achieve near-human performance. Our method rescales the raw judgments without impacting agreement and brings the scores closer to human judgments grounded in the same scoring rubric.", "title":"Using Natural Language Explanations to Rescale Human Judgments", "authors":[ "Manya Wadhwa", "Jifan Chen", "Junyi Jessy Li", "Greg Durrett" ], "id":"Conference", "type":"Poster", "arxiv_id":"2305.14770", "GitHub":[ "https:\/\/github.com\/manyawadhwa\/explanation_based_rescaling" ], "paper_page":"https:\/\/huggingface.co\/papers\/2305.14770", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ "wadhma\/EBR" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":174 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=QdWhj0QZFw", "bibtext":"@inproceedings{\nliu2024llm,\ntitle={{LLM}360: Towards Fully Transparent Open-Source {LLM}s},\nauthor={Zhengzhong Liu and Aurick Qiao and Willie Neiswanger and Hongyi Wang and Bowen Tan and Tianhua Tao and Junbo Li and Yuqi Wang and Suqi Sun and Omkar Pangarkar and Richard Fan and Yi Gu and Victor Miller and Yonghao Zhuang and Guowei He and Haonan Li and Fajri Koto and Liping Tang and Nikhil Ranjan and Zhiqiang Shen and Roberto Iriondo and Cun Mu and Zhiting Hu and Mark Schulze and Preslav Nakov and Timothy Baldwin and Eric P. Xing},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=QdWhj0QZFw}\n}", "abstract":"The recent surge in open-source Large Language Models (LLMs), such as LLaMA, Falcon, and Mistral, provides diverse options for AI practitioners and researchers. However, most LLMs have only released partial artifacts, such as the final model weights or inference code, and technical reports increasingly limit their scope to high-level design choices and surface statistics. These choices hinder progress in the field by degrading transparency into the training of LLMs and forcing teams to rediscover many details in the training process. We present **LLM360**, an initiative to fully open-source LLMs, which advocates for all training code and data, model checkpoints, and intermediate results to be made available to the community. The goal of LLM360 is to support open and collaborative AI research by making the end-to-end LLM training process transparent and reproducible by everyone. As a first step of LLM360, we release two 7B parameter LLMs pre-trained from scratch, Amber and Crystal, including their training code, data, intermediate checkpoints, and analyses. We are committed to continually pushing the boundaries of LLMs through this open-source effort. More large-scale and stronger models are underway and will be released in the future.", "title":"LLM360: Towards Fully Transparent Open-Source LLMs", "authors":[ "Zhengzhong Liu", "Aurick Qiao", "Willie Neiswanger", "Hongyi Wang", "Bowen Tan", "Tianhua Tao", "Junbo Li", "Yuqi Wang", "Suqi Sun", "Omkar Pangarkar", "Richard Fan", "Yi Gu", "Victor Miller", "Yonghao Zhuang", "Guowei He", "Haonan Li", "Fajri Koto", "Liping Tang", "Nikhil Ranjan", "Zhiqiang Shen", "Roberto Iriondo", "Cun Mu", "Zhiting Hu", "Mark Schulze", "Preslav Nakov", "Timothy Baldwin", "Eric P. Xing" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/llm360\/analysis360" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":175 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=QbCHlIqbDJ", "bibtext":"@inproceedings{\nfan2024from,\ntitle={From Narratives to Numbers: Valid Inference Using Language Model Predictions from Verbal Autopsies},\nauthor={Shuxian Fan and Adam Visokay and Kentaro Hoffman and Stephen Salerno and Li Liu and Jeffrey T. Leek and Tyler McCormick},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=QbCHlIqbDJ}\n}", "abstract":"In settings where most deaths occur outside the healthcare system, verbal autopsies (VAs) are a common tool to monitor trends in causes of death (COD). VAs are interviews with a surviving caregiver or relative that are used to predict the decedent\u2019s COD. Turning VAs into actionable insights for researchers and policymakers requires two steps (i) predicting likely COD using the VA interview and (ii) performing inference with predicted CODs (e.g. modeling the breakdown of causes by demographic factors using a sample of deaths). In this paper, we develop a method for valid inference using outcomes (in our case COD) predicted from free-form text using state-of-the-art NLP techniques. This method, which we call multiPPI++, extends recent work in \u201cprediction-powered inference\u201d to multinomial classification. We leverage a suite of NLP techniques for COD prediction and, through empirical analysis of VA data, we demonstrate the effectiveness of our approach in handling transportability issues. multiPPI++ recovers ground truth estimates, regardless of which NLP model produced predictions and regardless of whether they were produced by a more accurate predictor like GPT-4-32k or a less accurate predictor like KNN. Our findings demonstrate the practical importance of inference correction for public health decision-making and suggests that if inference tasks are the end goal, having a small amount of contextually relevant, high quality labeled data is essential regardless of the NLP algorithm.", "title":"From Narratives to Numbers: Valid Inference Using Language Model Predictions from Verbal Autopsies", "authors":[ "Shuxian Fan", "Adam Visokay", "Kentaro Hoffman", "Stephen Salerno", "Li Liu", "Jeffrey T. Leek", "Tyler McCormick" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":176 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=QJvfpWSpWm", "bibtext":"@inproceedings{\nhassid2024the,\ntitle={The Larger the Better? Improved {LLM} Code-Generation via Budget Reallocation},\nauthor={Michael Hassid and Tal Remez and Jonas Gehring and Roy Schwartz and Yossi Adi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=QJvfpWSpWm}\n}", "abstract":"It is a common belief that large language models (LLMs) are better than smaller-sized ones. However, larger models also require significantly more time and compute during inference. This begs the question: what happens when both models operate under the same budget? (e.g., compute, run-time). To address this question, we analyze code generation LLMs of various sizes and make comparisons such as running a 70B model once vs. generating five outputs from a 13B model. We consider a standard unit-test setup, which can be used to select the correct output from the smaller model. Our findings reveal that the repeated use of smaller models can yield consistent improvements, with gains of up to 15% across five tasks. On the other hand, in scenarios where unit-tests are unavailable, a ranking-based selection of candidates from the smaller model falls short of the performance of a single output from larger ones. Our results highlight the potential of using smaller models instead of larger ones, and the importance of studying approaches for ranking LLM outputs.", "title":"The Larger the Better? Improved LLM Code-Generation via Budget Reallocation", "authors":[ "Michael Hassid", "Tal Remez", "Jonas Gehring", "Roy Schwartz", "Yossi Adi" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.00725", "GitHub":[ "https:\/\/github.com\/slp-rl\/budget-realloc" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":177 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Pvn1dKreZW", "bibtext":"@inproceedings{\nqian2024merge,\ntitle={''Merge Conflicts!''' Exploring the Impacts of External Knowledge Distractors to Parametric Knowledge Graphs},\nauthor={Cheng Qian and Xinran Zhao and Tongshuang Wu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Pvn1dKreZW}\n}", "abstract":"Large language models (LLMs) acquire extensive knowledge during pre-training, known as their parametric knowledge. However, to remain up-to-date and align with human instructions, LLMs inevitably require external knowledge during interactions. This raises a crucial question: How will LLMs respond when external knowledge interferes with their parametric knowledge? To uncover the impacts systematically, we construct parametric knowledge graphs to reveal different LLM knowledge structures, and introduce external information through external knowledge distractors of varying degrees, methods, positions, and formats. Experiments on both closed and open-source models demonstrate that LLMs tend to believe in external knowledge sources, particularly when they direct conflict or make confounding changes within detailed contexts. We also discover while LLMs are sensitive to external knowledge veracity, they still get distracted by unrelated information. These findings highlight the mechanisms behind LLM's integration of external knowledge, even indirectly, during model-user interactions.", "title":"\"Merge Conflicts!'\" Exploring the Impacts of External Knowledge Distractors to Parametric Knowledge Graphs", "authors":[ "Cheng Qian", "Xinran Zhao", "Tongshuang Wu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":178 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=PPTrmvEnpW", "bibtext":"@inproceedings{\nkarvonen2024emergent,\ntitle={Emergent World Models and Latent Variable Estimation in Chess-Playing Language Models},\nauthor={Adam Karvonen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=PPTrmvEnpW}\n}", "abstract":"Language models have shown unprecedented capabilities, sparking debate over the source of their performance. Is it merely the outcome of learning syntactic patterns and surface level statistics, or do they extract semantics and a world model from the text? Prior work by Li et al. investigated this by training a GPT model on synthetic, randomly generated Othello games and found that the model learned an internal representation of the board state. We extend this work into the more complex domain of chess, training on real games and investigating our model's internal representations using linear probes and contrastive activations. The model is given no a priori knowledge of the game and is solely trained on next character prediction, yet we find evidence of internal representations of board state. We validate these internal representations by using them to make interventions on the model's activations and edit its internal board state. Unlike Li et al's prior synthetic dataset approach, our analysis finds that the model also learns to estimate latent variables like player skill to better predict the next character. We derive a player skill vector and add it to the model, improving the model's win rate by up to 2.6 times.", "title":"Emergent World Models and Latent Variable Estimation in Chess-Playing Language Models", "authors":[ "Adam Karvonen" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/adamkarvonen\/chess_llm_interpretability" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":179 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=PKfAq8N4fK", "bibtext":"@inproceedings{\nwu2024agentkit,\ntitle={AgentKit: Structured {LLM} Reasoning with Dynamic Graphs},\nauthor={Yue Wu and Yewen Fan and So Yeon Min and Shrimai Prabhumoye and Stephen Marcus McAleer and Ruslan Salakhutdinov and Yonatan Bisk and Yuanzhi Li and Tom Mitchell},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=PKfAq8N4fK}\n}", "abstract":"We propose an intuitive LLM prompting framework (AgentKit) for multifunctional agents. \nAgentKit offers a unified framework for explicitly constructing a complex \"thought process\" from simple natural language prompts. The basic building block in AgentKit is a **node**, containing a natural language prompt for a specific subtask. The user then puts together chains of nodes, in order to build a \"thought process\" for any problem, like stacking LEGO pieces. The chains of nodes can be designed to explicitly enforce a naturally **structured** \"thought process\". For example, for the task of writing a paper, one may start with the thought process of 1) identify a core message, 2) identify prior research gaps, etc.\nThe nodes in AgentKit can be designed and combined in different ways to implement multiple advanced capabilities including on-the-fly hierarchical planning, reflection, and learning from interactions. \nIn addition, due to the modular nature and the intuitive design to simulate explicit human thought process, a basic agent could be implemented as simple as a list of prompts for the subtasks and therefore could be designed and tuned by someone *without any programming experience*. Quantitatively, we show that agents designed through AgentKit achieve SOTA performance on Webshop and Crafter. These advances underscore AgentKit's potential in making LLM agents effective and accessible for a wider range of applications.", "title":"AgentKit: Structured LLM Reasoning with Dynamic Graphs", "authors":[ "Yue Wu", "Yewen Fan", "So Yeon Min", "Shrimai Prabhumoye", "Stephen Marcus McAleer", "Ruslan Salakhutdinov", "Yonatan Bisk", "Yuanzhi Li", "Tom Mitchell" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/holmeswww\/agentkit" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":180 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=PEQFHRUFca", "bibtext":"@inproceedings{\nzheng2024a,\ntitle={A Reparameterized Discrete Diffusion Model for Text Generation},\nauthor={Lin Zheng and Jianbo Yuan and Lei Yu and Lingpeng Kong},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=PEQFHRUFca}\n}", "abstract":"This work studies discrete diffusion probabilistic models with applications to natural language generation. We derive an alternative yet equivalent formulation of the sampling from discrete diffusion processes and leverage this insight to develop a family of reparameterized discrete diffusion models. The derived generic framework is highly flexible, offers a fresh perspective of the generation process in discrete diffusion models, and features more effective training and decoding techniques. We conduct extensive experiments to evaluate the text generation capability of our model, demonstrating significant improvements over existing diffusion models.", "title":"A Reparameterized Discrete Diffusion Model for Text Generation", "authors":[ "Lin Zheng", "Jianbo Yuan", "Lei Yu", "Lingpeng Kong" ], "id":"Conference", "type":"Poster", "arxiv_id":"2302.05737", "GitHub":[ "https:\/\/github.com\/hkunlp\/reparam-discrete-diffusion" ], "paper_page":"https:\/\/huggingface.co\/papers\/2302.05737", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":181 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=OJaWBhh61C", "bibtext":"@inproceedings{\nliu2024best,\ntitle={Best Practices and Lessons Learned on Synthetic Data},\nauthor={Ruibo Liu and Jerry Wei and Fangyu Liu and Chenglei Si and Yanzhe Zhang and Jinmeng Rao and Steven Zheng and Daiyi Peng and Diyi Yang and Denny Zhou and Andrew M. Dai},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=OJaWBhh61C}\n}", "abstract":"The success of AI models relies on the availability of large, diverse, and high-quality datasets, which can be challenging to obtain due to data scarcity, privacy concerns, and high costs. Synthetic data has emerged as a promising solution by generating artificial data that mimics real-world patterns. This paper provides an overview of synthetic data research, discussing its applications, challenges, and future directions. We present empirical evidence from prior art to demonstrate its effectiveness and highlight the importance of ensuring its factuality, fidelity, and unbiasedness. We emphasize the need for responsible use of synthetic data to build more powerful, inclusive, and trustworthy language models.", "title":"Best Practices and Lessons Learned on Synthetic Data", "authors":[ "Ruibo Liu", "Jerry Wei", "Fangyu Liu", "Chenglei Si", "Yanzhe Zhang", "Jinmeng Rao", "Steven Zheng", "Daiyi Peng", "Diyi Yang", "Denny Zhou", "Andrew M. Dai" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.07503", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.07503", "n_linked_authors":8, "upvotes":29, "num_comments":1, "n_authors":11, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":182 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=NikbrdtYvG", "bibtext":"@inproceedings{\npfau2024lets,\ntitle={Let{\\textquoteright}s Think Dot by Dot: Hidden computation in transformer language models},\nauthor={Jacob Pfau and William Merrill and Samuel R. Bowman},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=NikbrdtYvG}\n}", "abstract":"Chain-of-thought responses from language models improve performance across most benchmarks. However, it remains unclear to what extent these performance gains can be attributed to human-like task decomposition or simply the greater computation that additional tokens allow. We show that transformers can use meaningless filler tokens (e.g., \u2018......\u2019) in place of a chain of thought to solve two hard algorithmic tasks they could not solve when responding without intermediate tokens. However, we find empirically that learning to use filler tokens is difficult and requires specific, dense supervision to converge. We also provide a theoretical conjecture for the class of problems where filler tokens are useful in terms of the quantifier depth of a first-order formula. For problems satisfying this characterization, chain-of-thought tokens need not provide information about the intermediate computational steps involved in multi-token computations. In summary, our results show that additional tokens can provide computational benefits independent of token choice. The fact that intermediate tokens can act as filler tokens raises concerns about large language models engaging in unauditable, hidden computations that are increasingly detached from the observed chain-of-thought tokens.", "title":"Let\u2019s Think Dot by Dot: Hidden computation in transformer language models", "authors":[ "Jacob Pfau", "William Merrill", "Samuel R. Bowman" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":183 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Nd950RAcCW", "bibtext":"@inproceedings{\ncheng2024multihop,\ntitle={Multi-hop Question Answering under Temporal Knowledge Editing},\nauthor={Keyuan Cheng and Gang Lin and Haoyang Fei and Yuxuan Zhai and Lu Yu and Muhammad Asif Ali and Lijie Hu and Di Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Nd950RAcCW}\n}", "abstract":"Multi-hop question answering (MQA) under knowledge editing (KE) has garnered significant attention in the era of large language models. However, existing models for MQA under KE exhibit poor performance when dealing with questions containing explicit temporal contexts. To address this limitation, we propose a novel framework, namely TEMPoral knowLEdge augmented Multi-hop Question Answering (TEMPLE-MQA). Unlike previous methods, TEMPLE-MQA first constructs a time-aware graph (TAG) to store edit knowledge in a structured manner. Then, through our proposed inference path, structural retrieval, and joint reasoning stages, TEMPLE-MQA effectively discerns temporal contexts within the question query. Experiments on benchmark datasets demonstrate that TEMPLE-MQA significantly outperforms baseline models. Additionally, we contribute a new dataset, namely TKEMQA, which serves as the inaugural benchmark tailored specifically for MQA with temporal scopes.", "title":"Multi-hop Question Answering under Temporal Knowledge Editing", "authors":[ "Keyuan Cheng", "Gang Lin", "Haoyang Fei", "Yuxuan Zhai", "Lu Yu", "Muhammad Asif Ali", "Lijie Hu", "Di Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.00492", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":184 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=NV8yRJRET1", "bibtext":"@inproceedings{\nzala2024diagrammergpt,\ntitle={Diagrammer{GPT}: Generating Open-Domain, Open-Platform Diagrams via {LLM} Planning},\nauthor={Abhay Zala and Han Lin and Jaemin Cho and Mohit Bansal},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=NV8yRJRET1}\n}", "abstract":"Text-to-image (T2I) generation has seen significant growth over the past few years. Despite this, there has been little work on generating diagrams with T2I models. A diagram is a symbolic\/schematic representation that explains information using structurally rich and spatially complex visualizations (e.g., a dense combination of related objects, text labels, directional arrows\/lines, etc.). Existing state-of-the-art T2I models often fail at diagram generation because they lack fine-grained object layout control when many objects are densely connected via complex relations such as arrows\/lines, and also often fail to render comprehensible text labels. To address this gap, we present DiagrammerGPT, a novel two-stage text-to-diagram generation framework leveraging the layout guidance capabilities of LLMs to generate more accurate diagrams. In the first stage, we use LLMs to generate and iteratively refine \u2018diagram plans\u2019 (in a planner-auditor feedback loop). In the second stage, we use a diagram generator, DiagramGLIGEN, and a text label rendering module to generate diagrams (with clear text labels) following the diagram plans. To benchmark the text-to-diagram generation task, we introduce AI2D-Caption, a densely annotated diagram dataset built on top of the AI2D dataset. We show that our DiagrammerGPT framework produces more accurate diagrams, outperforming existing T2I models. We also provide comprehensive analysis, including open-domain diagram generation, multi-platform vector graphic diagram generation, human-in-the-loop editing, and multimodal planner\/auditor LLMs.", "title":"DiagrammerGPT: Generating Open-Domain, Open-Platform Diagrams via LLM Planning", "authors":[ "Abhay Zala", "Han Lin", "Jaemin Cho", "Mohit Bansal" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.12128", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.12128", "n_linked_authors":2, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ "abhayzala\/AI2D-Caption" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":185 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=NPAQ6FKSmK", "bibtext":"@inproceedings{\npan2024autonomous,\ntitle={Autonomous Evaluation and Refinement of Digital Agents},\nauthor={Jiayi Pan and Yichi Zhang and Nicholas Tomlin and Yifei Zhou and Sergey Levine and Alane Suhr},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=NPAQ6FKSmK}\n}", "abstract":"We show that domain-general automatic evaluators can significantly improve the performance of agents for web navigation and device control. \nWe experiment with multiple evaluation models that trade off between inference cost, modularity of design, and accuracy. \nWe validate the performance of these models in several popular benchmarks for digital agents, finding between 74.4 and 92.9% agreement with oracle evaluation metrics.\nFinally, we use these evaluators to improve the performance of existing agents via fine-tuning and inference-time guidance.\nWithout any additional supervision, we improve state-of-the-art performance by 29% on the popular benchmark WebArena, and achieve around 75% relative improvement in device control settings.\nWe release our code and data at \n [https:\/\/github.com\/Berkeley-NLP\/Agent-Eval-Refine](https:\/\/github.com\/Berkeley-NLP\/Agent-Eval-Refine)", "title":"Autonomous Evaluation and Refinement of Digital Agents", "authors":[ "Jiayi Pan", "Yichi Zhang", "Nicholas Tomlin", "Yifei Zhou", "Sergey Levine", "Alane Suhr" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.06474", "GitHub":[ "https:\/\/github.com\/berkeley-nlp\/agent-eval-refine" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.06474", "n_linked_authors":1, "upvotes":1, "num_comments":1, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ "Agent-Eval-Refine\/Captioner" ], "paper_page_exists_pre_conf":1, "unique_id":186 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=N5EYQSwW26", "bibtext":"@inproceedings{\nokazaki2024building,\ntitle={Building a Large Japanese Web Corpus for Large Language Models},\nauthor={Naoaki Okazaki and Kakeru Hattori and Hirai Shota and Hiroki Iida and Masanari Ohi and Kazuki Fujii and Taishi Nakamura and Mengsay Loem and Rio Yokota and Sakae Mizuki},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=N5EYQSwW26}\n}", "abstract":"Open Japanese large language models (LLMs) have been trained on the Japanese portions of corpora such as CC-100, mC4, and OSCAR. However, these corpora were not created for the quality of Japanese texts. This study builds a large Japanese web corpus by extracting and refining text from the Common Crawl archive (21 snapshots of approximately 63.4 billion pages crawled between 2020 and 2023). This corpus consists of approximately 312.1 billion characters (approximately 173 million pages), which is the largest of all available training corpora for Japanese LLMs, surpassing CC-100 (approximately 25.8 billion characters), mC4 (approximately 239.7 billion characters) and OSCAR 23.01 (approximately 74 billion characters). To confirm the quality of the corpus, we performed continual pre-training on Llama 2 7B, 13B, 70B, Mistral 7B v0.1, and Mixtral 8x7B as base LLMs and gained consistent (6.6-8.1 points) improvements on Japanese benchmark datasets. We also demonstrate that the improvement on Llama 2 13B brought from the presented corpus was the largest among those from other existing corpora.", "title":"Building a Large Japanese Web Corpus for Large Language Models", "authors":[ "Naoaki Okazaki", "Kakeru Hattori", "Hirai Shota", "Hiroki Iida", "Masanari Ohi", "Kazuki Fujii", "Taishi Nakamura", "Mengsay Loem", "Rio Yokota", "Sakae Mizuki" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.17733", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.17733", "n_linked_authors":2, "upvotes":3, "num_comments":0, "n_authors":10, "Models":[ "tokyotech-llm\/Swallow-7b-instruct-hf", "tokyotech-llm\/Swallow-70b-instruct-hf", "tokyotech-llm\/Swallow-MX-8x7b-NVE-v0.1", "tokyotech-llm\/Swallow-MS-7b-v0.1", "tokyotech-llm\/Swallow-13b-instruct-hf", "tokyotech-llm\/Swallow-7b-hf", "tokyotech-llm\/Swallow-13b-hf", "tokyotech-llm\/Swallow-7b-plus-hf", "tokyotech-llm\/Llama-3-Swallow-8B-v0.1", "tokyotech-llm\/Swallow-70b-hf", "tokyotech-llm\/Llama-3-Swallow-70B-v0.1", "tokyotech-llm\/Swallow-7b-NVE-instruct-hf", "tokyotech-llm\/Swallow-70b-NVE-instruct-hf", "tokyotech-llm\/Swallow-7b-NVE-hf", "tokyotech-llm\/Swallow-70b-NVE-hf", "tokyotech-llm\/Swallow-13b-NVE-hf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-NVE-instruct-hf-4bits", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-NVE-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-NVE-instruct-hf-8bits", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-instruct-hf-4bits", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-instruct-hf-8bits", "RichardErkhov\/tokyotech-llm_-_Swallow-13b-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-70b-NVE-instruct-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-MS-7b-v0.1-gguf", "RichardErkhov\/tokyotech-llm_-_Llama-3-Swallow-8B-v0.1-gguf", "RichardErkhov\/tokyotech-llm_-_Swallow-7b-hf-gguf", "RichardErkhov\/tokyotech-llm_-_Llama-3-Swallow-70B-v0.1-gguf" ], "Datasets":[ ], "Spaces":[ "featherless-ai\/try-this-model", "hayas\/Swallow-13B-instruct", "Darok\/Featherless-Feud", "mmnga\/vocabviewer", "Granther\/try-this-model", "emekaboris\/try-this-model", "isonuma\/marutenbo", "kmero\/tokyotech-llm-Swallow-70b-instruct-hf", "Huaibo\/tokyotech-llm-Swallow-7b-instruct-hf" ], "paper_page_exists_pre_conf":1, "unique_id":187 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MoitXWlXcS", "bibtext":"@inproceedings{\ngodey2024why,\ntitle={Why do small language models underperform? Studying Language Model Saturation via the Softmax Bottleneck},\nauthor={Nathan Godey and {\\'E}ric Villemonte de la Clergerie and Beno{\\^\\i}t Sagot},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MoitXWlXcS}\n}", "abstract":"Recent advances in language modeling consist in pretraining highly parameterized neural networks on extremely large web-mined text corpora. Training and inference with such models can be costly in practice, which incentivizes the use of smaller counterparts. However, it has been observed that smaller models can suffer from saturation, characterized as a drop in performance at some advanced point in training followed by a plateau. In this paper, we find that such saturation can be explained by a mismatch between the hidden dimension of smaller models and the high rank of the target contextual probability distribution. This mismatch affects the performance of the linear prediction head used in such models through the well-known softmax bottleneck phenomenon. We measure the effect of the softmax bottleneck in various settings and estimate that models based on less than roughly 1000 hidden dimensions tend to adopt degenerate latent representations in late pretraining, which leads to reduced evaluation performance.", "title":"Why do small language models underperform? Studying Language Model Saturation via the Softmax Bottleneck", "authors":[ "Nathan Godey", "\u00c9ric Villemonte de la Clergerie", "Beno\u00eet Sagot" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.07647", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.07647", "n_linked_authors":1, "upvotes":4, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":188 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MmBQSNHKUl", "bibtext":"@inproceedings{\nle2024are,\ntitle={Are Language Models Robust Coreference Resolvers?},\nauthor={Nghia T. Le and Alan Ritter},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MmBQSNHKUl}\n}", "abstract":"Recent work on extending coreference resolution across domains and languages relies on annotated data in both the target domain and language. At the same time, pre-trained large language models (LMs) have been reported to exhibit strong zero- and few-shot learning abilities across a wide range of NLP tasks. However, prior work mostly studied this ability using artificial sentence-level datasets such as the Winograd Schema Challenge. In this paper, we assess the feasibility of prompt-based coreference resolution by evaluating instruction-tuned language models on difficult, linguistically-complex coreference benchmarks (e.g., CoNLL-2012). We show that prompting for coreference can outperform current unsupervised coreference systems, although this approach appears to be reliant on high-quality mention detectors. Further investigations reveal that instruction-tuned LMs generalize surprisingly well across domains, languages, and time periods; yet continued fine-tuning of neural models should still be preferred if small amounts of annotated examples are available.", "title":"Are Language Models Robust Coreference Resolvers?", "authors":[ "Nghia T. Le", "Alan Ritter" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":189 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MkppMETE49", "bibtext":"@inproceedings{\nsharma2024information,\ntitle={Information Guided Regularization for Fine-tuning Language Models},\nauthor={Mandar Sharma and Nikhil Muralidhar and Shengzhe Xu and Raquib Bin Yousuf and Naren Ramakrishnan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MkppMETE49}\n}", "abstract":"The pretraining-fine-tuning paradigm has been the de facto strategy for transfer learning in modern language modeling. With the understanding that task adaptation in LMs is often a function of parameters shared across tasks, we argue that a more surgical approach to regularization needs to exist for smoother transfer learning. Towards this end, we investigate how the pretraining loss landscape is affected by these task-sensitive parameters through an information-theoretic lens. We then leverage the findings from our investigations to devise a novel approach to dropout for improved model regularization and better downstream generalization. This approach, named guided dropout, is both task & architecture agnostic and adds no computational overhead to the fine-tuning process. Through empirical evaluations, we showcase that our approach to regularization yields consistently better performance, even in scenarios of data paucity, compared to standardized baselines.", "title":"Information Guided Regularization for Fine-tuning Language Models", "authors":[ "Mandar Sharma", "Nikhil Muralidhar", "Shengzhe Xu", "Raquib Bin Yousuf", "Naren Ramakrishnan" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mandar-sharma\/guided-dropout" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":190 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MXLBXjQkmb", "bibtext":"@inproceedings{\nzhang2024negative,\ntitle={Negative Preference Optimization: From Catastrophic Collapse to Effective Unlearning},\nauthor={Ruiqi Zhang and Licong Lin and Yu Bai and Song Mei},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MXLBXjQkmb}\n}", "abstract":"Large Language Models (LLMs) often memorize sensitive, private, or copyrighted data during pre-training. LLM unlearning aims to eliminate the influence of undesirable data from the pre-trained model while preserving the model's utilities on other tasks. Several practical methods have recently been proposed for LLM unlearning, mostly based on gradient ascent (GA) on the loss of undesirable data. However, on certain unlearning tasks, these methods either fail to effectively unlearn the target data or suffer from catastrophic collapse --- a drastic degradation of the model's utilities. \n\nIn this paper, we propose \\emph{Negative Preference Optimization} (NPO), a simple alignment-inspired method that could efficiently and effectively unlearn a target dataset. We theoretically show that the progression toward catastrophic collapse by minimizing the NPO loss is exponentially slower than GA. Through experiments\non synthetic data and the benchmark TOFU dataset, we demonstrate that NPO-based methods achieve a better balance between unlearning the undesirable data and maintaining the model's utilities. \nWe also observe that NPO-based methods generate more sensible outputs than GA-based methods, whose outputs are often gibberish.\nRemarkably, on TOFU, NPO-based methods are the first to achieve reasonable unlearning results in forgetting 50\\% (or more) of the training data, whereas existing methods already struggle with forgetting 10\\% of training data.", "title":"Negative Preference Optimization: From Catastrophic Collapse to Effective Unlearning", "authors":[ "Ruiqi Zhang", "Licong Lin", "Yu Bai", "Song Mei" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.05868", "GitHub":[ "https:\/\/github.com\/ucsb-nlp-chang\/uld" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":191 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MNLAbfZwh2", "bibtext":"@inproceedings{\nelmaaroufi2024scenicnl,\ntitle={Scenic{NL}: Generating Probabilistic Scenario Programs from Natural Language},\nauthor={Karim Elmaaroufi and Devan Shanker and Ana Cismaru and Marcell Vazquez-Chanlatte and Alberto Sangiovanni-Vincentelli and Matei Zaharia and Sanjit A. Seshia},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MNLAbfZwh2}\n}", "abstract":"For cyber-physical systems, including robotics and autonomous vehicles, mass deployment has been hindered by fatal errors that occur when operating in rare events. To better understand failure modes, companies meticulously recreate rare crash events in simulation, but current methods do not easily allow for exploring \u201dwhat if\u201d scenarios which could reveal how accidents might have been avoided. We present ScenicNL, an AI system that generates probabilistic scenario programs from natural language. Given the abundance of documented failures of autonomous vehicles due to regulatory requirements, we apply ScenicNL to police crash reports, providing a data-driven approach to capturing and understanding these failures. By using a probabilistic language such as Scenic, we can clearly and concisely represent such scenarios of interest and easily ask \u201cwhat if\u201d questions. We demonstrate how commonplace prompting techniques with Large Language Models are incapable of generating code for low-resource languages such as Scenic. We propose an AI system via the composition of several prompting techniques to extract the reasoning abilities needed to model probability distributions around the uncertainty in the crash events. Our system then uses Constrained Decoding and tools such as a compiler and simulator to produce scenario programs in this low-resource setting. We evaluate our system on publicly available autonomous vehicle crash reports in California from the last five years and share insights into how we generate code that is both semantically meaningful and syntactically correct. Finally, we release our code and a collection of over 500 crash reports from the California Department of Motor Vehicles.", "title":"ScenicNL: Generating Probabilistic Scenario Programs from Natural Language", "authors":[ "Karim Elmaaroufi", "Devan Shanker", "Ana Cismaru", "Marcell Vazquez-Chanlatte", "Alberto Sangiovanni-Vincentelli", "Matei Zaharia", "Sanjit A. Seshia" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.03709", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":192 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MLD1cwfjUb", "bibtext":"@inproceedings{\nebrahimi2024your,\ntitle={Your Context Is Not an Array: Unveiling Random Access Limitations in Transformers},\nauthor={MohammadReza Ebrahimi and Sunny Panchal and Roland Memisevic},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MLD1cwfjUb}\n}", "abstract":"Despite their recent successes, Transformer-based large language models show surprising failure modes. A well-known example of such failure modes is their inability to length-generalize: solving problem instances at inference time that are longer than those seen during training. In this work, we further explore the root cause of this failure by performing a detailed analysis of model behaviors on the simple parity task. Our analysis suggests that length generalization failures are intricately related to a model's inability to perform random memory accesses within its context window. We present supporting evidence for this hypothesis by demonstrating the effectiveness of methodologies that circumvent the need for indexing or that enable random token access indirectly, through content-based addressing. We further show where and how the failure to perform random memory access manifests through attention map visualizations.", "title":"Your Context Is Not an Array: Unveiling Random Access Limitations in Transformers", "authors":[ "MohammadReza Ebrahimi", "Sunny Panchal", "Roland Memisevic" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.05506", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2408.05506", "n_linked_authors":1, "upvotes":8, "num_comments":2, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":193 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=MI52iXSSNy", "bibtext":"@inproceedings{\nfu2024commonsenseti,\ntitle={Commonsense-T2I Challenge: Can Text-to-Image Generation Models Understand Commonsense?},\nauthor={Xingyu Fu and Muyu He and Yujie Lu and William Yang Wang and Dan Roth},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=MI52iXSSNy}\n}", "abstract":"We present a novel task and benchmark for evaluating the ability of text-to-image(T2I) generation models to produce images that align with commonsense in real life, which we call Commonsense-T2I. Given two adversarial text prompts containing an identical set of action words with minor differences, such as *a lightbulb without electricity* vs. *a lightbulb with electricity*, we evaluate whether T2I models can conduct visual-commonsense reasoning, e.g. produce images that fit *The lightbulb is unlit* vs. *The lightbulb is lit* correspondingly.\nCommonsense-T2I presents an adversarial challenge, providing pairwise text prompts along with expected outputs.\nThe dataset is carefully hand-curated by experts and annotated with fine-grained labels, such as commonsense type and likelihood of the expected outputs, to assist analyzing model behavior. We benchmark a variety of state-of-the-art (sota) T2I models and surprisingly find that, there is still a large gap between image synthesis and real life photos--even the DALL-E 3 model could only achieve 48.92% on Commonsense-T2I, and the stable diffusion XL model only achieves 24.92% accuracy. Our experiments show that GPT-enriched prompts cannot solve this challenge, and we include a detailed analysis about possible reasons for such deficiency. We aim for Commonsense-T2I to serve as a high-quality evaluation benchmark for T2I commonsense checking, fostering advancements in real life image generation.", "title":"Commonsense-T2I Challenge: Can Text-to-Image Generation Models Understand Commonsense?", "authors":[ "Xingyu Fu", "Muyu He", "Yujie Lu", "William Yang Wang", "Dan Roth" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":194 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=LzpaUxcNFK", "bibtext":"@inproceedings{\nvacareanu2024from,\ntitle={From Words to Numbers: Your Large Language Model Is Secretly A Capable Regressor When Given In-Context Examples},\nauthor={Robert Vacareanu and Vlad Andrei Negru and Vasile Suciu and Mihai Surdeanu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=LzpaUxcNFK}\n}", "abstract":"We analyze how well pre-trained large language models (e.g., Llama2, GPT-4, Claude 3, etc) can do linear and non-linear regression when given in-context examples, without any additional training or gradient updates. Our findings reveal that several large language models (e.g., GPT-4, Claude 3) are able to perform regression tasks with a performance rivaling (or even outperforming) that of traditional supervised methods such as Random Forest, Bagging, or Gradient Boosting. For example, on the challenging Friedman \\#2 regression dataset, Claude 3 outperforms many supervised methods such as AdaBoost, SVM, Random Forest, KNN, or Gradient Boosting.\nWe then investigate how well the performance of large language models scales with the number of in-context exemplars. We borrow from the notion of regret from online learning and empirically show that LLMs are capable of obtaining a sub-linear regret.", "title":"From Words to Numbers: Your Large Language Model Is Secretly A Capable Regressor When Given In-Context Examples", "authors":[ "Robert Vacareanu", "Vlad Andrei Negru", "Vasile Suciu", "Mihai Surdeanu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.07544", "GitHub":[ "https:\/\/github.com\/robertvacareanu\/llm4regression" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.07544", "n_linked_authors":1, "upvotes":18, "num_comments":1, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":195 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Lmjgl2n11u", "bibtext":"@inproceedings{\nmondorf2024beyond,\ntitle={Beyond Accuracy: Evaluating the Reasoning Behavior of Large Language Models - A Survey},\nauthor={Philipp Mondorf and Barbara Plank},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Lmjgl2n11u}\n}", "abstract":"Large language models (LLMs) have recently shown impressive performance on tasks involving reasoning, leading to a lively debate on whether these models possess reasoning capabilities similar to humans. However, despite these successes, the depth of LLMs' reasoning abilities remains uncertain. This uncertainty partly stems from the predominant focus on task performance, measured through shallow accuracy metrics, rather than a thorough investigation of the models' reasoning behavior. This paper seeks to address this gap by providing a comprehensive review of studies that go beyond task accuracy, offering deeper insights into the models' reasoning processes. Furthermore, we survey prevalent methodologies to evaluate the reasoning behavior of LLMs, emphasizing current trends and efforts towards more nuanced reasoning analyses. Our review suggests that LLMs tend to rely on surface-level patterns and correlations in their training data, rather than on sophisticated reasoning abilities. Additionally, we identify the need for further research that delineates the key differences between human and LLM-based reasoning. Through this survey, we aim to shed light on the complex reasoning processes within LLMs.", "title":"Beyond Accuracy: Evaluating the Reasoning Behavior of Large Language Models - A Survey", "authors":[ "Philipp Mondorf", "Barbara Plank" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":196 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=LWfDcI6txJ", "bibtext":"@inproceedings{\narmengol-estap{\\'e}2024forklift,\ntitle={Forklift: An Extensible Neural Lifter},\nauthor={Jordi Armengol-Estap{\\'e} and Rodrigo C. O. Rocha and Jackson Woodruff and Pasquale Minervini and Michael O'Boyle},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=LWfDcI6txJ}\n}", "abstract":"The escalating demand to migrate legacy software across different Instruction Set Architectures (ISAs) has driven the development of assembly-to-assembly translators to map between their respective assembly languages. However, the development of these tools requires substantial engineering effort. State-of-the-art approaches use lifting, a technique where source assembly code is translated to an architecture-independent intermediate representation (IR) \u2014 for example, the LLVM IR \u2014 and use a pre-existing compiler to recompile the IR to the target ISA. However, the hand-written rules these lifters employ are sensitive to the particular compiler and optimization level used to generate the code and require significant engineering effort to support each new ISA. We propose Forklift, the first neural lifter that learns how to translate assembly to LLVM IR using a token-level encoder-decoder Transformer. We show how to incrementally add support to new ISAs by fine tuning the assembly encoder and freezing the IR decoder, improving the overall accuracy and efficiency. We collect millions of parallel LLVM IR, x86, ARM, and RISC-V programs across compilers and optimization levels to train Forklift and set up an input\/output-based accuracy harness. We evaluate Forklift on two challenging benchmark suites and translate 2.5x more x86 programs than a state-of-the-art hand-written lifter and 4.4x more x86 programs than GPT-4 as well as enabling translation from new ISAs.", "title":"Forklift: An Extensible Neural Lifter", "authors":[ "Jordi Armengol-Estap\u00e9", "Rodrigo C. O. Rocha", "Jackson Woodruff", "Pasquale Minervini", "Michael O'Boyle" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.16041", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":197 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=LKEJPySnlt", "bibtext":"@inproceedings{\nzhong2024lory,\ntitle={Lory: Fully Differentiable Mixture-of-Experts for Autoregressive Language Model Pre-training},\nauthor={Zexuan Zhong and Mengzhou Xia and Danqi Chen and Mike Lewis},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=LKEJPySnlt}\n}", "abstract":"Mixture-of-experts (MoE) models facilitate ef\ufb01cient scaling; however, training the router network introduces the challenge of optimizing a non-differentiable, discrete objective. Recently, a fully-differentiable MoE architecture SMEAR was proposed (Muqeeth et al., 2023), which softly merges experts in the parameter space. Nevertheless, its effectiveness was only demonstrated in downstream \ufb01ne-tuning on classi\ufb01cation tasks. In this paper, we present Lory, a novel approach that scales such architectures to autoregressive language model pre-training. Lory introduces two key techniques: (1) a causal segment routing strategy that achieves high ef\ufb01ciency for expert merging operations while preserving the autoregressive nature of language models; (2) a similarity-based data batching method that encourages expert specialization by grouping similar documents in training instances. We pre-train a series of Lory models from scratch on 150B tokens, with up to 32 experts and 30B (1.5B active) parameters. Experimental results show signi\ufb01cant performance gains over parameter-matched dense models in both perplexity (+13.9%) and a variety of downstream tasks (+1.5%-11.1%). Despite segment-level routing, Lory models achieve competitive performance compared to state-of-the-art MoE models with token-level routing. We further demonstrate that the trained experts capture domain-level specialization without supervision. Our work highlights the potential of fully-differentiable MoE architectures for language model pre-training and advocates future research in this area.", "title":"Lory: Fully Differentiable Mixture-of-Experts for Autoregressive Language Model Pre-training", "authors":[ "Zexuan Zhong", "Mengzhou Xia", "Danqi Chen", "Mike Lewis" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":198 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=LFfktMPAci", "bibtext":"@inproceedings{\nross2024what,\ntitle={What makes a good metric? Evaluating automatic metrics for text-to-image consistency},\nauthor={Candace Ross and Melissa Hall and Adriana Romero-Soriano and Adina Williams},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=LFfktMPAci}\n}", "abstract":"Language models are increasingly being incorporated as components in larger AI systems for various purposes, from prompt optimization to automatic evaluation. In this work, we analyze the construct validity of four recent, commonly used methods for measuring text-to-image consistency---CLIPScore, TIFA, VPEval, and DSG---which rely on language models and\/or VQA models as components. We define construct validity for text-image consistency metrics as a set of desiderata that text-image consistency metrics should have, and find that no tested metric satisfies all of them. We find that metrics lack sufficient sensitivity to language and visual properties. Next, we find that TIFA, VPEval and DSG contribute novel information above and beyond CLIPScore, but also that they correlate highly with each other. We also ablate different aspects of the text-image consistency metrics and find that not all model components are strictly necessary, also a symptom of insufficient sensitivity to visual information. Finally, we show that all three VQA-based metrics likely rely on familiar text shortcuts (such as yes-bias in QA) that call their aptitude as quantitative evaluations of model performance into question.", "title":"What makes a good metric? Evaluating automatic metrics for text-to-image consistency", "authors":[ "Candace Ross", "Melissa Hall", "Adriana Romero-Soriano", "Adina Williams" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":199 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=KqK5XcgEhR", "bibtext":"@inproceedings{\nzhao2024empowering,\ntitle={Empowering Large Language Model Agents through Action Learning},\nauthor={Haiteng Zhao and Chang Ma and Guoyin Wang and Jing Su and Lingpeng Kong and Jingjing Xu and Zhi-Hong Deng and Hongxia Yang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=KqK5XcgEhR}\n}", "abstract":"Large Language Model (LLM) Agents have recently garnered increasing interest yet they are limited in their ability to learn from trial and error, a key element of intelligent behavior. In this work, we argue that the capacity to learn new actions from experience is fundamental to the advancement of learning in LLM agents. While humans naturally expand their action spaces and develop skills through experiential learning, LLM agents typically operate within fixed action spaces, limiting their potential for growth. To address these challenges, our study explores open-action learning for language agents. We introduce a framework LearnAct with an iterative learning strategy to create and improve actions in the form of Python functions. In each iteration, LLM revises and updates the currently available actions based on the errors identified in unsuccessful training tasks, thereby enhancing action effectiveness. Our experimental evaluations across Robotic Planning and Alfworld environments reveal that after learning on a few training task instances, our approach to open-action learning markedly improves agent performance for the type of task (by 32 percent in AlfWorld compared to ReAct+Reflexion, for instance) highlighting the importance of experiential action learning in the development of more intelligent LLM agents.", "title":"Empowering Large Language Model Agents through Action Learning", "authors":[ "Haiteng Zhao", "Chang Ma", "Guoyin Wang", "Jing Su", "Lingpeng Kong", "Jingjing Xu", "Zhi-Hong Deng", "Hongxia Yang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.15809", "GitHub":[ "https:\/\/github.com\/zhao-ht\/learnact" ], "paper_page":"https:\/\/huggingface.co\/papers\/2402.15809", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":8, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":200 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=KidynPuLNW", "bibtext":"@inproceedings{\npeng2024on,\ntitle={On Limitations of the Transformer Architecture},\nauthor={Binghui Peng and Srini Narayanan and Christos Papadimitriou},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=KidynPuLNW}\n}", "abstract":"What are the root causes of hallucinations in large language models (LLMs)? We use Communication Complexity to prove that the Transformer layer is incapable of composing functions (e.g., identify a grandparent of a person in a genealogy) if the domains of the functions are large enough; we show through examples that this inability is already empirically present when the domains are quite small. We also point out that several mathematical tasks that are at the core of the so-called compositional tasks thought to be hard for LLMs are unlikely to be solvable by Transformers, for large enough instances and assuming that certain well accepted conjectures in the field of Computational Complexity are true.", "title":"On Limitations of the Transformer Architecture", "authors":[ "Binghui Peng", "Srini Narayanan", "Christos Papadimitriou" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":201 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=KZd1EErRJ1", "bibtext":"@inproceedings{\nfu2024isobench,\ntitle={IsoBench: Benchmarking Multimodal Foundation Models on Isomorphic Representations},\nauthor={Deqing Fu and Ruohao Guo and Ghazal Khalighinejad and Ollie Liu and Bhuwan Dhingra and Dani Yogatama and Robin Jia and Willie Neiswanger},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=KZd1EErRJ1}\n}", "abstract":"Current foundation models exhibit impressive capabilities when prompted either with text only or with both image and text inputs. But do their capabilities change depending on the input modality? In this work, we propose **IsoBench**, a benchmark dataset containing problems from four major areas: math, science, algorithms, and games. Each example is presented with multiple **isomorphic representations** of inputs, such as visual, textual, and mathematical presentations. IsoBench provides fine-grained feedback to diagnose performance gaps caused by the form of the representation. Across various foundation models, we observe that on the same problem, models have a consistent preference towards textual representations. Most prominently, when evaluated on all IsoBench problems, Claude-3 Opus performs 28.66 points worse when provided with images instead of text; similarly, GPT-4 Turbo is 18.71 points worse and Gemini Pro is 14.87 points worse. Finally, we present two prompting techniques, *IsoCombination* and *IsoScratchPad*, which improve model performance by considering combinations of, and translations between, different input representations.", "title":"IsoBench: Benchmarking Multimodal Foundation Models on Isomorphic Representations", "authors":[ "Deqing Fu", "Ruohao Guo", "Ghazal Khalighinejad", "Ollie Liu", "Bhuwan Dhingra", "Dani Yogatama", "Robin Jia", "Willie Neiswanger" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01266", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.01266", "n_linked_authors":3, "upvotes":1, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ "isobench\/IsoBench" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":202 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=K1M3gLW0MX", "bibtext":"@inproceedings{\nding2024on,\ntitle={On Fairness of Low-Rank Adaptation of Large Models},\nauthor={Zhoujie Ding and Ken Liu and Pura Peetathawatchai and Berivan Isik and Sanmi Koyejo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=K1M3gLW0MX}\n}", "abstract":"Low-rank adaptation of large models, particularly LoRA, has gained traction due to its computational efficiency. This efficiency, contrasted with the prohibitive costs of full-model fine-tuning, means that practitioners often turn to LoRA without a complete understanding of its ramifications. In this study, we focus on fairness and ask whether LoRA has an unexamined impact on utility, calibration, and resistance to membership inference across different subgroups (e.g., genders, races, religions) compared to a full-model fine-tuning baseline. We present extensive experiments across vision and language domains and across classification and generation tasks using ViT-Base, Swin-v2-Large, Llama-2 7B, and Mistral 7B. Intriguingly, experiments suggest that while one can isolate cases where LoRA exacerbates model bias across subgroups, the pattern is inconsistent---in many cases, LoRA has equivalent or even improved fairness compared to the base model or its full fine-tuning baseline. We also examine the complications of evaluating fine-tuning fairness relating to task design and model token bias, calling for more careful fairness evaluations in future work.", "title":"On Fairness of Low-Rank Adaptation of Large Models", "authors":[ "Zhoujie Ding", "Ken Liu", "Pura Peetathawatchai", "Berivan Isik", "Sanmi Koyejo" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/kenziyuliu\/lora-fairness" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":203 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Jd0bCD12DS", "bibtext":"@inproceedings{\nchua2024mind,\ntitle={Mind the Privacy Unit! User-Level Differential Privacy for Language Model Fine-Tuning},\nauthor={Lynn Chua and Badih Ghazi and Yangsibo Huang and Pritish Kamath and Ravi Kumar and Daogao Liu and Pasin Manurangsi and Amer Sinha and Chiyuan Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Jd0bCD12DS}\n}", "abstract":"Large language models (LLMs) have emerged as powerful tools for tackling complex tasks across diverse domains, but they also raise privacy concerns when fine-tuned on sensitive data due to potential memorization. While differential privacy (DP) offers a promising solution by ensuring models are \u201calmost indistinguishable\u201d with or without any particular privacy unit, current evaluations on LLMs mostly treat each example (text record) as the privacy unit. This leads to uneven user privacy guarantees when contributions per user vary. We therefore study user-level DP motivated by applications where it is necessary to ensure uniform privacy protection across users. We present a systematic evaluation of user-level DP for LLM fine-tuning on natural language generation tasks. Focusing on two mechanisms for achieving user-level DP guarantees, Group Privacy and User-wise DP-SGD, we investigate design choices like data selection strategies and parameter tuning for the best privacy-utility tradeoff.", "title":"Mind the Privacy Unit! User-Level Differential Privacy for Language Model Fine-Tuning", "authors":[ "Lynn Chua", "Badih Ghazi", "Yangsibo Huang", "Pritish Kamath", "Ravi Kumar", "Daogao Liu", "Pasin Manurangsi", "Amer Sinha", "Chiyuan Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2406.14322", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":204 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=JXcXnJJSuL", "bibtext":"@inproceedings{\njung2024informationtheoretic,\ntitle={Information-Theoretic Distillation for Reference-less Summarization},\nauthor={Jaehun Jung and Ximing Lu and Liwei Jiang and Faeze Brahman and Peter West and Pang Wei Koh and Yejin Choi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=JXcXnJJSuL}\n}", "abstract":"The current winning recipe for automatic summarization is using proprietary large-scale language models (LLMs) such as ChatGPT as is, or imitation learning from them as teacher models. While increasingly ubiquitous dependence on such large-scale language models is convenient, there remains an important question of whether small-scale models could have achieved competitive results, if we were to seek an alternative learning method---that allows for a more cost-efficient, controllable, yet powerful summarizer. We present InfoSumm, a novel framework to distill a powerful summarizer based on the information-theoretic objective for summarization, without relying on either the LLM's capability or human-written references. To achieve this, we first propose a novel formulation of the desiderata of summarization (saliency, faithfulness and brevity) through the lens of mutual information between the original document and the summary. Based on this formulation, we start off from Pythia-2.8B as the teacher model, which is not yet capable of summarization, then self-train the model to optimize for the information-centric measures of ideal summaries. Distilling from the improved teacher, we arrive at a compact but powerful summarizer with only 568M parameters that performs competitively against ChatGPT, without ever relying on ChatGPT's capabilities. Extensive analysis demonstrates that our approach outperforms in-domain supervised models in human evaluation, let alone state-of-the-art unsupervised methods, and wins over ChatGPT in controllable summarization.", "title":"Information-Theoretic Distillation for Reference-less Summarization", "authors":[ "Jaehun Jung", "Ximing Lu", "Liwei Jiang", "Faeze Brahman", "Peter West", "Pang Wei Koh", "Yejin Choi" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":205 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=IW1PR7vEBf", "bibtext":"@inproceedings{\nbehnamghader2024llmvec,\ntitle={{LLM}2Vec: Large Language Models Are Secretly Powerful Text Encoders},\nauthor={Parishad BehnamGhader and Vaibhav Adlakha and Marius Mosbach and Dzmitry Bahdanau and Nicolas Chapados and Siva Reddy},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=IW1PR7vEBf}\n}", "abstract":"Large decoder-only language models (LLMs) are the state-of-the-art models on most of today's NLP tasks and benchmarks. Yet, the community is only slowly adopting these models for text embedding tasks, which require rich contextualized representations. In this work, we introduce LLM2Vec, a simple unsupervised approach that can transform any decoder-only LLM into a strong text encoder. LLM2Vec consists of three simple steps: 1) enabling bidirectional attention, 2) masked next token prediction, and 3) unsupervised contrastive learning. We demonstrate the effectiveness of LLM2Vec by applying it to 4 popular LLMs ranging from 1.3B to 8B parameters and evaluate the transformed models on English word- and sequence-level tasks. We outperform encoder-only models by a large margin on word-level tasks and reach a new unsupervised state-of-the-art performance on the Massive Text Embeddings Benchmark (MTEB). Moreover, when combining LLM2Vec with supervised contrastive learning, we achieve state-of-the-art performance on MTEB among models that train only on publicly available data (as of May 24, 2024). Our strong empirical results and extensive analysis demonstrate that LLMs can be effectively transformed into universal text encoders in a parameter-efficient manner without the need for expensive adaptation or synthetic GPT-4 generated data.", "title":"LLM2Vec: Large Language Models Are Secretly Powerful Text Encoders", "authors":[ "Parishad BehnamGhader", "Vaibhav Adlakha", "Marius Mosbach", "Dzmitry Bahdanau", "Nicolas Chapados", "Siva Reddy" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.05961", "GitHub":[ "https:\/\/github.com\/mcgill-nlp\/llm2vec" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.05961", "n_linked_authors":5, "upvotes":64, "num_comments":5, "n_authors":6, "Models":[ "McGill-NLP\/LLM2Vec-Meta-Llama-3-8B-Instruct-mntp-supervised", "McGill-NLP\/LLM2Vec-Mistral-7B-Instruct-v2-mntp-supervised", "McGill-NLP\/LLM2Vec-Meta-Llama-3-8B-Instruct-mntp", "McGill-NLP\/LLM2Vec-Mistral-7B-Instruct-v2-mntp", "McGill-NLP\/LLM2Vec-Mistral-7B-Instruct-v2-mntp-unsup-simcse", "knowledgator\/Qwen-encoder-0.5B", "McGill-NLP\/LLM2Vec-Sheared-LLaMA-mntp", "McGill-NLP\/LLM2Vec-Sheared-LLaMA-mntp-supervised", "McGill-NLP\/LLM2Vec-Meta-Llama-3-8B-Instruct-mntp-unsup-simcse", "McGill-NLP\/LLM2Vec-Llama-2-7b-chat-hf-mntp-supervised", "McGill-NLP\/LLM2Vec-Sheared-LLaMA-mntp-unsup-simcse", "knowledgator\/Sheared-LLaMA-encoder-1.3B", "knowledgator\/Qwen-encoder-1.5B", "macadeliccc\/dolphin-2.9-llama3-8b-emb", "McGill-NLP\/LLM2Vec-Llama-2-7b-chat-hf-mntp", "McGill-NLP\/LLM2Vec-Llama-2-7b-chat-hf-mntp-unsup-simcse", "knowledgator\/Llama-encoder-1.0B", "uzabase\/LLM2Vec-Llama-2-7b-hf-mntp", "uzabase\/LLM2Vec-Llama-2-7b-hf-wikipedia-jp-mntp", "uzabase\/LLM2Vec-Swallow-7b-hf-wikipedia-jp-mntp", "uzabase\/LLM2Vec-Llama-2-7b-hf-mntp-unsup-simcse", "uzabase\/LLM2Vec-Llama-2-7b-hf-wikipedia-jp-mntp-unsup-simcse", "uzabase\/LLM2Vec-Swallow-7b-hf-wikipedia-jp-mntp-unsup-simcse", "RichardErkhov\/knowledgator_-_Llama-encoder-1.0B-gguf", "RichardErkhov\/knowledgator_-_Qwen-encoder-0.5B-gguf", "RichardErkhov\/knowledgator_-_Qwen-encoder-1.5B-gguf" ], "Datasets":[ ], "Spaces":[ "mteb\/leaderboard", "mteb\/arena", "Nymbo\/MTEB-Arena", "Abhijit-192-168-1-1\/example_LLM2Vec" ], "paper_page_exists_pre_conf":1, "unique_id":206 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=IPZ28ZqD4I", "bibtext":"@inproceedings{\nyee2024faithful,\ntitle={Faithful and Unfaithful Error Recovery in Chain of Thought},\nauthor={Evelyn Yee and Alice Li and Chenyu Tang and Yeon Ho Jung and Ramamohan Paturi and Leon Bergen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=IPZ28ZqD4I}\n}", "abstract":"Large language models (LLMs) often improve their performance in downstream tasks when they generate Chain of Thought reasoning text before producing an answer. We investigate how LLMs recover from errors in Chain of Thought. Through analysis of error recovery behaviors, we find evidence for unfaithfulness in Chain of Thought, which occurs when models arrive at the correct answer despite invalid reasoning text. We identify factors that shift LLM recovery behavior: LLMs recover more frequently from obvious errors and in contexts that provide more evidence for the correct answer. Critically, these factors have divergent effects on faithful and unfaithful recoveries. \nOur results indicate that there are distinct mechanisms driving faithful and unfaithful error recoveries. Selective targeting of these mechanisms may be able to drive down the rate of unfaithful reasoning and improve model interpretability.", "title":"Faithful and Unfaithful Error Recovery in Chain of Thought", "authors":[ "Evelyn Yee", "Alice Li", "Chenyu Tang", "Yeon Ho Jung", "Ramamohan Paturi", "Leon Bergen" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":207 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=INivcBeIDK", "bibtext":"@inproceedings{\nzhu2024autodan,\ntitle={Auto{DAN}: Interpretable Gradient-Based Adversarial Attacks on Large Language Models},\nauthor={Sicheng Zhu and Ruiyi Zhang and Bang An and Gang Wu and Joe Barrow and Zichao Wang and Furong Huang and Ani Nenkova and Tong Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=INivcBeIDK}\n}", "abstract":"Red-teaming Large Language Models (LLMs) requires jailbreak attacks that can comprehensively characterize the vulnerabilities of LLMs. Current blackbox attacks are limited by predefined jailbreak strategies, while whitebox attacks can only generate gibberish attack prompts detectable by perplexity filters. In this paper, we propose a new whitebox attack, named AutoDAN, that merges gradient-based token-wise optimization with controllable text generation. AutoDAN can generate coherent attack prompts on various LLMs that bypass any perplexity filter while having high attack success rates. Notably, these attack prompts spontaneously exhibit jailbreak strategies commonly seen in manual jailbreaks, such as hypothetical scenarios and non-English languages, without any prior knowledge of them. These interpretable attack prompts also generalize better to unseen harmful behaviors and transfer better to blackbox LLMs than gibberish ones. Moreover, we apply AutoDAN to two other red-teaming tasks: prompt leaking and generating falsely censored harmless user requests, demonstrating its flexibility over blackbox attacks. Our work offers an additional tool for red-teaming and understanding jailbreak mechanisms via interpretability.", "title":"AutoDAN: Interpretable Gradient-Based Adversarial Attacks on Large Language Models", "authors":[ "Sicheng Zhu", "Ruiyi Zhang", "Bang An", "Gang Wu", "Joe Barrow", "Zichao Wang", "Furong Huang", "Ani Nenkova", "Tong Sun" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/rotaryhammer\/code-autodan" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":208 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=IBCBMeAhmC", "bibtext":"@inproceedings{\nliu2024evaluating,\ntitle={Evaluating Language Models for Efficient Code Generation},\nauthor={Jiawei Liu and Songrun Xie and Junhao Wang and Yuxiang Wei and Yifeng Ding and LINGMING ZHANG},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=IBCBMeAhmC}\n}", "abstract":"We introduce Differential Performance Evaluation (DPE), a framework designed to reliably evaluate Large Language Models (LLMs) for efficient code generation. Traditional coding benchmarks often fail to provide reliable insights into code efficiency, due to their reliance on simplistic test inputs and the absence of effective compound metrics. DPE addresses these issues by focusing on efficiency-demanding programming tasks and establishing an insightful compound metric for performance evaluation. DPE operates in two phases: To curate efficiency datasets, it selects efficiency-demanding tasks from existing coding benchmarks and generates computationally expensive inputs to stress the efficiency of LLM solutions. To assess the code efficiency, DPE profiles the new solution and compares it globally against a set of reference solutions that exhibit distinct efficiency levels, where the matched level defines its efficiency score. As a proof of concept, we use DPE to create EvalPerf, a benchmark with 121 performance-challenging coding tasks. Our comprehensive evaluation draws interesting findings on the efficiency impact of model sizes, instruction tuning, and prompting. For example, while the scaling law fails to account for code efficiency, general instruction tuning benefits both code correctness and efficiency. We also evaluate the evaluation by examining the effectiveness of DPE, showing that EvalPerf is reliable and convenient to use even across platforms.", "title":"Evaluating Language Models for Efficient Code Generation", "authors":[ "Jiawei Liu", "Songrun Xie", "Junhao Wang", "Yuxiang Wei", "Yifeng Ding", "LINGMING ZHANG" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.06450", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2408.06450", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":209 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=IA8CWtNkUr", "bibtext":"@inproceedings{\nsanyal2024early,\ntitle={Early Weight Averaging meets High Learning Rates for {LLM} Pre-training},\nauthor={Sunny Sanyal and Atula Tejaswi Neerkaje and Jean Kaddour and Abhishek Kumar and sujay sanghavi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=IA8CWtNkUr}\n}", "abstract":"Training Large Language Models (LLMs) incurs significant cost; hence, any strategy that accelerates model convergence is helpful. In this paper, we investigate the ability of a simple idea \u2013 checkpoint averaging along the trajectory of a training run \u2013 to improve both convergence and generalization quite early during training. Here we show that models trained with high learning rates observe higher gains due to checkpoint averaging. Furthermore, these gains are amplified when checkpoints are sampled with considerable spacing in training steps. Our training recipe outperforms conventional training and popular checkpoint averaging baselines such as exponential moving average (EMA) and stochastic moving average (SWA). We evaluate our training recipe by pre-training LLMs, where high learning rates are inherently preferred due to extremely large batch sizes. Specifically, we pre-trained nanoGPT-2 models of varying sizes\u2014small (125M), medium (335M), and large (770M)\u2014on the OpenWebText dataset, comprised of 9B tokens. Additionally, we present results for publicly available Pythia LLMs, ranging from 1B to 12B, which were trained on the PILE-deduped dataset containing 207B tokens.", "title":"Early Weight Averaging meets High Learning Rates for LLM Pre-training", "authors":[ "Sunny Sanyal", "Atula Tejaswi Neerkaje", "Jean Kaddour", "Abhishek Kumar", "sujay sanghavi" ], "id":"Conference", "type":"Poster", "arxiv_id":"2306.03241", "GitHub":[ "https:\/\/github.com\/sanyalsunny111\/early_weight_avg" ], "paper_page":"https:\/\/huggingface.co\/papers\/2306.03241", "n_linked_authors":0, "upvotes":2, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":210 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Hvq9RtSoHG", "bibtext":"@inproceedings{\nhu2024chainofsymbol,\ntitle={Chain-of-Symbol Prompting For Spatial Reasoning in Large Language Models},\nauthor={Hanxu Hu and Hongyuan Lu and Huajian Zhang and Yun-Ze Song and Wai Lam and Yue Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Hvq9RtSoHG}\n}", "abstract":"While conventional Chain-of-Thought prompting shows promising performance on various language tasks for LLMs, the spatial scenarios are nearly unexplored. In this paper, we first investigate the performance of LLMs on complex spatial planning and understanding tasks that require LLMs to understand a virtual spatial environment simulated via natural language and act or reason correspondingly in text. By evaluating on classic spatial planning scenarios through natural language descriptions, we found that current popular LLMs still lack abilities to handle spatial relationships in texts. This arises a question -- do the natural language is the best way to represent complex spatial environments for LLMs, or maybe other alternatives such as symbolic representations are both more efficient and effective for LLMs? To this end, we propose a novel method called CoS (Chain-of-Symbol Prompting) that represents the spatial relationships with condensed symbols during the chained intermediate thinking steps. CoS is easy to use and does not need additional training on LLMs. Extensive experiments indicate that CoS clearly surpasses the performance of the Chain-of-Thought (CoT) Prompting described in natural langauge in all three spatial planning tasks and existing spatial QA benchmark, with even fewer tokens used in the inputs compared with CoT. The performance gain is strong, by up to 60.8% accuracy (from 31.8% to 92.6%) on Brick World scenarios for GPT-3.5-Turbo. CoS also reduces the number of tokens in the prompt obviously, by up to 65.8% of the tokens (from 407 to 139) for the intermediate steps from demonstrations on the Brick World task. Interestingly, we also observed emergent ability of abstract symbols understanding when the size of models scales up.", "title":"Chain-of-Symbol Prompting For Spatial Reasoning in Large Language Models", "authors":[ "Hanxu Hu", "Hongyuan Lu", "Huajian Zhang", "Yun-Ze Song", "Wai Lam", "Yue Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":211 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Hi8jKh4HE9", "bibtext":"@inproceedings{\nhe2024what,\ntitle={What is in Your Safe Data? Identifying Benign Data that Breaks Safety},\nauthor={Luxi He and Mengzhou Xia and Peter Henderson},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Hi8jKh4HE9}\n}", "abstract":"Current Large Language Models (LLMs), even those tuned for safety and alignment, are susceptible to jailbreaking. Some have found that just further fine-tuning an aligned model with benign data (i.e., data without harmful content) surprisingly leads to substantial degradation in safety. We delve into the data-centric aspects of why benign fine-tuning inadvertently contributes to jailbreaking. First, we represent fine-tuning data through two lenses: representation and gradient spaces. Additionally, we propose a bi-directional anchoring method that, during the selection process, prioritizes data points that are close to harmful examples and far from benign ones. Our approach effectively identifies subsets of benign data that are more likely to degrade the model's safety after fine-tuning.\nTraining on just 100 of these seemingly benign datapoints surprisingly leads to the fine-tuned model affirmatively responding to >70% of tested harmful requests, compared to <20% after fine-tuning on randomly selected data. We also observe that the selected data frequently appear as lists, bullet points, or math questions, indicating a systematic pattern in fine-tuning data that contributes to jailbreaking.", "title":"What is in Your Safe Data? Identifying Benign Data that Breaks Safety", "authors":[ "Luxi He", "Mengzhou Xia", "Peter Henderson" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01099", "GitHub":[ "https:\/\/github.com\/princeton-nlp\/benign-data-breaks-safety" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":212 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=HVK6nl3i97", "bibtext":"@inproceedings{\nsun2024triforce,\ntitle={TriForce: Lossless Acceleration of Long Sequence Generation with Hierarchical Speculative Decoding},\nauthor={Hanshi Sun and Zhuoming Chen and Xinyu Yang and Yuandong Tian and Beidi Chen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=HVK6nl3i97}\n}", "abstract":"With large language models (LLMs) widely deployed in long content generation recently, there has emerged an increasing demand for efficient long-sequence inference support. However, key-value (KV) cache, which is stored to avoid re-computation, has emerged as a critical bottleneck by growing linearly in size with the sequence length. Due to the auto-regressive nature of LLMs, the entire KV cache will be loaded for every generated token, resulting in low utilization of computational cores and high latency. While various compression methods for KV cache have been proposed to alleviate this issue, they suffer from degradation in generation quality. We introduce TriForce, a hierarchical speculative decoding system that is scalable for long sequence generation. This approach leverages the original model weights and dynamic sparse KV cache via retrieval as a draft model, which serves as an intermediate layer in the hierarchy and is further speculated by a smaller model to reduce its drafting latency. TriForce not only facilitates impressive speedups for Llama2-7B-128K, achieving up to 2.31$\\times$ on an A100 GPU but also showcases scalability in handling even longer contexts. For the offloading setting on two RTX 4090 GPUs, TriForce achieves 0.108s\/token\u2014only half as slow as the auto-regressive baseline on an A100, which attains 7.78$\\times$ on our optimized offloading system. Additionally, TriForce performs 4.86$\\times$ than DeepSpeed-Zero-Inference on a single RTX 4090 GPU. TriForce's robustness is highlighted by its consistently outstanding performance across various temperatures. The code is available at https:\/\/github.com\/Infini-AI-Lab\/TriForce.", "title":"TriForce: Lossless Acceleration of Long Sequence Generation with Hierarchical Speculative Decoding", "authors":[ "Hanshi Sun", "Zhuoming Chen", "Xinyu Yang", "Yuandong Tian", "Beidi Chen" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.11912", "GitHub":[ "https:\/\/github.com\/Infini-AI-Lab\/TriForce" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.11912", "n_linked_authors":4, "upvotes":16, "num_comments":1, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":213 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=HLoWN6m4fS", "bibtext":"@inproceedings{\nbordt2024elephants,\ntitle={Elephants Never Forget: Memorization and Learning of Tabular Data in Large Language Models},\nauthor={Sebastian Bordt and Harsha Nori and Vanessa Cristiny Rodrigues Vasconcelos and Besmira Nushi and Rich Caruana},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=HLoWN6m4fS}\n}", "abstract":"While many have shown how Large Language Models (LLMs) can be applied to a diverse set of tasks, the critical issues of data contamination and memorization are often glossed over. In this work, we address this concern for tabular data. Specifically, we introduce a variety of different techniques to assess whether a language model has seen a tabular dataset during training. This investigation reveals that LLMs have memorized many popular tabular datasets verbatim. We then compare the few-shot learning performance of LLMs on datasets that were seen during training to the performance on datasets released after training. We find that LLMs perform better on datasets seen during training, indicating that memorization leads to overfitting. At the same time, LLMs show non-trivial performance on novel datasets and are surprisingly robust to data transformations. We then investigate the in-context statistical learning abilities of LLMs. While LLMs are significantly better than random at solving statistical classification problems, the sample efficiency of few-shot learning lags behind traditional statistical learning algorithms, especially as the dimension of the problem increases. This suggests that much of the observed few-shot performance on novel real-world datasets is due to the LLM's world knowledge. Overall, our results highlight the importance of testing whether an LLM has seen an evaluation dataset during pre-training. We release the https:\/\/github.com\/interpretml\/LLM-Tabular-Memorization-Checker Python package to test LLMs for memorization of tabular datasets.", "title":"Elephants Never Forget: Memorization and Learning of Tabular Data in Large Language Models", "authors":[ "Sebastian Bordt", "Harsha Nori", "Vanessa Cristiny Rodrigues Vasconcelos", "Besmira Nushi", "Rich Caruana" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/interpretml\/llm-tabular-memorization-checker" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":214 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=HDkNbfLQgu", "bibtext":"@inproceedings{\ngolovneva2024reverse,\ntitle={Reverse Training to Nurse the Reversal Curse},\nauthor={Olga Golovneva and Zeyuan Allen-Zhu and Jason E Weston and Sainbayar Sukhbaatar},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=HDkNbfLQgu}\n}", "abstract":"Large language models (LLMs) have a surprising failure: when trained on ``A has a feature B``, they do not generalize to ``B is a feature of A``, which is termed the Reversal Curse. Even when training with trillions of tokens this issue still appears due to Zipf's law -- hence even if we train on the entire internet. This work proposes an alternative training scheme, called $reverse$ $training$, whereby all words are used twice, doubling the amount of available tokens. The LLM is trained in both forward and reverse directions by reversing training strings while preserving (i.e., not reversing) chosen substrings, such as entities. We show that data matched reverse-trained models provide superior performance to standard models on standard tasks, and compute matched reverse-trained models provide far superior performance on reversal tasks, helping resolve the reversal curse issue.", "title":"Reverse Training to Nurse the Reversal Curse", "authors":[ "Olga Golovneva", "Zeyuan Allen-Zhu", "Jason E Weston", "Sainbayar Sukhbaatar" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.13799", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.13799", "n_linked_authors":4, "upvotes":13, "num_comments":1, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":215 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=H1Edd5d2JP", "bibtext":"@inproceedings{\njiang2024llmcausal,\ntitle={{LLM}4Causal: Democratized Causal Tools for Everyone via Large Language Model},\nauthor={Haitao Jiang and Lin Ge and Yuhe Gao and Jianian Wang and Rui Song},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=H1Edd5d2JP}\n}", "abstract":"Large Language Models (LLMs) have shown their success in language understanding and reasoning on general topics. However, their capability to perform inference based on user-specified structured data and knowl- edge in corpus-rare concepts, such as causal decision-making is still limited. In this work, we explore the possibility of fine-tuning an open-sourced LLM into LLM4Causal, which can identify the causal task, execute a cor- responding function, and interpret its numerical results based on users\u2019 queries and the provided dataset. Meanwhile, we propose a data gen- eration process for more controllable GPT prompting and present two instruction-tuning datasets: (1) Causal-Retrieval-Bench for causal problem identification and input parameter extraction for causal function calling and (2) Causal-Interpret-Bench for in-context causal interpretation. By conducting end-to-end evaluations and two ablation studies, we showed that LLM4Causal can deliver end-to-end solutions for causal problems and provide easy-to-understand answers, which significantly outperforms the baselines.", "title":"LLM4Causal: Democratized Causal Tools for Everyone via Large Language Model", "authors":[ "Haitao Jiang", "Lin Ge", "Yuhe Gao", "Jianian Wang", "Rui Song" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":216 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=GqDntYTTbk", "bibtext":"@inproceedings{\nzhu2024starlingb,\ntitle={Starling-7B: Improving Helpfulness and Harmlessness with {RLAIF}},\nauthor={Banghua Zhu and Evan Frick and Tianhao Wu and Hanlin Zhu and Karthik Ganesan and Wei-Lin Chiang and Jian Zhang and Jiantao Jiao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=GqDntYTTbk}\n}", "abstract":"This paper presents Starling-7B, the current best-performing 7B chat model on Chatbot Arena, along with its training dataset Nectar, a high-quality preference dataset collected by prompting GPT-4 to rank responses. We propose an internal pairwise rating technique, where the model considers all pairings before providing a ranking decision, leveraging the proven pairwise rating capability of LLMs without the cost of individual pairwise calls. The resulting Nectar dataset comprises 182,954 chat prompts, each with seven responses from various models, ranked by GPT-4, equating to 3.8 million high-quality pairwise comparisons. We introduce Starling-RM-7B and Starling-RM-34B, the reward model suites trained with a K-wise preference loss on Nectar, outperforming pairwise counterparts. We benchmark reward model training pipelines across metrics such as human preference, truthfulness, and safety. Using Nectar and our new training pipeline, we fine-tuned Openchat-3.5 to create Starling-LM-7B, achieving significant performance enhancements on MT-Bench, AlpacaEval, and human evaluation metrics. To facilitate research and understanding of RLHF mechanisms, we open-source the Nectar dataset, the reward models, and the language models.", "title":"Starling-7B: Improving Helpfulness and Harmlessness with RLAIF", "authors":[ "Banghua Zhu", "Evan Frick", "Tianhao Wu", "Hanlin Zhu", "Karthik Ganesan", "Wei-Lin Chiang", "Jian Zhang", "Jiantao Jiao" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":217 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=GMalvQu0XL", "bibtext":"@inproceedings{\nhuang2024raven,\ntitle={{RAVEN}: In-Context Learning with Retrieval-Augmented Encoder-Decoder Language Models},\nauthor={Jie Huang and Wei Ping and Peng Xu and Mohammad Shoeybi and Kevin Chang and Bryan Catanzaro},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=GMalvQu0XL}\n}", "abstract":"In this paper, we investigate the in-context learning ability of retrieval-augmented encoder-decoder language models. We first conduct a comprehensive analysis of existing models and identify their limitations in in-context learning, primarily due to a mismatch between pretraining and inference, as well as a restricted context length. To address these issues, we propose RAVEN, a model that combines retrieval-augmented masked language modeling and prefix language modeling. We further introduce Fusion-in-Context Learning to enhance the few-shot performance by enabling the model to leverage more in-context examples without requiring additional training. Through extensive experiments, we demonstrate that our simple yet effective design significantly improves performance, achieving results comparable to the most advanced language models in certain scenarios, despite having substantially fewer parameters. Our work underscores the potential of retrieval-augmented encoder-decoder language models for in-context learning and encourages further research in this direction.", "title":"RAVEN: In-Context Learning with Retrieval-Augmented Encoder-Decoder Language Models", "authors":[ "Jie Huang", "Wei Ping", "Peng Xu", "Mohammad Shoeybi", "Kevin Chang", "Bryan Catanzaro" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/jeffhj\/raven" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":218 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=GC4mXVfquq", "bibtext":"@inproceedings{\nluo2024jailbreakv,\ntitle={JailBreakV: A Benchmark for Assessing the Robustness of MultiModal Large Language Models against Jailbreak Attacks},\nauthor={Weidi Luo and Siyuan Ma and Xiaogeng Liu and Xiaoyu Guo and Chaowei Xiao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=GC4mXVfquq}\n}", "abstract":"With the rapid advancements in Multimodal Large Language Models (MLLMs), securing these models against malicious inputs while align- ing them with human values has emerged as a critical challenge. In this paper, we investigate an important and unexplored question of whether techniques that successfully jailbreak Large Language Models (LLMs) can be equally effective in jailbreaking MLLMs. To explore this issue, we in- troduce JailBreakV-28K, a pioneering benchmark designed to assess the transferability of LLM jailbreak techniques to MLLMs, thereby evaluat- ing the robustness of MLLMs against diverse jailbreak attacks. Utilizing a dataset of 2, 000 malicious queries that is also proposed in this paper, we generate 20, 000 text-based jailbreak prompts using advanced jailbreak attacks on LLMs, alongside 8, 000 image-based jailbreak inputs from recent MLLMs jailbreak attacks, our comprehensive dataset includes 28, 000 test cases across a spectrum of adversarial scenarios. Our evaluation of 10 open- source MLLMs reveals a notably high Attack Success Rate (ASR) for attacks transferred from LLMs, highlighting a critical vulnerability in MLLMs that stems from their text-processing capabilities. Our findings underscore the urgent need for future research to address alignment vulnerabilities in MLLMs from both textual and visual inputs.", "title":"JailBreakV: A Benchmark for Assessing the Robustness of MultiModal Large Language Models against Jailbreak Attacks", "authors":[ "Weidi Luo", "Siyuan Ma", "Xiaogeng Liu", "Xiaoyu Guo", "Chaowei Xiao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":219 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=G8LaO1P0xv", "bibtext":"@inproceedings{\nsinghal2024a,\ntitle={A Long Way to Go: Investigating Length Correlations in {RLHF}},\nauthor={Prasann Singhal and Tanya Goyal and Jiacheng Xu and Greg Durrett},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=G8LaO1P0xv}\n}", "abstract":"Great success has been reported using Reinforcement Learning from Human Feedback (RLHF) to align large language models, with open preference datasets enabling wider experimentation, particularly for \"helpfulness\" in tasks like dialogue and web question answering. Alongside these improvements, however, RLHF also often drives models to produce longer outputs. This paper demonstrates, on three diverse settings, that optimizing for response length is, much more than previously thought, a significant factor behind RLHF. Studying the strategies RL optimization uses to maximize reward, we find improvements in reward to largely be driven by increasing response length, instead of other features. Indeed, we find that even a *purely* length-based reward reproduces most downstream RLHF improvements over supervised fine-tuned models. Testing a comprehensive set of length-countering interventions, we identify the dominant source of these biases to be reward models, which, by studying training dynamics, we find are non-robust and easily influenced by length biases in preference data.", "title":"A Long Way to Go: Investigating Length Correlations in RLHF", "authors":[ "Prasann Singhal", "Tanya Goyal", "Jiacheng Xu", "Greg Durrett" ], "id":"Conference", "type":"Oral", "arxiv_id":"2310.03716", "GitHub":[ "https:\/\/github.com\/prasanns\/rlhf-length-biases" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.03716", "n_linked_authors":3, "upvotes":9, "num_comments":1, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":220 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=FmhPg4UJ9K", "bibtext":"@inproceedings{\nyang2024counting,\ntitle={Counting Like Transformers: Compiling Temporal Counting Logic Into Softmax Transformers},\nauthor={Andy Yang and David Chiang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=FmhPg4UJ9K}\n}", "abstract":"Deriving formal bounds on the expressivity of transformers, as well as studying transformers that are constructed to implement known algorithms, are both effective methods for better understanding the computational power of transformers. Towards both ends, we introduce the temporal counting logic $\\textbf{K}_t$[#] alongside the RASP variant $\\textbf{C-RASP}$. We show they are equivalent to each other, and that together they are the best-known lower bound on the formal expressivity of future-masked soft attention transformers with unbounded input size. We prove this by showing all $\\textbf{K}_t$[#] formulas can be compiled into these transformers without any additional positional embeddings.", "title":"Counting Like Transformers: Compiling Temporal Counting Logic Into Softmax Transformers", "authors":[ "Andy Yang", "David Chiang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.04393", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":221 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Fkr1yVUb9G", "bibtext":"@inproceedings{\nteehan2024college,\ntitle={Co{LLEG}e: Concept Embedding Generation for Large Language Models},\nauthor={Ryan Teehan and Brenden Lake and Mengye Ren},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Fkr1yVUb9G}\n}", "abstract":"Current language models are unable to quickly learn new concepts on the fly, often requiring a more involved finetuning process to learn robustly. Prompting in-context is not robust to context distractions, and often fails to confer much information about the new concepts. Classic methods for few-shot word learning in NLP, relying on global word vectors, are less applicable to large language models. In this paper, we introduce a novel approach named **CoLLEGe** (**Co**ncept **L**earning with **L**anguage **E**mbedding **Ge**neration) to modernize few-shot concept learning. CoLLEGe is a meta-learning framework capable of generating flexible embeddings for new concepts using a small number of example sentences or definitions. Our primary meta-learning objective is simply to facilitate a language model to make next word predictions in forthcoming sentences, making it compatible with language model pretraining. We design a series of tasks to test new concept learning in challenging real-world scenarios, including new word acquisition, definition inference, and verbal reasoning, and demonstrate that our method succeeds in each setting **without task-specific training**. Code and data for our project can be found at [https:\/\/college-concept-learning.github.io\/](https:\/\/college-concept-learning.github.io\/).", "title":"CoLLEGe: Concept Embedding Generation for Large Language Models", "authors":[ "Ryan Teehan", "Brenden Lake", "Mengye Ren" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":222 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=FgHpT6u7pk", "bibtext":"@inproceedings{\ngao2024coca,\ntitle={Co{CA}: Regaining Safety-awareness of Multimodal Large Language Models with Constitutional Calibration},\nauthor={Jiahui Gao and Renjie Pi and Tianyang Han and Han Wu and Lanqing HONG and Lingpeng Kong and Xin Jiang and Zhenguo Li},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=FgHpT6u7pk}\n}", "abstract":"The deployment of multimodal large language models (MLLMs) has demonstrated remarkable success in engaging in conversations involving visual inputs, thanks to the superior power of large language models (LLMs). Those MLLMs are typically built based on the LLMs, with an image encoder to process images into the token embedding space of the LLMs. However, the integration of visual modality has introduced a unique vulnerability: the MLLM becomes susceptible to malicious visual inputs and prone to generating sensitive or harmful responses, even though the LLM has been trained on textual dataset to align with human value. In this paper, we first raise the following question: ``Do the MLLMs possess safety-awareness against malicious image inputs?\". We find that after adding a principle that specifies the safety requirement into the input of the MLLM, the model's safety awareness becomes boosted. This phenomenon verifies the existence of MLLM's safety-awareness against image inputs, it is only weakened by the modality gap. We then introduce a simple yet effective technique termed CoCA, which amplifies the safety-awareness of the MLLM by calibrating its output distribution. Our proposed strategy helps the model reclaim its original safety awareness without losing its original capabilities. We verify the effectiveness of our approach on both multimodal safety and understanding benchmarks.", "title":"CoCA: Regaining Safety-awareness of Multimodal Large Language Models with Constitutional Calibration", "authors":[ "Jiahui Gao", "Renjie Pi", "Tianyang Han", "Han Wu", "Lanqing HONG", "Lingpeng Kong", "Xin Jiang", "Zhenguo Li" ], "id":"Conference", "type":"Poster", "arxiv_id":"2409.11365", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":223 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=FbhjirzvJG", "bibtext":"@inproceedings{\nankner2024hydra,\ntitle={Hydra: Sequentially-Dependent Draft Heads for Medusa Decoding},\nauthor={Zachary Ankner and Rishab Parthasarathy and Aniruddha Nrusimha and Christopher Rinard and Jonathan Ragan-Kelley and William Brandon},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=FbhjirzvJG}\n}", "abstract":"To combat the memory bandwidth-bound nature of autoregressive LLM inference, previous research has proposed the speculative decoding framework. To perform speculative decoding, a small draft model proposes candidate continuations of the input sequence that are then verified in parallel by the base model. One way to specify the draft model, as used in the recent Medusa decoding framework, is as a collection of lightweight heads, called draft heads, that operate on the base model's hidden states. To date, all existing draft heads have been sequentially independent, meaning that they speculate tokens in the candidate continuation independently of any preceding tokens in the candidate continuation. In this work, we propose Hydra heads: a sequentially-dependent drop-in replacement for standard draft heads that significantly improves the accuracy of draft head speculation. We further explore the design space of Hydra head training objectives and architectures, and propose a carefully tuned Hydra head recipe, which we call Hydra++, that improves decoding throughput by up to 1.31x and 2.70x compared to Medusa decoding and autoregressive decoding respectively. Overall, Hydra heads are a simple and well-motivated intervention on standard draft heads that significantly improve the end-to-end speed of draft head-based speculative decoding. We make our code publicly available at https:\/\/github.com\/zankner\/Hydra.", "title":"Hydra: Sequentially-Dependent Draft Heads for Medusa Decoding", "authors":[ "Zachary Ankner", "Rishab Parthasarathy", "Aniruddha Nrusimha", "Christopher Rinard", "Jonathan Ragan-Kelley", "William Brandon" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.05109", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2402.05109", "n_linked_authors":2, "upvotes":0, "num_comments":1, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":224 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=FX4fUThO9H", "bibtext":"@inproceedings{\nyang2024model,\ntitle={Model Autophagy Analysis to Explicate Self-consumption within Human-{AI} Interactions},\nauthor={Shu Yang and Muhammad Asif Ali and Lu Yu and Lijie Hu and Di Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=FX4fUThO9H}\n}", "abstract":"The increasing significance of large models and their multi-modal variants\nin societal information processing has ignited debates on social safety and\nethics. However, there exists a paucity of comprehensive analysis for: (i)\nthe interactions between human and artificial intelligence systems, and\n(ii) understanding and addressing the associated limitations. To bridge\nthis gap, we present Model Autophagy Analysis for large models\u2019 self\u0002consumption explanation. We employ two distinct autophagous loops\n(referred to as \u201cself-consumption loops\u201d) to elucidate the suppression of\nhuman-generated information in the exchange between human and AI\nsystems. Through comprehensive experiments on diverse datasets, we\nevaluate the capacities of generated models as both creators and dissemina\u0002tors of information. Our key findings reveal (i) A progressive prevalence of\nmodel-generated synthetic information over time within training datasets\ncompared to human-generated information; (ii) The discernible tendency\nof large models, when acting as information transmitters across multiple\niterations, to selectively modify or prioritize specific contents; and (iii) The\npotential for a reduction in the diversity of socially or human-generated\ninformation, leading to bottlenecks in the performance enhancement of\nlarge models and confining them to local optima.", "title":"Model Autophagy Analysis to Explicate Self-consumption within Human-AI Interactions", "authors":[ "Shu Yang", "Muhammad Asif Ali", "Lu Yu", "Lijie Hu", "Di Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":225 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=F9tqgOPXH5", "bibtext":"@inproceedings{\nzala2024envgen,\ntitle={EnvGen: Generating and Adapting Environments via {LLM}s for Training Embodied Agents},\nauthor={Abhay Zala and Jaemin Cho and Han Lin and Jaehong Yoon and Mohit Bansal},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=F9tqgOPXH5}\n}", "abstract":"Recent state-of-the-art approaches for embodied learning via interaction directly employ large language models (LLMs) as agents to determine the next steps in an environment. Due to their world knowledge and reasoning capabilities, LLM agents achieve stronger performance than previous smaller agents based on reinforcement learning (RL); however, frequently calling LLMs is slow and expensive. This begs an interesting question: Instead of directly employing LLMs as embodied agents, can we use LLMs\u2019 reasoning capabilities to adaptively create training environments to help smaller embodied RL agents learn useful skills that they are weak at? In this work, we propose EnvGen, a novel framework to address this question. First, we prompt an LLM to generate training environments that allow agents to quickly learn different tasks in parallel. Concretely, the LLM is given the task description and environment simulator objectives that the agents should learn and is then asked to generate a set of environment configurations (e.g., different terrains, items initially given to agents, chances of finding certain objects, etc.). Next, we train a small RL agent in a mixture of the original and LLM-generated environments. Then, we enable the LLM to continuously adapt the generated environments to progressively improve the skills that the agent is weak at, by providing feedback to the LLM in the form of the agent\u2019s performance. We demonstrate the usefulness of EnvGen with comprehensive experiments in Crafter and Heist game environments. We find that a small RL agent trained with EnvGen can outperform SOTA methods, including a GPT-4 agent, and learns long-horizon tasks significantly faster. We also show that using an LLM to adapt environments dynamically outperforms curriculum learning approaches and how the LLM adapts training environments to help improve RL agents\u2019 weaker skills over time. Additionally, EnvGen is substantially more efficient as it only uses a small number of LLM calls (e.g., 4 in total), whereas LLM agents require one or more LLM calls per step (resulting in thousands of LLM calls per episode). We also present detailed analyses of EnvGen\u2019s design choices.", "title":"EnvGen: Generating and Adapting Environments via LLMs for Training Embodied Agents", "authors":[ "Abhay Zala", "Jaemin Cho", "Han Lin", "Jaehong Yoon", "Mohit Bansal" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.12014", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.12014", "n_linked_authors":2, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":226 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=F7aAhfitX6", "bibtext":"@inproceedings{\nsun2024massive,\ntitle={Massive Activations in Large Language Models},\nauthor={Mingjie Sun and Xinlei Chen and J Zico Kolter and Zhuang Liu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=F7aAhfitX6}\n}", "abstract":"We observe an empirical phenomenon in Large Language Models (LLMs) -- very few activations exhibit significantly larger values than others (e.g., 100,000 times larger). We call them massive activations. First, we demonstrate the widespread existence of massive activations across various LLMs and characterize their locations. Second, we find their values largely stay constant regardless of the input, and they function as indispensable bias terms in LLMs. Third, these massive activations lead to the concentration of attention probabilities to their corresponding tokens, and further, implicit bias terms in the self-attention output. Last, we also study massive activations in Vision Transformers.", "title":"Massive Activations in Large Language Models", "authors":[ "Mingjie Sun", "Xinlei Chen", "J Zico Kolter", "Zhuang Liu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.17762", "GitHub":[ "https:\/\/github.com\/locuslab\/massive-activations" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":227 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=F2yGbwXJAi", "bibtext":"@inproceedings{\nguo2024suspicion,\ntitle={Suspicion Agent: Playing Imperfect Information Games with Theory of Mind Aware {GPT}-4},\nauthor={Jiaxian Guo and Bo Yang and Paul Yoo and Bill Yuchen Lin and Yusuke Iwasawa and Yutaka Matsuo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=F2yGbwXJAi}\n}", "abstract":"Unlike perfect information games, where all elements are known to every player, imperfect information games emulate the real-world complexities of decision-making under uncertain or incomplete information. \nGPT-4, the recent breakthrough in large language models (LLMs) trained on massive passive data, is notable for its knowledge retrieval and reasoning abilities. This paper delves into the applicability of GPT-4's learned knowledge for imperfect information games. \nTo achieve this, we introduce \\textbf{\\agentname{}}, an innovative agent that leverages GPT-4's capabilities for imperfect information games. With proper prompt engineering to achieve different functions, \\agentname{} based on GPT-4 demonstrates remarkable adaptability across a range of imperfect information card games. Importantly, GPT-4 displays a strong high-order theory of mind (ToM) capacity, meaning it can understand others and intentionally impact others' behavior. Leveraging this, we design a planning strategy that enables GPT-4 to competently play against different opponents, adapting its gameplay style as needed, while requiring only the game rules and descriptions of observations as input.\nIn the experiments, we qualitatively showcase the capabilities of \\agentname{} across three different imperfect information games and then quantitatively evaluate it in Leduc Hold'em. {As an exploration study, we show that \\agentname{} can potentially outperform traditional algorithms without any specialized training or examples, but still cannot beat Nash-Equilibrium algorithms}. In order to encourage and foster deeper insights within the community, we make our game-related data publicly available.", "title":"Suspicion Agent: Playing Imperfect Information Games with Theory of Mind Aware GPT-4", "authors":[ "Jiaxian Guo", "Bo Yang", "Paul Yoo", "Bill Yuchen Lin", "Yusuke Iwasawa", "Yutaka Matsuo" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cr-gjx\/suspicion-agent" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":228 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Ecgev5ZZpq", "bibtext":"@inproceedings{\nyu2024evaluating,\ntitle={Evaluating the Adversarial Robustness of Retrieval-Based In-Context Learning for Large Language Models},\nauthor={Simon Chi Lok Yu and Jie He and Pasquale Minervini and Jeff Z. Pan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Ecgev5ZZpq}\n}", "abstract":"With the emergence of large language models, such as LLaMA and OpenAI GPT-3, In-Context Learning (ICL) gained significant attention due to its effectiveness and efficiency. However, ICL is very sensitive to the choice, order, and verbaliser used to encode the demonstrations in the prompt. \\emph{Retrieval-Augmented ICL} methods try to address this problem by leveraging retrievers to extract semantically related examples as demonstrations. While this approach yields more accurate results, its robustness against various types of adversarial attacks, including perturbations on test samples, demonstrations, and retrieved data, remains under-explored. Our study reveals that retrieval-augmented models can enhance robustness against test sample attacks, outperforming vanilla ICL with a 4.87\\% reduction in Attack Success Rate (ASR); however, they exhibit overconfidence in the demonstrations, leading to a 2\\% increase in ASR for demonstration attacks. Adversarial training can help improve the robustness of ICL methods to adversarial attacks; however, such a training scheme can be too costly in the context of LLMs. As an alternative, we introduce an effective training-free adversarial defence method, \\emph{DARD}, which enriches the example pool with those attacked samples. We show that DARD yields improvements in performance and robustness, achieving a 15\\% reduction in ASR over the baselines. Code and data are available jointly with this submission as supplementary material.", "title":"Evaluating the Adversarial Robustness of Retrieval-Based In-Context Learning for Large Language Models", "authors":[ "Simon Chi Lok Yu", "Jie He", "Pasquale Minervini", "Jeff Z. Pan" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/simonucl\/adv-retreival-icl" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":229 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=EKBPn7no4y", "bibtext":"@inproceedings{\nzhuang2024structlm,\ntitle={Struct{LM}: Towards Building Generalist Models for Structured Knowledge Grounding},\nauthor={Alex Zhuang and Ge Zhang and Tianyu Zheng and Xinrun Du and Junjie Wang and Weiming Ren and Wenhao Huang and Jie Fu and Xiang Yue and Wenhu Chen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=EKBPn7no4y}\n}", "abstract":"Structured data sources, such as tables, graphs, and databases, are ubiquitous knowledge sources. Despite the demonstrated capabilities of large language models (LLMs) on plain text, their proficiency in interpreting and utilizing structured data remains limited. Our investigation reveals a notable deficiency in LLMs' ability to process structured data, e.g., ChatGPT lags behind state-of-the-art (SoTA) model by an average of 35\\%. To augment the Structured Knowledge Grounding (SKG) capabilities in LLMs, we have developed a comprehensive instruction tuning dataset comprising 1.1 million examples. Utilizing this dataset, we train a series of models, referred to as $\\texttt{structlm}$, based on Mistral and the CodeLlama model family, ranging from 7B to 34B parameters. Our $\\texttt{structlm}$ series surpasses task-specific models~\\citep{UnifiedSKG2022} on 16 out of 18 evaluated datasets and establishes new SoTA performance on 8 SKG tasks. Furthermore, $\\texttt{structlm}$ demonstrates strong generalization across 6 novel held-out SKG tasks, outperforming TableLlama by an average of 35\\% and Flan-UL2 20B by an average of 10\\%. Contrary to expectations, we observe that scaling model size offers marginal benefits, with $\\texttt{structlm}$-34B showing only slight improvements over $\\texttt{structlm}$-7B. This suggests that structured knowledge grounding is still a challenging task and requires more innovative design to push to a new level. We release the model weights and training dataset to the community, along with relevant code on Github.", "title":"StructLM: Towards Building Generalist Models for Structured Knowledge Grounding", "authors":[ "Alex Zhuang", "Ge Zhang", "Tianyu Zheng", "Xinrun Du", "Junjie Wang", "Weiming Ren", "Wenhao Huang", "Jie Fu", "Xiang Yue", "Wenhu Chen" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":230 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=EIjJ6ykPnh", "bibtext":"@inproceedings{\nsinghal2024dpo,\ntitle={D2{PO}: Discriminator-Guided {DPO} with Response Evaluation Models},\nauthor={Prasann Singhal and Nathan Lambert and Scott Niekum and Tanya Goyal and Greg Durrett},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=EIjJ6ykPnh}\n}", "abstract":"Varied approaches for aligning language models have been proposed, including supervised fine-tuning, RLHF, and direct optimization methods such as DPO. Although DPO has rapidly gained popularity due to its straightforward training process and competitive results, there is an open question of whether there remain practical advantages of using a discriminator, such as a reward model, to evaluate responses. We propose D2PO, discriminator-guided DPO, an approach for the online setting where preferences are being collected throughout learning. As we collect gold preferences, we use these not only to train our policy, but to train a discriminative response evaluation model to silver-label even more synthetic data for policy training. We explore this approach across a set of diverse tasks, including a realistic chat setting, and we find that our approach can lead to higher-quality outputs compared to DPO with the same data budget, and greater efficiency in terms of preference data requirements. Furthermore, we show that our silver labeling is most helpful when training the policy with DPO, outperforming traditional PPO, and benefits from maintaining a separate discriminator from the policy model.", "title":"D2PO: Discriminator-Guided DPO with Response Evaluation Models", "authors":[ "Prasann Singhal", "Nathan Lambert", "Scott Niekum", "Tanya Goyal", "Greg Durrett" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.01511", "GitHub":[ "https:\/\/github.com\/PrasannS\/d2po" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.01511", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":231 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=EHPns3hVkj", "bibtext":"@inproceedings{\nalves2024tower,\ntitle={Tower: An Open Multilingual Large Language Model for Translation-Related Tasks},\nauthor={Duarte Miguel Alves and Jos{\\'e} Pombal and Nuno M Guerreiro and Pedro Henrique Martins and Jo{\\~a}o Alves and Amin Farajian and Ben Peters and Ricardo Rei and Patrick Fernandes and Sweta Agrawal and Pierre Colombo and Jos{\\'e} G. C. de Souza and Andre Martins},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=EHPns3hVkj}\n}", "abstract":"While general-purpose large language models (LLMs) demonstrate proficiency on multiple tasks within the domain of translation, approaches based on open LLMs are competitive only when specializing on a single task. In this paper, we propose a recipe for tailoring LLMs to multiple tasks present in translation workflows. We perform continued pretraining on a multilingual mixture of monolingual and parallel data, creating TowerBase, followed by finetuning on instructions relevant for translation processes, creating TowerInstruct. Our model surpasses open alternatives on several relevant tasks and is competitive with general-purpose closed LLMs. We will release the Tower models, our specialization dataset, an evaluation framework for LLMs focusing on the translation ecosystem, and a collection of model generations on our benchmark.", "title":"Tower: An Open Multilingual Large Language Model for Translation-Related Tasks", "authors":[ "Duarte Miguel Alves", "Jos\u00e9 Pombal", "Nuno M Guerreiro", "Pedro Henrique Martins", "Jo\u00e3o Alves", "Amin Farajian", "Ben Peters", "Ricardo Rei", "Patrick Fernandes", "Sweta Agrawal", "Pierre Colombo", "Jos\u00e9 G. C. de Souza", "Andre Martins" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/epfllm\/megatron-llm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":232 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=EEPBOB2Xww", "bibtext":"@inproceedings{\nzhang2024ferretv,\ntitle={Ferret-v2: An Improved Baseline for Referring and Grounding with Large Language Models},\nauthor={Haotian Zhang and Haoxuan You and Philipp Dufter and Bowen Zhang and Chen Chen and Hong-You Chen and Tsu-Jui Fu and William Yang Wang and Shih-Fu Chang and Zhe Gan and Yinfei Yang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=EEPBOB2Xww}\n}", "abstract":"While Ferret seamlessly integrates regional understanding into the Large Language Model (LLM) to facilitate its referring and grounding capability, it poses certain limitations: constrained by the pre-trained fixed visual encoder and failed to perform well on broader tasks. In this work, we unveil Ferret-v2, a significant upgrade to Ferret, with three key designs. (1) Any resolution grounding and referring: A flexible approach that effortlessly handles higher image resolution, improving the model's ability to process and understand images in greater detail. (2) Multi-granularity visual encoding: By integrating the additional DINOv2 encoder, the model learns better and diverse underlying contexts for global and fine-grained visual information. (3) A three-stage training paradigm: Besides image-caption alignment, an additional stage is proposed for high-resolution dense alignment before the final instruction tuning. Experiments show that Ferret-v2 provides substantial improvements over Ferret and other state-of-the-art methods, thanks to its high-resolution scaling and fine-grained visual processing.", "title":"Ferret-v2: An Improved Baseline for Referring and Grounding with Large Language Models", "authors":[ "Haotian Zhang", "Haoxuan You", "Philipp Dufter", "Bowen Zhang", "Chen Chen", "Hong-You Chen", "Tsu-Jui Fu", "William Yang Wang", "Shih-Fu Chang", "Zhe Gan", "Yinfei Yang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":233 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Dt6qXZsgaU", "bibtext":"@inproceedings{\nzhao2024selfguide,\ntitle={Self-Guide: Better Task-Specific Instruction Following via Self-Synthetic Finetuning},\nauthor={Chenyang Zhao and Xueying Jia and Vijay Viswanathan and Graham Neubig and Tongshuang Wu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Dt6qXZsgaU}\n}", "abstract":"Large language models (LLMs) hold the promise of solving diverse tasks when provided with appropriate natural language prompts. However, prompting often leads models to make predictions with lower accuracy compared to finetuning a model with ample training data. On the other hand, while finetuning LLMs on task-specific data generally improves their performance, abundant annotated datasets are not available for all tasks. Previous work has explored generating task-specific data from state-of-the-art LLMs and using this data to finetune smaller models, but this approach requires access to a language model other than the one being trained, which introduces cost, scalability challenges, and legal hurdles associated with continuously relying on more powerful LLMs. In response to these, we propose Self-Guide, a multi-stage mechanism in which we synthesize task-specific input-output pairs from the student LLM, then use these input-output pairs to finetune the student LLM itself. In our empirical evaluation of the Natural Instructions V2 benchmark, we find that Self-Guide improves the performance of LLM by a substantial margin. Specifically, we report an absolute improvement of approximately 15% for classification tasks and 18% for generation tasks in the benchmark's metrics. This sheds light on the promise of self-synthesized data guiding LLMs towards becoming task-specific experts without any external learning signals.", "title":"Self-Guide: Better Task-Specific Instruction Following via Self-Synthetic Finetuning", "authors":[ "Chenyang Zhao", "Xueying Jia", "Vijay Viswanathan", "Graham Neubig", "Tongshuang Wu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhaochenyang20\/Prompt2Model-Self-Guide" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":234 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=DomBynQsqt", "bibtext":"@inproceedings{\nzhu2024mdiffusion,\ntitle={3M-Diffusion: Latent Multi-Modal Diffusion for Language-Guided Molecular Structure Generation},\nauthor={Huaisheng Zhu and Teng Xiao and Vasant G Honavar},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=DomBynQsqt}\n}", "abstract":"Generating molecular structures with desired properties is a critical task with broad applications in drug discovery and materials design. We propose 3M-Diffusion, a novel multi-modal molecular graph generation method, to generate diverse, ideally novel molecular structures with desired properties. 3M-Diffusion encodes molecular graphs into a graph latent space which it then aligns with the text space learned by encoder-based LLMs from textual descriptions. It then reconstructs the molecular structure and atomic attributes based on the given text descriptions using the molecule decoder. It then learns a probabilistic mapping from the text space to the latent molecular graph space using a diffusion model. The results of our extensive experiments on several datasets demonstrate that 3M-Diffusion can generate high-quality, novel and diverse molecular graphs that semantically match the textual description provided.", "title":"3M-Diffusion: Latent Multi-Modal Diffusion for Language-Guided Molecular Structure Generation", "authors":[ "Huaisheng Zhu", "Teng Xiao", "Vasant G Honavar" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/huaishengzhu\/3mdiffusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":235 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=DbsLm2KAqP", "bibtext":"@inproceedings{\nli2024culturegen,\ntitle={{CULTURE}-{GEN}: Revealing Global Cultural Perception in Language Models through Natural Language Prompting},\nauthor={Huihan Li and Liwei Jiang and Nouha Dziri and Xiang Ren and Yejin Choi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=DbsLm2KAqP}\n}", "abstract":"As the utilization of large language models (LLMs) has proliferated world-wide, it is crucial for them to have adequate knowledge and fair representation for diverse global cultures. In this work, we uncover culture perceptions of three SOTA models on 110 countries and regions on 8 culture-related topics through culture-conditioned generations, and extract symbols from these generations that are associated to each culture by the LLM. We discover that culture-conditioned generation consist of linguistic \u201cmarkers\u201d that distinguish marginalized cultures apart from default cultures. We also discover that LLMs have an uneven degree of diversity in the culture symbols, and that cultures from different geographic regions have different presence in LLMs\u2019 culture-agnostic generation. Our findings promote further research in studying the knowledge and fairness of global culture perception in LLMs.", "title":"CULTURE-GEN: Revealing Global Cultural Perception in Language Models through Natural Language Prompting", "authors":[ "Huihan Li", "Liwei Jiang", "Nouha Dziri", "Xiang Ren", "Yejin Choi" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.10199", "GitHub":[ "https:\/\/github.com\/huihanlhh\/culture-gen" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":236 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=DRffhKBVlE", "bibtext":"@inproceedings{\nli2024lite,\ntitle={{LITE}: Modeling Environmental Ecosystems with Multimodal Large Language Models},\nauthor={Haoran Li and Junqi Liu and Zexian Wang and Shiyuan Luo and Xiaowei Jia and Huaxiu Yao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=DRffhKBVlE}\n}", "abstract":"The modeling of environmental ecosystems plays a pivotal role in the sustainable management of our planet. Accurate prediction of key environmental variables over space and time can aid in informed policy and decision-making, thus improving people's livelihood. Recently, deep learning-based methods have shown promise in modeling the spatial-temporal relationships for predicting environmental variables. However, these approaches often fall short in handling incomplete features and distribution shifts, which are commonly observed in environmental data due to the substantial cost of data collection and malfunctions in measuring instruments. To address these issues, we propose LITE -- a multimodal large language model for environmental ecosystems modeling. Specifically, LITE unifies different environmental variables by transforming them into natural language descriptions and line graph images. Then, LITE utilizes unified encoders to capture spatial-temporal dynamics and correlations in different modalities. During this step, the incomplete features are imputed by a sparse Mixture-of-Experts framework, and the distribution shift is handled by incorporating multi-granularity information from past observations. Finally, guided by domain instructions, a language model is employed to fuse the multimodal representations for the prediction. Our experiments demonstrate that LITE significantly enhances performance in environmental spatial-temporal prediction across different domains compared to the best baseline, with a 41.25\\% reduction in prediction error. This justifies its effectiveness.", "title":"LITE: Modeling Environmental Ecosystems with Multimodal Large Language Models", "authors":[ "Haoran Li", "Junqi Liu", "Zexian Wang", "Shiyuan Luo", "Xiaowei Jia", "Huaxiu Yao" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01165", "GitHub":[ "https:\/\/github.com\/hrlics\/lite" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":237 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=DOMP5AgwQz", "bibtext":"@inproceedings{\nhuang2024ctikg,\ntitle={{CTIKG}: {LLM}-Powered Knowledge Graph Construction from Cyber Threat Intelligence},\nauthor={Liangyi Huang and Xusheng Xiao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=DOMP5AgwQz}\n}", "abstract":"To gain visibility into evolving threat landscape, knowledge of cyber threats has been aggressively collected across organizations and is often shared through Cyber Threat Intelligence (CTI). While knowledge of CTI can be shared via structured format such as Indicators of Compromise (IOC), articles in technical blogs and posts in forums (referred to as CTI articles) provide more comprehensive descriptions of the observed real-world at- tacks. However, existing works can only analyze standard texts from mainstream cyber threat knowledge bases such as CVE and NVD, and lack of the capability to link multiple CTI articles to uncover the relationships among security-related entities such as vulnerabilities. In this paper, we propose a novel approach, CTIKG, that utilizes prompt engineering to efficiently build a security-oriented knowledge graph from CTI articles based on LLMs. To mitigate the challenges of LLMs in randomness, hallucinations and tokens limitation, CTIKG divides an article into segments and employs multiple LLM agents with dual memory design to (1) process each text segment separately and (2) summarize the results of the text segments to generate more accurate results. We evaluate CTIKG on two representative benchmarks built from real world CTI articles, and the results show that CTIKG achieves 86.88% precision in building security-oriented knowledge graphs, achieving at least 30% improvements over the state-of-the-art techniques. We also demonstrate that the retry mechanism makes open source language models outperform GPT4 for building knowledge graphs.", "title":"CTIKG: LLM-Powered Knowledge Graph Construction from Cyber Threat Intelligence", "authors":[ "Liangyi Huang", "Xusheng Xiao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":238 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=DMUGTMWrKZ", "bibtext":"@inproceedings{\nzhao2024enhancing,\ntitle={Enhancing Adversarial Robustness of {LLM}s with Analytic Hierarchy Process},\nauthor={Jiahao Zhao and Minzheng Wang and Nan Xu and YinLuo and Wenji Mao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=DMUGTMWrKZ}\n}", "abstract":"With the increasing impact of large language models (LLMs) across diverse applications, ensuring the robustness of LLMs has become a pressing concern. Existing defense strategies are tailored to specific attack scenarios, which typically require high-cost model training and cannot rapidly respond to new threats. To tackle this issue, we conceptualize the defense strategy in LLMs as a cognitive process for dealing with complex user queries. Intuitively, faced with a spectrum of queries that potentially contain malicious perturbations, LLMs need human-like discernment to avoid being misled. Drawing inspiration from cognitive theory, we introduce an innovative Analytic Hierarchy Process (AHP) inference framework. Our methodology involves decomposing intricate tasks into manageable subtasks, prioritizing them, and systematically addressing each step. Our framework is based on AI feedback, eliminating the necessity for training and optimization. We evaluate the effectiveness of our framework in jailbreak attacks and robustness in downstream tasks using representative LLMs, including GPT-3.5 and Llama2. The experimental results demonstrate that our proposed framework significantly enhances the adversarial robustness of LLMs.", "title":"Enhancing Adversarial Robustness of LLMs with Analytic Hierarchy Process", "authors":[ "Jiahao Zhao", "Minzheng Wang", "Nan Xu", "YinLuo", "Wenji Mao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":239 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=D06yk3DBas", "bibtext":"@inproceedings{\ncassano2024can,\ntitle={Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions},\nauthor={Federico Cassano and Luisa Li and Akul Sethi and Noah Shinn and Abby Brennan-Jones and Jacob Ginesin and Edward Berman and George Chakhnashvili and Anton Lozhkov and Carolyn Jane Anderson and Arjun Guha},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=D06yk3DBas}\n}", "abstract":"A significant amount of research is focused on developing and evaluating\nlarge language models for a variety of code synthesis tasks. These include\nsynthesizing code from natural language, synthesizing tests from\ncode, and synthesizing explanations of code. In contrast, the behavior of \ninstructional code editing with LLMs is understudied.\nThese are tasks in which the model is provided a block of code and an instruction to modify the code. \nThe editing instruction may ask for a feature to be added or removed, describe a bug and ask\nfor a fix, or ask for a different kind of solution.\nWe introduce a carefully crafted benchmark of code editing tasks and use it\nto evaluate several cutting edge LLMs. Our evaluation exposes a significant gap\nbetween the capabilities of state-of-the-art open and closed models. For\nexample, even GPT-3.5-Turbo is better than the best open model at\ncode editing tasks. We also introduce a new, carefully curated, permissively licensed training dataset of code editing tasks\ncoupled with natural language instructions.\nUsing this training dataset, we show that we can fine-tune open Code LLMs to significantly\nimprove their code editing capabilities,\nclosing the gap between open and closed models.\nAll code, data, and models are available at https:\/\/github.com\/nuprl\/CanItEdit.", "title":"Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions", "authors":[ "Federico Cassano", "Luisa Li", "Akul Sethi", "Noah Shinn", "Abby Brennan-Jones", "Jacob Ginesin", "Edward Berman", "George Chakhnashvili", "Anton Lozhkov", "Carolyn Jane Anderson", "Arjun Guha" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/nuprl\/canitedit" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":240 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=CybBmzWBX0", "bibtext":"@inproceedings{\ndubois2024lengthcontrolled,\ntitle={Length-Controlled AlpacaEval: A Simple Debiasing of Automatic Evaluators},\nauthor={Yann Dubois and Percy Liang and Tatsunori Hashimoto},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=CybBmzWBX0}\n}", "abstract":"LLM-based auto-annotators have become a key component of the LLM development process due to their cost-effectiveness and scalability compared to human-based evaluation. \nHowever, these auto-annotators can introduce complex biases that are hard to remove. Even simple, known confounders such as preference for longer outputs remains in existing automated evaluation metrics.\nWe propose a simple regression analysis approach for controlling biases in auto-evaluations. \nAs a real case study, we focus on reducing the length bias of AlpacaEval, a fast and affordable benchmark for instruction-following LLMs that uses LLMs to estimate response quality.\nDespite being highly correlated with human preferences, AlpacaEval is known to favor models that generate longer outputs.\nWe introduce a length-controlled AlpacaEval that aims to answer the counterfactual question: \"What would the preference be if the model's and baseline's output had the same length?\" \nTo achieve this, we first fit a GLM to predict the biased output of interest (auto-annotator preferences) based on the mediators we want to control for (length difference) and other relevant features. \nWe then obtain length-controlled preferences by predicting preferences while conditioning the GLM with a zero difference in lengths. \nLength-controlling not only improves the robustness of the metric to manipulations in model verbosity, we also find that it increases the Spearman correlation with LMSYS' Chatbot Arena from 0.94 to 0.98. \nWe release \\thecode{} and \\leaderboard{}.", "title":"Length-Controlled AlpacaEval: A Simple Debiasing of Automatic Evaluators", "authors":[ "Yann Dubois", "Percy Liang", "Tatsunori Hashimoto" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":241 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=CrzAj0kZjR", "bibtext":"@inproceedings{\nandukuri2024stargate,\ntitle={{ST}aR-{GATE}: Teaching Language Models to Ask Clarifying Questions},\nauthor={Chinmaya Andukuri and Jan-Philipp Fr{\\\"a}nken and Tobias Gerstenberg and Noah Goodman},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=CrzAj0kZjR}\n}", "abstract":"When prompting language models to complete a task, users often leave important aspects unsaid. While asking questions could resolve this ambiguity (GATE; Li et al., 2023), models often struggle to ask good questions. We explore a language model's ability to self-improve (STaR; Zelikman et al., 2022) by rewarding the model for generating useful questions\u2014a simple method we dub STaR-GATE. We generate a synthetic dataset of 25,500 unique persona-task prompts to simulate conversations between a pretrained language model\u2014the $\\texttt{Questioner}$\u2014and a $\\texttt{Roleplayer}$ whose preferences are unknown to the $\\texttt{Questioner}$. By asking questions, the $\\texttt{Questioner}$ elicits preferences from the $\\texttt{Roleplayer}$. The $\\texttt{Questioner}$ is iteratively finetuned on questions that increase the probability of high-quality responses to the task, which are generated by an $\\texttt{Oracle}$ with access to the $\\texttt{Roleplayer}$'s latent preferences. After two iterations of self-improvement, the $\\texttt{Questioner}$ asks better questions, allowing it to generate responses that are preferred over responses from the initial model on $\\textbf{72}$% of tasks. Our results indicate that teaching a language model to ask better questions leads to better personalized responses.", "title":"STaR-GATE: Teaching Language Models to Ask Clarifying Questions", "authors":[ "Chinmaya Andukuri", "Jan-Philipp Fr\u00e4nken", "Tobias Gerstenberg", "Noah Goodman" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.19154", "GitHub":[ "https:\/\/github.com\/scandukuri\/assistant-gate" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.19154", "n_linked_authors":2, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ "scandukuri\/mistral-stargate", "scandukuri\/mistral-stargate-m1", "scandukuri\/llama3-8b-stargate-m1", "RichardErkhov\/scandukuri_-_llama3-8b-stargate-m1-gguf" ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":242 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=CI7D2kiih1", "bibtext":"@inproceedings{\nzayed2024should,\ntitle={Should We Attend More or Less? Modulating Attention for Fairness},\nauthor={Abdelrahman Zayed and Goncalo Mordido and Samira Shabanian and Sarath Chandar},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=CI7D2kiih1}\n}", "abstract":"The advances in natural language processing (NLP) pose both opportunities and challenges. While recent progress enables the development of high-performing models for a variety of tasks, it also poses the risk of models learning harmful biases from the data, such as gender stereotypes. In this work, we investigate the role of attention, a widely-used technique in current state-of-the-art NLP models, in the propagation of social biases. Specifically, we study the relationship between the entropy of the attention distribution and the model's performance and fairness. We then propose a novel method for modulating attention weights to improve model fairness after training. Since our method is only applied post-training and pre-inference, it is an intra-processing method and is, therefore, less computationally expensive than existing in-processing and pre-processing approaches. Our results show an increase in fairness and minimal performance loss on different text classification and generation tasks using language models of varying sizes.", "title":"Should We Attend More or Less? Modulating Attention for Fairness", "authors":[ "Abdelrahman Zayed", "Goncalo Mordido", "Samira Shabanian", "Sarath Chandar" ], "id":"Conference", "type":"Poster", "arxiv_id":"2305.13088", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":243 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=C0j44uRPcl", "bibtext":"@inproceedings{\nko2024on,\ntitle={On Robustness-Accuracy Characterization of Language Models using Synthetic Datasets},\nauthor={Ching-Yun Ko and Pin-Yu Chen and Payel Das and Yung-Sung Chuang and Luca Daniel},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=C0j44uRPcl}\n}", "abstract":"In recent years, language models (LMs) that were pretrained at scale on diverse data have proven to be a successful approach for solving different downstream tasks. However, new concerns about proper performance evaluation have been raised, especially for test-data leakage caused by accidentally including them during pretraining, or by indirectly exposing them through API calls for evaluation. Motivated by these, in this paper, we propose a new evaluation workflow that generates steerable synthetic language datasets and proxy tasks for benchmarking the performance of pre-trained LMs on sentence classification tasks. This approach allows for better characterization of the joint analysis on the robustness and accuracy of LMs without risking sensitive information leakage. It also provides a more controlled and private way to evaluate LMs that avoids overfitting specific test sets. Verified on various pretrained LMs, the proposed approach demonstrates promising high correlation with real downstream performance.", "title":"On Robustness-Accuracy Characterization of Language Models using Synthetic Datasets", "authors":[ "Ching-Yun Ko", "Pin-Yu Chen", "Payel Das", "Yung-Sung Chuang", "Luca Daniel" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":244 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=BgvgMxY8s5", "bibtext":"@inproceedings{\nhasan2024handling,\ntitle={Handling Open-Vocabulary Constructs in Formalizing Specifications: Retrieval Augmented Parsing with Expert Knowledge},\nauthor={Mohammad Saqib Hasan and Sayontan Ghosh and Dhruv Verma and Geoff Kuenning and Erez Zadok and Scott Smolka and Niranjan Balasubramanian},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=BgvgMxY8s5}\n}", "abstract":"We study the problem of Open-vocabulary constructs (OVCs), ones that are not known beforehand, in the context of converting natural\nlanguage (NL) specification sentences into formal languages (e.g., LTL or code). Models tend to fare poorly on such OVCs, since they do\nnot have the necessary knowledge a priori. In such settings, a domain expert can provide the correct constructs based on their\npreference or domain knowledge at inference time. Our goal is to effectively reuse this inference-time, expert-provided knowledge in future specification sentences without having to retrain the model. To this end, we first present a new parsing setting---\\emph{dynamic knowledge-augmented parsing} (DKAP)---where, in addition to the input sentence, the model is given (dynamically growing) expert knowledge in the form of a key-value lexicon that associates NL phrases with correct OVC constructs. To address the DKAP problem, we propose ROLex, a retrieval-augmented parsing approach that uses the dynamic expert lexicon. ROLex consists of a retriever and a generator that are trained to find and use the relevant subset of the key-value store to produce the correct parse. One key challenge in realizing this solution is the lack of training data for the retrieval-augmented parsing. We show how we can make use of synthetic data generation, along with original task-level training data---i.e., the (NL sentence, FL statement) pairs---to carry out the requisite training for the retrieval-augmented parsing setting. Further, to improve training effectiveness, we have devised multiple strategies for focusing the model on the relevant subset of retrieved knowledge. Finally, we introduce a new evaluation paradigm designed to address the DKAP problem by simulating the dynamic expert-provided knowledge in three different formalization settings (NL2LTL, NL2Code, and NL2CMD). Our evaluations show that DKAP is a difficult challenge, and ROLex helps improve the performance of baseline models by using dynamic expert knowledge effectively.", "title":"Handling Open-Vocabulary Constructs in Formalizing Specifications: Retrieval Augmented Parsing with Expert Knowledge", "authors":[ "Mohammad Saqib Hasan", "Sayontan Ghosh", "Dhruv Verma", "Geoff Kuenning", "Erez Zadok", "Scott Smolka", "Niranjan Balasubramanian" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":245 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=BaOAvPUyBO", "bibtext":"@inproceedings{\nwu2024do,\ntitle={Do Language Models Plan Ahead for Future Tokens?},\nauthor={Wilson Wu and John Xavier Morris and Lionel Levine},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=BaOAvPUyBO}\n}", "abstract":"Do transformers ``think ahead'' during inference at a given position? It is known transformers prepare information in the hidden states of the forward pass at time step $t$ that is then used in future forward passes $t+\\tau$. We posit two explanations for this phenomenon: pre-caching, in which off-diagonal gradient terms present during training result in the model computing features at $t$ irrelevant to the present inference task but useful for the future, and breadcrumbs, in which features most relevant to time step $t$ are already the same as those that would most benefit inference at time $t+\\tau$. We test these hypotheses by training language models without propagating gradients to past timesteps, a scheme we formalize as myopic training. In a constructed synthetic data setting, we find clear evidence for pre-caching. In the autoregressive language modeling setting, our experiments are more suggestive of the breadcrumbs hypothesis, though pre-caching increases with model scale.", "title":"Do Language Models Plan Ahead for Future Tokens?", "authors":[ "Wilson Wu", "John Xavier Morris", "Lionel Levine" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/wiwu2390\/futuregpt2-public" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":246 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=BDBdblmyzY", "bibtext":"@inproceedings{\nkoo2024automatabased,\ntitle={Automata-based constraints for language model decoding},\nauthor={Terry Koo and Frederick Liu and Luheng He},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=BDBdblmyzY}\n}", "abstract":"Language models (LMs) are often expected to generate strings in some formal language; for example, structured data, API calls, or code snippets.\nAlthough LMs can be tuned to improve their adherence to formal syntax, this does not *guarantee* conformance, especially with smaller LMs suitable for large-scale deployment.\nIn addition, tuning requires significant resources, making it impractical for uncommon or task-specific formats.\nTo prevent downstream parsing errors we would ideally *constrain* the LM to only produce valid output, but this is severely complicated by tokenization, which is typically both ambiguous and misaligned with the formal grammar.\nWe solve these issues through the application of automata theory, deriving an efficient closed-form solution for the *regular languages*, a broad class of formal languages with many practical applications, including API calls or schema-guided JSON and YAML.\nWe also discuss pragmatic extensions for coping with the issue of high branching factor, and extend our techniques to *deterministic context-free languages*, which similarly admit an efficient closed-form solution.\nPrevious work on this topic (Willard and Louf, 2023) layers bespoke solutions onto automata, leading to problems with speed, correctness, and extensibility.\nInstead, we reformulate the entire task in terms of automata so we can leverage well-studied and well-optimized algorithms.\nOur system compiles constraints ~7,000x faster, is provably correct, and can be extended in a modular fashion.", "title":"Automata-based constraints for language model decoding", "authors":[ "Terry Koo", "Frederick Liu", "Luheng He" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":247 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=BAakY1hNKS", "bibtext":"@inproceedings{\nwu2024autogen,\ntitle={AutoGen: Enabling Next-Gen {LLM} Applications via Multi-Agent Conversations},\nauthor={Qingyun Wu and Gagan Bansal and Jieyu Zhang and Yiran Wu and Beibin Li and Erkang Zhu and Li Jiang and Xiaoyun Zhang and Shaokun Zhang and Jiale Liu and Ahmed Hassan Awadallah and Ryen W White and Doug Burger and Chi Wang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=BAakY1hNKS}\n}", "abstract":"We present AutoGen, an open-source framework that allows developers to build LLM applications by composing multiple agents to converse with each other to accomplish tasks. AutoGen agents are customizable, conversable, and can operate in various modes that employ combinations of LLMs, human inputs, and tools. It also enables developers to create flexible agent behaviors and conversation patterns for different applications using both natural language and code. AutoGen serves as a generic infrastructure and is widely used by AI practitioners and researchers to build diverse applications of various complexities and LLM capacities. We demonstrate the framework\u2019s effectiveness with several pilot applications, with domains ranging from mathematics and coding to question-answering, supply-chain optimization, online decision-making, and entertainment.", "title":"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversations", "authors":[ "Qingyun Wu", "Gagan Bansal", "Jieyu Zhang", "Yiran Wu", "Beibin Li", "Erkang Zhu", "Li Jiang", "Xiaoyun Zhang", "Shaokun Zhang", "Jiale Liu", "Ahmed Hassan Awadallah", "Ryen W White", "Doug Burger", "Chi Wang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":248 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=B41hNBoWLo", "bibtext":"@inproceedings{\nmaini2024tofu,\ntitle={{TOFU}: A Task of Fictitious Unlearning for {LLM}s},\nauthor={Pratyush Maini and Zhili Feng and Avi Schwarzschild and Zachary Chase Lipton and J Zico Kolter},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=B41hNBoWLo}\n}", "abstract":"Large language models trained on massive corpora of data from the web can memorize and reproduce sensitive or private data\nraising both legal and ethical concerns. Unlearning, or tuning models to forget information present in their training data, provides us with a way to protect private data after training. Although several methods exist for such unlearning, it is unclear to what extent they result in models equivalent to those where the data to be forgotten was never learned in the first place. To address this challenge, we present TOFU, a Task of Fictitious Unlearning, as a benchmark aimed at helping deepen our understanding of unlearning. We offer a dataset of $200$ diverse synthetic author profiles, each consisting of 20 question-answer pairs, and a subset of these profiles called the forget set that serves as the target for unlearning. We compile a suite of metrics that work together to provide a holistic picture of unlearning efficacy. Finally, we provide a set of baseline results from existing unlearning algorithms. Importantly, none of the baselines we consider show effective unlearning motivating continued efforts to develop approaches for unlearning that effectively tune models so that they truly behave as if they were never trained on the forget data at all.", "title":"TOFU: A Task of Fictitious Unlearning for LLMs", "authors":[ "Pratyush Maini", "Zhili Feng", "Avi Schwarzschild", "Zachary Chase Lipton", "J Zico Kolter" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.06121", "GitHub":[ "https:\/\/github.com\/ucsb-nlp-chang\/uld" ], "paper_page":"https:\/\/huggingface.co\/papers\/2401.06121", "n_linked_authors":3, "upvotes":14, "num_comments":0, "n_authors":5, "Models":[ "locuslab\/tofu_ft_phi-1.5", "locuslab\/tofu_ft_llama2-7b", "RichardErkhov\/locuslab_-_tofu_ft_llama2-7b-4bits", "RichardErkhov\/locuslab_-_tofu_ft_llama2-7b-8bits" ], "Datasets":[ "locuslab\/TOFU", "LZ12DH\/unlearning", "kimperyang\/TOFU-C", "an1118\/TOFU-C", "an1118\/TOFU-Cf", "an1118\/TOFU-Cr", "kimperyang\/TOFUCr1", "kimperyang\/TOFUCrP", "kimperyang\/TOFU-C-Shuffle", "Gyikoo\/TOFU-C-single", "an1118\/TOFU-Cbin", "kimperyang\/TOFU-C-Direct", "Gyikoo\/TOFU-C-All", "an1118\/TOFU-C-All" ], "Spaces":[ "locuslab\/tofu_leaderboard" ], "paper_page_exists_pre_conf":1, "unique_id":249 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=Aaz6R4Tlwv", "bibtext":"@inproceedings{\nedwards2024synergpt,\ntitle={Syner{GPT}: In-Context Learning for Personalized Drug Synergy Prediction and Drug Design},\nauthor={Carl Edwards and Aakanksha Naik and Tushar Khot and Martin D. Burke and Heng Ji and Tom Hope},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=Aaz6R4Tlwv}\n}", "abstract":"Predicting synergistic drug combinations can help accelerate discovery of cancer treatments, particularly therapies personalized to a patient's specific tumor via biopsied cells. In this paper, we propose a novel setting and models for *in-context drug synergy learning*. We are given a small \"personalized dataset\" of 10-20 drug synergy relationships in the context of specific cancer cell targets. Our goal is to predict additional drug synergy relationships in that context. Inspired by recent work that pre-trains a GPT language model (LM) to \"in-context learn\" common function classes, we devise novel pre-training schemes that enable a GPT model to in-context learn \"drug synergy functions\". Our model---which does not use any textual corpora, molecular fingerprints, protein interaction or any other domain-specific knowledge--- is able to achieve competitive results. We further integrate our in-context approach with a genetic algorithm to optimize model prompts and select synergy candidates to test after conducting a patient biopsy. Finally, we explore a novel task of inverse drug design which can potentially enable the design of drugs that synergize specifically to target a given patient's \"personalized dataset'\". Our findings could have an important impact on precision cancer medicine, and also raise intriguing questions on non-textual pre-training for LMs.", "title":"SynerGPT: In-Context Learning for Personalized Drug Synergy Prediction and Drug Design", "authors":[ "Carl Edwards", "Aakanksha Naik", "Tushar Khot", "Martin D. Burke", "Heng Ji", "Tom Hope" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":250 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=ADtL6fgNRv", "bibtext":"@inproceedings{\nhernandez2024inspecting,\ntitle={Inspecting and Editing Knowledge Representations in Language Models},\nauthor={Evan Hernandez and Belinda Z. Li and Jacob Andreas},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=ADtL6fgNRv}\n}", "abstract":"Neural language models (LMs) represent facts about the world described by text. Sometimes these facts derive from training data (in most LMs, a representation of the word *banana* encodes the fact that bananas are fruits). Sometimes facts derive from input text itself (a representation of the sentence *I poured out the bottle* encodes the fact that the bottle became empty). We describe REMEDI, a method for learning to map statements in natural language to fact encodings in an LM's internal representation system. REMEDI encodings can be used as *knowledge editors*: when added to LM hidden representations, they modify downstream generation to be consistent with new facts. REMEDI encodings may also be used as *probes*: when compared to LM representations, they reveal which properties LMs already attribute to mentioned entities, in some cases making it possible to predict when LMs will generate outputs that conflict with background knowledge or input text. REMEDI thus links work on probing, prompting, and LM editing, and offers steps toward general tools for fine-grained inspection and control of knowledge in LMs.", "title":"Inspecting and Editing Knowledge Representations in Language Models", "authors":[ "Evan Hernandez", "Belinda Z. Li", "Jacob Andreas" ], "id":"Conference", "type":"Poster", "arxiv_id":"2304.00740", "GitHub":[ "https:\/\/github.com\/evandez\/remedi" ], "paper_page":"https:\/\/huggingface.co\/papers\/2304.00740", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":251 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=9gdZI7c6yr", "bibtext":"@inproceedings{\nliu2024aligning,\ntitle={Aligning with Human Judgement: The Role of Pairwise Preference in Large Language Model Evaluators},\nauthor={Yinhong Liu and Han Zhou and Zhijiang Guo and Ehsan Shareghi and Ivan Vuli{\\'c} and Anna Korhonen and Nigel Collier},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=9gdZI7c6yr}\n}", "abstract":"Large Language Models (LLMs) have demonstrated promising capabilities as automatic evaluators in assessing the quality of generated natural language. However, LLMs still exhibit biases in evaluation and often struggle to generate coherent evaluations that align with human assessments. In this work, we first conduct a systematic study of the misalignment between LLM evaluators and human evaluation, revealing that existing calibration methods aimed at mitigating biases of LLMs are insufficient for effectively aligning LLM evaluators. Inspired by the use of preference data in RLHF, we formulate the evaluation as a ranking problem and introduce Pairwise-preference Search (PairS), an uncertainty-guided search method that employs LLMs to conduct pairwise comparisons locally and efficiently ranks candidate texts globally. PairS achieves state-of-the-art performance on representative evaluation tasks in long-form generations and demonstrates significant improvements over direct scoring. Furthermore, we provide insights into the role of pairwise preference in quantifying the\ntransitivity of LLMs and demonstrate how PairS benefits from calibration using debiased pairwise evaluations.", "title":"Aligning with Human Judgement: The Role of Pairwise Preference in Large Language Model Evaluators", "authors":[ "Yinhong Liu", "Han Zhou", "Zhijiang Guo", "Ehsan Shareghi", "Ivan Vuli\u0107", "Anna Korhonen", "Nigel Collier" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.16950", "GitHub":[ "https:\/\/github.com\/cambridgeltl\/pairs" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.16950", "n_linked_authors":1, "upvotes":4, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":252 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=9Wmdk94oKF", "bibtext":"@inproceedings{\nshi2024chops,\ntitle={{CHOPS}: {CH}at with custOmer Profile Systems for Customer Service with {LLM}s},\nauthor={Jingzhe Shi and Jialuo Li and Qinwei Ma and Zaiwen Yang and Huan Ma and Lei Li},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=9Wmdk94oKF}\n}", "abstract":"Businesses and software platforms are increasingly utilizing Large Language Models (LLMs) like GPT-3.5, GPT-4, GLM-3, and LLaMa-2 as chat assistants with file access or as reasoning agents for custom service. Current LLM-based customer service models exhibit limited integration with customer profiles and lack operational capabilities, while existing API integrations prioritize diversity over precision and error avoidance that are crucial in real-world scenarios for Customer Service. We propose an LLMs agent called **CHOPS** (**CH**at with cust**O**mer **P**rofile in existing **S**ystem) that: (1) efficiently utilizes existing databases or systems to access user information or interact with these systems based on existing guidance; (2) provides accurate and reasonable responses or executing required operations in the system while avoiding harmful operations; and (3) leverages the combination of small and large LLMs together to provide satisfying performance while having decent inference cost. We introduce a practical dataset, *CPHOS-dataset*, including a database, some guiding files, and QA pairs collected from *CPHOS*, which employs an online platform to facilitate the organization of simulated Physics Olympiads for high school teachers and students. We conduct extensive experiments to validate the performance of our proposed **CHOPS** architecture using the *CPHOS-dataset*, aiming to demonstrate how LLMs can enhance or serve as alternatives to human customer service.", "title":"CHOPS: CHat with custOmer Profile Systems for Customer Service with LLMs", "authors":[ "Jingzhe Shi", "Jialuo Li", "Qinwei Ma", "Zaiwen Yang", "Huan Ma", "Lei Li" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01343", "GitHub":[ "https:\/\/github.com\/jingzheshi\/chops" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":253 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=9JY1QLVFPZ", "bibtext":"@inproceedings{\nzhang2024forcing,\ntitle={Forcing Diffuse Distributions out of Language Models},\nauthor={Yiming Zhang and Avi Schwarzschild and Nicholas Carlini and J Zico Kolter and Daphne Ippolito},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=9JY1QLVFPZ}\n}", "abstract":"Despite being trained specifically to follow user instructions, today\u2019s instruction-tuned\nlanguage models perform poorly when instructed to produce random outputs.\nFor example, when prompted to pick a number uniformly between one and\nten Llama-2-13B-chat disproportionately favors the number five, and when tasked\nwith picking a first name at random, Mistral-7B-Instruct chooses Avery 40 times\nmore often than we would expect based on the U.S. population. When these language\nmodels are used for real-world tasks where diversity of outputs is crucial,\nsuch as language model assisted dataset construction, their inability to produce\ndiffuse distributions over valid choices is a major hurdle. In this work, we propose\na fine-tuning method that encourages language models to output distributions that\nare diffuse over valid outcomes. The methods we introduce generalize across a\nvariety of tasks and distributions and make large language models practical for\nsynthetic dataset generation with little human intervention.", "title":"Forcing Diffuse Distributions out of Language Models", "authors":[ "Yiming Zhang", "Avi Schwarzschild", "Nicholas Carlini", "J Zico Kolter", "Daphne Ippolito" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.10859", "GitHub":[ "https:\/\/github.com\/y0mingzhang\/diffuse-probabilities" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":254 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=9Ik05cycLq", "bibtext":"@inproceedings{\nkumar2024certifying,\ntitle={Certifying {LLM} Safety against Adversarial Prompting},\nauthor={Aounon Kumar and Chirag Agarwal and Suraj Srinivas and Aaron Jiaxun Li and Soheil Feizi and Himabindu Lakkaraju},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=9Ik05cycLq}\n}", "abstract":"Large language models (LLMs) are vulnerable to adversarial attacks, which add maliciously designed token sequences to bypass the model\u2019s safety guardrails and cause it to produce harmful content. In this work, we introduce erase-and-check, the first framework to defend against adversarial prompts with certifiable safety guarantees. Given a prompt, our erase-and-check method erases tokens individually and inspects the resulting subsequences using a safety filter, declaring it harmful if any of the subsequences are detected as harmful. Our safety filters are implemented by leveraging Llama 2 and DistilBERT. We theoretically demonstrate that our method detects harmful prompts with accuracy at least as high as the safety filter. Additionally, we propose three efficient empirical defenses inspired by our erase-and-check (EC) method: i) RandEC, a randomized subsampling version of erase-and-check; ii) GreedyEC, which greedily erases tokens that maximize the softmax score of the harmful class; and iii) GradEC, which uses gradient information to optimize the tokens to erase. Extensive empirical evaluation with real-world datasets demonstrates the effectiveness of the proposed methods in defending against state-of-the-art adversarial prompting attacks.", "title":"Certifying LLM Safety against Adversarial Prompting", "authors":[ "Aounon Kumar", "Chirag Agarwal", "Suraj Srinivas", "Aaron Jiaxun Li", "Soheil Feizi", "Himabindu Lakkaraju" ], "id":"Conference", "type":"Poster", "arxiv_id":"2309.02705", "GitHub":[ "https:\/\/github.com\/aounon\/certified-llm-safety" ], "paper_page":"https:\/\/huggingface.co\/papers\/2309.02705", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ "TrustSafeAI\/GradientCuff-Jailbreak-Defense" ], "paper_page_exists_pre_conf":1, "unique_id":255 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=98ekcwQqb7", "bibtext":"@inproceedings{\njin2024latent,\ntitle={Latent Causal Probing: A Formal Perspective on Probing with Causal Models of Data},\nauthor={Charles Jin},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=98ekcwQqb7}\n}", "abstract":"As language models (LMs) deliver increasing performance on a range of NLP tasks, *probing classifiers* have become an indispensable technique in the effort to better understand their inner workings. A typical setup involves (1) defining an auxiliary task consisting of a dataset of text annotated with labels, then (2) supervising small classifiers to predict the labels from the representations of a pretrained LM as it processes the dataset. A high probing accuracy is interpreted as evidence that the LM has learned to perform the auxiliary task as an unsupervised byproduct of its original pretraining objective. Despite the widespread usage of probes, however, the robust design and analysis of probing experiments remains a challenge. We develop a formal perspective on probing using *structural causal models* (SCM). Specifically, given an SCM which explains the distribution of tokens observed during training, we frame the central hypothesis as whether the LM has learned to represent the latent variables of the SCM. Empirically, we extend a recent study of LMs in the context of a synthetic grid-world navigation task, where having an exact model of the underlying causal structure allows us to draw strong inferences from the result of probing experiments. Our techniques provide robust empirical evidence for the ability of LMs to induce the latent concepts underlying text.", "title":"Latent Causal Probing: A Formal Perspective on Probing with Causal Models of Data", "authors":[ "Charles Jin" ], "id":"Conference", "type":"Poster", "arxiv_id":"2407.13765", "GitHub":[ "https:\/\/github.com\/charlesjin\/emergent-semantics" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":256 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=95TayIeqJ4", "bibtext":"@inproceedings{\ntam2024tmmlu,\ntitle={{TMMLU}+: An Improved Traditional Chinese Evaluation Suite for Foundation Models},\nauthor={Zhi Rui Tam and Ya Ting Pai and Yen-Wei Lee and Hong-Han Shuai and Jun-Da Chen and Wei Min Chu and Sega Cheng},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=95TayIeqJ4}\n}", "abstract":"We present TMMLU+, a new benchmark designed for Traditional Chinese language understanding. TMMLU+ is a multi-choice question-answering dataset with 66 subjects from elementary to professional level. It is six times larger and boasts a more balanced subject distribution than its predecessor, Taiwan Massive Multitask Language Understanding (TMMLU). We also benchmark closed-source models and 26 open-weight Chinese large language models (LLMs) of parameters ranging from 1.8B to 72B on the proposed TMMLU+. Our findings reveal that (1.) Traditional Chinese models still trail behind their Simplified Chinese counterparts, highlighting a need for more focused advancements in LLMs catering to Traditional Chinese. (2.) Current LLMs still fall short of human performance in average scores, indicating a potential need for future research to delve deeper into social science and humanities subjects. (3.) Among all the tokenization compression metrics examined, we identify that only the fertility score uniquely demonstrates strong correlations with our benchmark results. We foresee that TMMLU+ will pinpoint areas for future model improvement, thereby narrowing the gap between machine and human linguistic capabilities and supporting researchers in developing Traditional Chinese LLMs. Our dataset, along with the benchmark source code, is accessible at huggingface.co\/datasets\/ikala\/tmmluplus.", "title":"TMMLU+: An Improved Traditional Chinese Evaluation Suite for Foundation Models", "authors":[ "Zhi Rui Tam", "Ya Ting Pai", "Yen-Wei Lee", "Hong-Han Shuai", "Jun-Da Chen", "Wei Min Chu", "Sega Cheng" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":257 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=8w0RApM5yG", "bibtext":"@inproceedings{\nkumari2024bumblebee,\ntitle={BumbleBee: Dynamic {KV}-Cache Streaming Submodular Summarization for Infinite-Context Transformers},\nauthor={Lilly Kumari and Shengjie Wang and Tianyi Zhou and Nikhil Sarda and Anthony Rowe and Jeff Bilmes},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=8w0RApM5yG}\n}", "abstract":"Transformer-based Large Language Models (LLMs) have shown tremendous advancements across various domains. However, their need to maintain key-value representations (a KV cache) of previously seen tokens in the GPU memory leads to a significant memory overhead that scales linearly with the sequence length and batch size. With the advent of extremely long context LLMs, efficiently modeling long-range dependencies becomes challenging. In this work, we focus on the problem of long context summarization by formulating it as a subset selection problem. Specifically, we propose a novel submodular optimization framework called BumbleBee that uses a mixture of submodular functions to balance the diversity amongst the context tokens in the key embedding space and their importance computed using accumulated attention attributed to them across different input tokens. Our framework can work for both the LLM prefill and decoding phases, utilizing offline or online versions of our submodular algorithm respectively. While the context sizes grow to be as large only as the summary size, the temporal extent of the contexts may grow unboundedly, justifying the moniker \u2018\u2018Infinite-Context Transformers.\u2019\u2019 Empirically, we validate the effectiveness of our framework across 13 different datasets using the LLaMA 7B and 13B models. Our results show that BumbleBee improves accuracy compared to state-of-the-art techniques at comparable context reduction ratios.", "title":"BumbleBee: Dynamic KV-Cache Streaming Submodular Summarization for Infinite-Context Transformers", "authors":[ "Lilly Kumari", "Shengjie Wang", "Tianyi Zhou", "Nikhil Sarda", "Anthony Rowe", "Jeff Bilmes" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":258 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=8tKjqqMM5z", "bibtext":"@inproceedings{\nluohe2024keep,\ntitle={Keep the Cost Down: A Review on Methods to Optimize {LLM}{\\textquoteright}s {KV}-Cache Consumption},\nauthor={Shi Luohe and Hongyi Zhang and Yao Yao and Zuchao Li and hai zhao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=8tKjqqMM5z}\n}", "abstract":"Large Language Models (LLMs), epitomized by ChatGPT's release in late 2022, have revolutionized various industries with their advanced language comprehension. However, their efficiency is challenged by the Transformer architecture's struggle with handling long texts. KV-Cache has emerged as a pivotal solution to this issue, converting the time complexity of token generation from quadratic to linear, albeit with increased GPU memory overhead proportional to conversation length. With the development of the LLM community and academia, various KV-Cache compression methods have been proposed. In this review, we dissect the various properties of KV-Cache and elaborate on various methods currently used to optimize the KV-Cache space usage of LLMs. These methods span the pre-training phase, deployment phase, and inference phase, and we summarize the commonalities and differences among these methods. Additionally, we list some metrics for evaluating the long-text capabilities of large language models, from both efficiency and capability perspectives. Our review thus sheds light on the evolving landscape of LLM optimization, offering insights into future advancements in this dynamic field.", "title":"Keep the Cost Down: A Review on Methods to Optimize LLM\u2019s KV-Cache Consumption", "authors":[ "Shi Luohe", "Hongyi Zhang", "Yao Yao", "Zuchao Li", "hai zhao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":259 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=8TdcXwfNRB", "bibtext":"@inproceedings{\nmishra-sharma2024paperclip,\ntitle={{PAPERCLIP}: Associating Astronomical Observations and Natural Language with Multi-Modal Models},\nauthor={Siddharth Mishra-Sharma and YIDING SONG and Jesse Thaler},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=8TdcXwfNRB}\n}", "abstract":"We present PAPERCLIP (Proposal Abstracts Provide an Effective Representation for Contrastive Language-Image Pre-training), a method which associates astronomical observations imaged by telescopes with natural language using a neural network model. The model is fine-tuned from a pre-trained Contrastive Language-Image Pre-training (CLIP) model using successful observing proposal abstracts and corresponding downstream observations, with the abstracts optionally summarized via guided generation using large language models (LLMs). Using observations from the Hubble Space Telescope (HST) as an example, we show that the fine-tuned model embodies a meaningful joint representation between observations and natural language through tests targeting image retrieval (i.e., finding the most relevant observations using natural language queries) and description retrieval (i.e., querying for astrophysical object classes and use cases most relevant to a given observation). Our study demonstrates the potential for using generalist foundation models rather than task-specific models for interacting with astronomical data by leveraging text as an interface.", "title":"PAPERCLIP: Associating Astronomical Observations and Natural Language with Multi-Modal Models", "authors":[ "Siddharth Mishra-Sharma", "YIDING SONG", "Jesse Thaler" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.08851", "GitHub":[ "https:\/\/github.com\/smsharma\/paperclip-hubble" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":260 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7ysaJGs7zY", "bibtext":"@inproceedings{\nshahgir2024illusionvqa,\ntitle={Illusion{VQA}: A Challenging Optical Illusion Dataset for Vision Language Models},\nauthor={Haz Sameen Shahgir and Khondker Salman Sayeed and Abhik Bhattacharjee and Wasi Uddin Ahmad and Yue Dong and Rifat Shahriyar},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7ysaJGs7zY}\n}", "abstract":"The advent of Vision Language Models (VLM) has allowed researchers to investigate the visual understanding of a neural network using natural language. Beyond object classification and detection, VLMs are capable of visual comprehension and common-sense reasoning. This naturally led to the question: How do VLMs respond when the image itself is inherently *unreasonable*? To this end, we present IllusionVQA: a diverse dataset of challenging optical illusions and hard-to-interpret scenes to test the capability of VLMs in two distinct multiple-choice VQA tasks - comprehension and soft localization. GPT4V, the best performing VLM, achieves 62.99\\% accuracy (4-shot) on the comprehension task and 49.7\\% on the localization task (4-shot and Chain-of-Thought). Human evaluation reveals that humans achieve 91.03\\% and 100\\% accuracy in comprehension and localization. We discover that In-Context Learning (ICL) and Chain-of-Thought reasoning substantially degrade the performance of Gemini-Pro on the localization task. Tangentially, we discover a potential weakness in the ICL capabilities of VLMs: they fail to locate optical illusions even when the correct answer is in the context window as a few-shot example.", "title":"IllusionVQA: A Challenging Optical Illusion Dataset for Vision Language Models", "authors":[ "Haz Sameen Shahgir", "Khondker Salman Sayeed", "Abhik Bhattacharjee", "Wasi Uddin Ahmad", "Yue Dong", "Rifat Shahriyar" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/csebuetnlp\/illusionvqa" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":261 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7xUtka9ck9", "bibtext":"@inproceedings{\nhaller2024yes,\ntitle={Yes, no, maybe? Revisiting language models' response stability under paraphrasing for the assessment of political leaning},\nauthor={Patrick Haller and Jannis Vamvas and Lena Ann J{\\\"a}ger},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7xUtka9ck9}\n}", "abstract":"An increasing number of studies are aimed at uncovering characteristics such as personality traits or political leanings of language models (LMs), using questionnaires developed for human respondents. From this previous body of work, it is evident that models are highly sensitive to prompt design, including the phrasing of questions and statements, as well as the format of the expected response (e.g., forced choice, vs open-ended). These sensitivities then often lead to inconsistent responses. However, most studies assess response stability on a small scale with low statistical power e.g., using less than ten paraphrases of the same question.\n\nIn this work, we investigate the stability of responses to binary forced-choice questions using a large number of paraphrases. Specifically, we probe both masked language models (MLMs) and left-to-right generative language models (GLMs) on the political compass test, assessing response validity (i.e., the proportion of valid responses to a prompt) and response stability (i.e., the variability under paraphrasing) across 500 paraphrases of each statement. This large-scale assessment allows us to approximate the underlying distribution of model responses more precisely, both in terms of the overall stability of a model under paraphrasing as well as the stability of specific items (i.e., the intended meaning of a question). In addition, to investigate whether there are structural biases that drive model responses into a certain direction, we test the association between different word- and sentence-level features, and the models' responses.\n\nWe find that while all MLMs exhibit a high degree of response validity, GLMs do not consistently produce valid responses when assessed via forced choice. In terms of response stability, we show that even models that exhibit high overall stability scores flip their responses given certain paraphrases. Crucially, even within-model, response stability can vary considerably between items. We also find that models tend to agree more with statements that show high positive sentiment scores.\n\nBased on our results, we argue that human-centered questionnaires might not be appropriate in the context of probing LMs as both their response validity and stability differ considerably between items. Moreover, although stability metrics represent useful descriptions of model properties, it should be emphasized that even for models exhibiting fairly high stability, specific paraphrases can lead to substantially different model responses.", "title":"Yes, no, maybe? Revisiting language models' response stability under paraphrasing for the assessment of political leaning", "authors":[ "Patrick Haller", "Jannis Vamvas", "Lena Ann J\u00e4ger" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":262 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7jSMMvXLri", "bibtext":"@inproceedings{\nchen2024measuring,\ntitle={Measuring Taiwanese Mandarin Language Understanding},\nauthor={Po-Heng Chen and Sijia Cheng and Wei-Lin Chen and Yen-Ting Lin and Yun-Nung Chen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7jSMMvXLri}\n}", "abstract":"The evaluation of large language models (LLMs) has drawn substantial attention in the field recently.\nThis work focuses on evaluating LLMs in a Chinese context, specifically, for Traditional Chinese which has been largely underrepresented in existing benchmarks.\nWe present TMLU, a comprehensive evaluation suit tailored for assessing the advanced knowledge and reasoning capability in LLMs, under the context of Taiwanese Mandarin.\nTMLU consists of an array of 37 subjects across social science, STEM, humanities, Taiwan-specific content, and others, ranging from middle school to professional levels.\nIn addition, we curate chain-of-thought-like few-shot explanations for each subject to facilitate the evaluation of complex reasoning skills.\nTo establish a comprehensive baseline, we conduct extensive experiments and analysis on 24 advanced LLMs.\nThe results suggest that Chinese open-weight models demonstrate inferior performance comparing to multilingual proprietary ones, and open-weight models tailored for Taiwanese Mandarin lag behind the Simplified-Chinese counterparts.\nThe findings indicate great headrooms for improvement, and emphasize the goal of TMLU to foster the development of localized Taiwanese-Mandarin LLMs.\nWe release the benchmark and evaluation scripts for the community to promote future research.", "title":"Measuring Taiwanese Mandarin Language Understanding", "authors":[ "Po-Heng Chen", "Sijia Cheng", "Wei-Lin Chen", "Yen-Ting Lin", "Yun-Nung Chen" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.20180", "GitHub":[ "https:\/\/github.com\/miulab\/taiwan-llama" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.20180", "n_linked_authors":1, "upvotes":4, "num_comments":0, "n_authors":5, "Models":[ "yentinglin\/Llama-3-Taiwan-70B-Instruct", "yentinglin\/Llama-3-Taiwan-8B-Instruct", "yentinglin\/Llama-3-Taiwan-8B-Instruct-128k", "yentinglin\/Llama-3-Taiwan-70B-Instruct-DPO", "yentinglin\/Llama-3-Taiwan-70B-Instruct-128k", "chienweichang\/Llama-3-Taiwan-8B-Instruct-128k-GGUF", "chienweichang\/Llama-3-Taiwan-70B-Instruct-GGUF", "nihaomur\/Llama-3-Taiwan-8B-Instruct-AWQ-4bit", "yentinglin\/Llama-3-Taiwan-8B-Instruct-DPO", "chienweichang\/Llama-3-Taiwan-8B-Instruct-DPO-GGUF", "chienweichang\/Llama-3-Taiwan-8B-Instruct-GGUF", "RichardErkhov\/yentinglin_-_Llama-3-Taiwan-8B-Instruct-gguf", "pigfoot\/Llama-3-Taiwan-8B-Instruct-V1-5bpw-exl2" ], "Datasets":[ ], "Spaces":[ "yentinglin\/Taiwan-LLaMa2", "Chiuzu\/yentinglin-Llama-3-Taiwan-70B-Instruct", "kevindomo\/yentinglin-Llama-3-Taiwan-70B-Instruct", "kevindomo\/yentinglin-Llama-3-Taiwan-70B-Instruct-DPO", "rubengtsui\/yentinglin-Llama-3-Taiwan-8B-Instruct", "chienweichang\/lmdeploy" ], "paper_page_exists_pre_conf":1, "unique_id":263 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7iaAlIlV2H", "bibtext":"@inproceedings{\nwu2024pairwise,\ntitle={Pairwise Proximal Policy Optimization: Language Model Alignment with Comparative {RL}},\nauthor={Tianhao Wu and Banghua Zhu and Ruoyu Zhang and Zhaojin Wen and Kannan Ramchandran and Jiantao Jiao},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7iaAlIlV2H}\n}", "abstract":"LLMs may exhibit harmful behavior without aligning with human values. The dominant approach for steering LLMs towards beneficial behavior is Reinforcement Learning with Human Feedback (RLHF). This involves training a reward model with a human-labeled ranking dataset and fine-tuning the LLM with the reward signal using RL. Despite the fact that the reward is learned from comparing different responses, the RL stage doesn't involve direct comparisons. This inconsistency between reward learning and reinforcement learning stages exacerbates RL's instability. An example would be that the well adopted RL optimizer, Proximal Policy Optimization (PPO), could perform different gradient updates even for batches with identical human preference information. To address this, we propose a new framework, reinforcement learning with comparative feedback, and a simple policy gradient algorithm, Pairwise Proximal Policy Optimization (P3O), that learns to improve from direct comparison. Theoretically, P3O has the nice property of being invariant with any reward functions that contain identical preference information, while doesn't require learning a value function. Empirical evaluations demonstrate that P3O can align with human preferences better than existing methods. This suggest that comparative RL is strong candidate for aligning LLM with preference data.", "title":"Pairwise Proximal Policy Optimization: Language Model Alignment with Comparative RL", "authors":[ "Tianhao Wu", "Banghua Zhu", "Ruoyu Zhang", "Zhaojin Wen", "Kannan Ramchandran", "Jiantao Jiao" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":264 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7VPKtz8CHN", "bibtext":"@inproceedings{\nzhao2024beyond,\ntitle={Beyond Relevance: Evaluate and Improve Retrievers on Perspective Awareness},\nauthor={Xinran Zhao and Tong Chen and Sihao Chen and Hongming Zhang and Tongshuang Wu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7VPKtz8CHN}\n}", "abstract":"The task of Information Retrieval (IR) requires a system to identify relevant documents based on users' information needs. In real-world scenarios, retrievers are expected to not only rely on the semantic relevance between the documents and the queries but also recognize the nuanced intents or perspectives behind a user query. For example, when asked to verify a claim, a retrieval system is expected to identify evidence from both supporting vs. contradicting perspectives, for the downstream system to make a fair judgment call.\nIn this work, we study whether retrievers can recognize and respond to different perspectives of the queries --- beyond finding relevant documents for a claim, can retrievers distinguish supporting vs. opposing documents? We reform and extend six existing tasks to create a benchmark for retrieval, where we have diverse perspectives described in free-form text, besides root, neutral queries. We show that current retrievers covered in our experiments have limited awareness of subtly different perspectives in queries and can also be biased toward certain perspectives. Motivated by the observation, we further explore the potential to leverage geometric features of retriever representation space to improve the perspective awareness of retrievers in a zero-shot manner. We demonstrate the efficiency and effectiveness of our projection-based methods on the same set of tasks. Further analysis also shows how perspective awareness improves performance on various downstream tasks, with 4.2% higher accuracy on AmbigQA and 29.9% more correlation with designated viewpoints on essay writing, compared to non-perspective-aware baselines.", "title":"Beyond Relevance: Evaluate and Improve Retrievers on Perspective Awareness", "authors":[ "Xinran Zhao", "Tong Chen", "Sihao Chen", "Hongming Zhang", "Tongshuang Wu" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.02714", "GitHub":[ "https:\/\/github.com\/colinzhaoust\/pir" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":265 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7QaEO9WYMa", "bibtext":"@inproceedings{\nfan2024polyvisualexpert,\ntitle={Poly-Visual-Expert Vision-Language Models},\nauthor={Xiaoran Fan and Tao Ji and \u6c5f\u5e38\u7693 and Shuo Li and Senjie Jin and Sirui Song and Junke Wang and Boyang Hong and Lu Chen and Guodong Zheng and Ming Zhang and Huangcaishuang and Rui Zheng and Zhiheng Xi and Yuhao Zhou and Shihan Dou and Junjie Ye and Hang Yan and Tao Gui and Qi Zhang and Xipeng Qiu and Xuanjing Huang and Zuxuan Wu and Yu-Gang Jiang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7QaEO9WYMa}\n}", "abstract":"Current large vision-language models (VLMs) frequently face challenges such as the limited capabilities of a single visual component and the excessive length of visual tokens. These issues can limit the model's ability to interpret complex visual information and over-lengthy contextual information accurately. Tackling these challenges is crucial for enhancing the performance and applicability of VLMs. This paper proposes leveraging the ensemble experts technique to synergize the capabilities of individual visual encoders, including those skilled in image-text matching, image segmentation, OCR, etc. This method introduces a fusion network that consolidates the outputs from different visual experts while bridging the gap between image encoders and pre-trained LLMs. In addition, we explore different positional encoding schemes to mitigate the waste of positional encoding caused by lengthy image feature sequences, effectively addressing the issue of position overflow and length limitations. For instance, in our implementation, this technique significantly reduces the positional occupancy in models like SAM, from a substantial 4096 to a more efficient 64 or even down to 1. Experimental results show that VLMs with multiple experts consistently outperform isolated visual encoders, with notable performance improvements as more experts are integrated. Our codes are available on our project website.", "title":"Poly-Visual-Expert Vision-Language Models", "authors":[ "Xiaoran Fan", "Tao Ji", "\u6c5f\u5e38\u7693", "Shuo Li", "Senjie Jin", "Sirui Song", "Junke Wang", "Boyang Hong", "Lu Chen", "Guodong Zheng", "Ming Zhang", "Huangcaishuang", "Rui Zheng", "Zhiheng Xi", "Yuhao Zhou", "Shihan Dou", "Junjie Ye", "Hang Yan", "Tao Gui", "Qi Zhang", "Xipeng Qiu", "Xuanjing Huang", "Zuxuan Wu", "Yu-Gang Jiang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/fudannlplab\/mousi" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":266 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=7BCmIWVT0V", "bibtext":"@inproceedings{\nsun2024corex,\ntitle={Corex: Pushing the Boundaries of Complex Reasoning through Multi-Model Collaboration},\nauthor={Qiushi Sun and Zhangyue Yin and Xiang Li and Zhiyong Wu and Xipeng Qiu and Lingpeng Kong},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=7BCmIWVT0V}\n}", "abstract":"Large Language Models (LLMs) are evolving at an unprecedented pace and have exhibited considerable capability in the realm of natural language processing (NLP) with world knowledge. Benefiting from ultra-large-scale training corpora, a single LLM can manage typical NLP tasks competently. However, its performance in executing complex tasks is still confined by the limitations of its internal representation. To push this boundary further, we introduce Corex, a suite of novel general-purpose strategies that transform LLMs into autonomous agents, pioneering multi-model collaborations for task-solving. Inspired by human behaviors, Corex is constituted by diverse collaboration paradigms including Discuss, Review, and Retrieve modes, which collectively work towards enhancing the reasoning process. These paradigms foster task-agnostic approaches that enable LLMs to \u201cthink outside the box,\u201d thereby overcoming common errors and providing better solutions. Through extensive experiments across four different types of reasoning tasks, we demonstrate that orchestrating multiple LLM-based agents to work in concert yields better results compared to well-established existing baselines. Further analysis reveals the advantages of Corex over other multi-model methods, synergies produced among different LLMs, and the effectiveness across various aspects.", "title":"Corex: Pushing the Boundaries of Complex Reasoning through Multi-Model Collaboration", "authors":[ "Qiushi Sun", "Zhangyue Yin", "Xiang Li", "Zhiyong Wu", "Xipeng Qiu", "Lingpeng Kong" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.00280", "GitHub":[ "https:\/\/github.com\/qiushisun\/corex" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.00280", "n_linked_authors":1, "upvotes":3, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":267 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=6vEfyp0o68", "bibtext":"@inproceedings{\nding2024mango,\ntitle={{MANGO}: A Benchmark for Evaluating Mapping and Navigation Abilities of Large Language Models},\nauthor={Peng Ding and Jiading Fang and Peng Li and Kangrui Wang and Xiaochen Zhou and Mo Yu and Jing Li and Hongyuan Mei and Matthew Walter},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=6vEfyp0o68}\n}", "abstract":"Large language models such as ChatGPT and GPT-4 have recently achieved astonishing performance on a variety of natural language processing tasks. In this paper, we propose MANGO, a benchmark to evaluate their capabilities to perform text-based mapping and navigation. Our benchmark includes 53 mazes taken from a suite of textgames: each maze is paired with a walkthrough that visits every location but does not cover all possible paths. The task is question-answering: for each maze, a large language model reads the walkthrough and answers hundreds of mapping and navigation questions such as \"How should you go to Attic from West of House?\" and \"Where are we if we go north and east from Cellar?\". Although these questions are easy to humans, it turns out that even GPT-4, the best-to-date language model, performs poorly at answering them. Further, our experiments suggest that a strong mapping and navigation ability would benefit large language models in performing relevant downstream tasks, such as playing textgames. Our MANGO benchmark will facilitate future research on methods that improve the mapping and navigation capabilities of language models. We host our leaderboard, data, code, and evaluation program at https:\/\/mango.ttic.edu and https:\/\/github.com\/oaklight\/mango\/.", "title":"MANGO: A Benchmark for Evaluating Mapping and Navigation Abilities of Large Language Models", "authors":[ "Peng Ding", "Jiading Fang", "Peng Li", "Kangrui Wang", "Xiaochen Zhou", "Mo Yu", "Jing Li", "Hongyuan Mei", "Matthew Walter" ], "id":"Conference", "type":"Poster", "arxiv_id":"2403.19913", "GitHub":[ "https:\/\/github.com\/oaklight\/mango" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":268 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=6U1FEKP7Ar", "bibtext":"@inproceedings{\nwang2024exovip,\ntitle={ExoViP: Step-by-step Verification and Exploration with Exoskeleton Modules for Compositional Visual Reasoning},\nauthor={Yuxuan Wang and Alan Yuille and Zhuowan Li and Zilong Zheng},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=6U1FEKP7Ar}\n}", "abstract":"Compositional visual reasoning methods, which translate a complex query into a structured composition of feasible visual tasks, have exhibited a strong potential in complicated multi-modal tasks. Empowered by recent advances in large language models (LLMs), this multi-modal challenge has been brought to a new stage by treating LLMs as few-shot\/zero-shot planners, i.e., vision-language (VL) programming.\nSuch methods, despite their numerous merits, suffer from challenges due to LLM planning mistakes or inaccuracy of visual execution modules, lagging behind the non-compositional models.\nIn this work, we devise a \"plug-and-play\" method, ExoViP, to correct errors in both the planning and execution stages through introspective verification. We employ verification modules as \"exoskeletons\" to enhance current VL programming schemes. Specifically, our proposed verification module utilizes a mixture of three sub-verifiers to validate predictions after each reasoning step, subsequently calibrating the visual module predictions and refining the reasoning trace planned by LLMs. \nExperimental results on two representative VL programming methods showcase consistent improvements on five compositional reasoning tasks on standard benchmarks. In light of this, we believe that ExoViP can foster better performance and generalization on open-domain multi-modal challenges.", "title":"ExoViP: Step-by-step Verification and Exploration with Exoskeleton Modules for Compositional Visual Reasoning", "authors":[ "Yuxuan Wang", "Alan Yuille", "Zhuowan Li", "Zilong Zheng" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.02210", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2408.02210", "n_linked_authors":3, "upvotes":7, "num_comments":2, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":269 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=60a1SAtH4e", "bibtext":"@inproceedings{\nli2024measuring,\ntitle={Measuring and Controlling Instruction (In)Stability in Language Model Dialogs},\nauthor={Kenneth Li and Tianle Liu and Naomi Bashkansky and David Bau and Fernanda Vi{\\'e}gas and Hanspeter Pfister and Martin Wattenberg},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=60a1SAtH4e}\n}", "abstract":"System-prompting is a standard tool for customizing language-model chatbots, enabling them to follow a specific instruction. An implicit assumption in the use of system prompts is that they will be _stable_, so the chatbot will continue to generate text according to the stipulated instructions for the duration of a conversation. We propose a quantitative benchmark to test this assumption, evaluating instruction stability via self-chats between two instructed chatbots. Testing popular models like LLaMA2-chat-70B and GPT-3.5, we reveal a significant _instruction drift_ within eight rounds of conversations. An empirical and theoretical analysis of this phenomenon suggests the transformer attention mechanism plays a role, due to _attention decay_ over long exchanges. To combat attention decay and instruction drift, we propose a lightweight method called split-softmax, which compares favorably against two strong baselines. \nCode: [https:\/\/github.com\/likenneth\/persona_drift](https:\/\/github.com\/likenneth\/persona_drift).", "title":"Measuring and Controlling Instruction (In)Stability in Language Model Dialogs", "authors":[ "Kenneth Li", "Tianle Liu", "Naomi Bashkansky", "David Bau", "Fernanda Vi\u00e9gas", "Hanspeter Pfister", "Martin Wattenberg" ], "id":"Conference", "type":"Poster", "arxiv_id":"2402.10962", "GitHub":[ "https:\/\/github.com\/likenneth\/persona_drift" ], "paper_page":"https:\/\/huggingface.co\/papers\/2402.10962", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ "Naomibas\/llm-system-prompts-benchmark" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":270 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=5u1GpUkKtG", "bibtext":"@inproceedings{\neisenstein2024helping,\ntitle={Helping or Herding? Reward Model Ensembles Mitigate but do not Eliminate Reward Hacking},\nauthor={Jacob Eisenstein and Chirag Nagpal and Alekh Agarwal and Ahmad Beirami and Alexander Nicholas D'Amour and Krishnamurthy Dj Dvijotham and Adam Fisch and Katherine A Heller and Stephen Robert Pfohl and Deepak Ramachandran and Peter Shaw and Jonathan Berant},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=5u1GpUkKtG}\n}", "abstract":"Reward models play a key role in aligning language model applications towards human preferences. \nHowever, this setup creates an incentive for the language model to exploit errors in the reward model to achieve high estimated reward, a phenomenon often termed \\emph{reward hacking}.\nA natural mitigation is to train an ensemble of reward models, aggregating over model outputs to obtain a more robust reward estimate. \nWe explore the application of reward ensembles to alignment at both training time (through reinforcement learning) and inference time (through reranking). \nFirst, we show that reward models are \\emph{underspecified}: reward models that perform similarly in-distribution can yield very different rewards when used in alignment, due to distribution shift. \nSecond, underspecification results in overoptimization, where alignment to one reward model does not improve reward as measured by another reward model trained on the same data. \nThird, overoptimization is mitigated by the use of reward ensembles, and ensembles that vary by their \\emph{pretraining} seeds lead to better generalization than ensembles that differ only by their \\emph{fine-tuning} seeds, with both outperforming individual reward models.\nHowever, even pretrain reward ensembles do not eliminate reward hacking: we show several qualitative reward hacking phenomena that are not mitigated by ensembling because all reward models in the ensemble exhibit similar error patterns.", "title":"Helping or Herding? Reward Model Ensembles Mitigate but do not Eliminate Reward Hacking", "authors":[ "Jacob Eisenstein", "Chirag Nagpal", "Alekh Agarwal", "Ahmad Beirami", "Alexander Nicholas D'Amour", "Krishnamurthy Dj Dvijotham", "Adam Fisch", "Katherine A Heller", "Stephen Robert Pfohl", "Deepak Ramachandran", "Peter Shaw", "Jonathan Berant" ], "id":"Conference", "type":"Poster", "arxiv_id":"2312.09244", "GitHub":[ "https:\/\/github.com\/google-deepmind\/reward-ensembles" ], "paper_page":"https:\/\/huggingface.co\/papers\/2312.09244", "n_linked_authors":3, "upvotes":5, "num_comments":1, "n_authors":12, "Models":[ ], "Datasets":[ "taesiri\/arxiv_qa" ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":271 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=5fg0VtRxgi", "bibtext":"@inproceedings{\nsodhi2024step,\ntitle={SteP: Stacked {LLM} Policies for Web Actions},\nauthor={Paloma Sodhi and S.R.K Branavan and Yoav Artzi and Ryan McDonald},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=5fg0VtRxgi}\n}", "abstract":"Performing tasks on the web presents fundamental challenges to large language models (LLMs), including combinatorially large open-world tasks and variations across web interfaces. Simply specifying a large prompt to handle all possible behaviors and states is extremely complex, and results in behavior leaks between unrelated behaviors. Decomposition to distinct policies can address this challenge but requires carefully handing off control between policies. We propose Stacked LLM Policies for Web Actions (SteP), an approach to dynamically compose policies to solve a diverse set of web tasks. SteP defines a Markov Decision Process where the state is a stack of policies representing the control state, i.e., the chain of policy calls. Unlike traditional methods that are restricted to static hierarchies, SteP enables dynamic control that adapts to the complexity of the task. We evaluate SteP against multiple baselines and web environments including WebArena, MiniWoB++, and a CRM. On WebArena, SteP improves (14.9\\% to 33.5\\%) over SOTA that use GPT-4 policies, while on MiniWob++, SteP is competitive with prior works while using significantly less data. Our code and data are available at https:\/\/asappresearch.github.io\/webagents-step.", "title":"SteP: Stacked LLM Policies for Web Actions", "authors":[ "Paloma Sodhi", "S.R.K Branavan", "Yoav Artzi", "Ryan McDonald" ], "id":"Conference", "type":"Poster", "arxiv_id":"2310.03720", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2310.03720", "n_linked_authors":2, "upvotes":6, "num_comments":1, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":272 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=5RdIMlGLXL", "bibtext":"@inproceedings{\nostendorff2024llmdatasets,\ntitle={{LLM}-Datasets: An Open Framework for Pretraining Datasets of Large Language Models},\nauthor={Malte Ostendorff and Pedro Ortiz Suarez and Lucas Fonseca Lage and Georg Rehm},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=5RdIMlGLXL}\n}", "abstract":"Large language models have become the cornerstone of today's natural language processing research. To facilitate the training, evaluation, and deployment of language models, the community has developed a series of tools and frameworks and made them openly available. This joint community effort has led to more collaboration, standardization, and overall more progress in language model research. However, one crucial aspect of large language models has been neglected so far: the pretraining datasets. To address this gap, we present an open framework for the collection and systematic compilation of pretraining datasets, called LLM-Datasets. With LLM-Datasets, we make a community-effort and collaborate with experts from the individual languages to collect and systematically compile datasets suitable in terms of data quantity and quality for pretraining language models in a multilingual setting. The framework provides a unified interface to pretraining datasets enabling the download, text extraction, filtering, and sampling of the pretraining data. It is modular and extensible with new datasets and designed with high-performance-computing requirements in mind that are needed to achieve the scale of today's language models. Users of the framework can focus on the actual data composition and reuse existing datasets from the community while ensuring reproducibility. To showcase LLM-Datasets, we compiled a pretraining dataset with 2.3 trillion tokens for a large language model covering 32 European languages.", "title":"LLM-Datasets: An Open Framework for Pretraining Datasets of Large Language Models", "authors":[ "Malte Ostendorff", "Pedro Ortiz Suarez", "Lucas Fonseca Lage", "Georg Rehm" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":273 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=5Nsl0nlStc", "bibtext":"@inproceedings{\nmavromatis2024pack,\ntitle={Pack of {LLM}s: Model Fusion at Test-Time via Perplexity Optimization},\nauthor={Costas Mavromatis and Petros Karypis and George Karypis},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=5Nsl0nlStc}\n}", "abstract":"Fusing knowledge from multiple Large Language Models (LLMs) can combine their diverse strengths to achieve improved performance on a given task. However, current fusion approaches either rely on learning-based fusers that do not generalize to new LLMs, or do not take into account how well each LLM understands the input. In this work, we study LLM fusion at test-time, which enables leveraging knowledge from arbitrary user-specified LLMs during inference. We introduce Pack of LLMs (PackLLM), an effective method for test-time fusion that leverages each LLM\u2019s expertise, given an input prompt. PackLLM performs model fusion by solving an optimization problem for determining each LLM\u2019s importance, so that perplexity over the input prompt is minimized. First, our simple PackLLM-sim variant validates that perplexity is a good indicator for measuring each LLM\u2019s expertise. Second, our PackLLM-opt variant approximately solves the perplexity minimization problem via a greedy algorithm. The derived importance weights are used to combine the LLMs during inference. We conduct experiments with over 100 total LLMs on a diverse set of tasks. Experimental results show that (i) perplexity is a reliable measure for LLM fusion, (ii) PackLLM outperforms test-time fusion baselines by 1.89% accuracy points, (iii) PackLLM can leverage new LLMs to improve performance over learning-based fusion approaches by 3.92\u201311.94% accuracy points, and (iv) PackLLM benefits over selecting the best or largest model and model merging in certain cases. Our code is provided at [https:\/\/github.com\/cmavro\/PackLLM](https:\/\/github.com\/cmavro\/PackLLM).", "title":"Pack of LLMs: Model Fusion at Test-Time via Perplexity Optimization", "authors":[ "Costas Mavromatis", "Petros Karypis", "George Karypis" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.11531", "GitHub":[ "https:\/\/github.com\/cmavro\/packllm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":274 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=5Evv4tIjUI", "bibtext":"@inproceedings{\nlee2024exploiting,\ntitle={Exploiting the Potential of Seq2Seq Models as Robust Few-Shot Learners},\nauthor={Jihyeon Lee and Dain Kim and Doohae Jung and Boseop Kim and Kyoung-Woon On},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=5Evv4tIjUI}\n}", "abstract":"In-context learning, which offers substantial advantages over fine-tuning, is predominantly observed in decoder-only models, while encoder-decoder (i.e., seq2seq) models excel in methods that rely on weight updates. Recently, a few studies have demonstrated the feasibility of few-shot learning with seq2seq models; however, this has been limited to tasks that align well with the seq2seq architecture, such as summarization and translation. Inspired by these initial studies, we provide a first-ever extensive experiment comparing the in-context few-shot learning capabilities of decoder-only and encoder-decoder models on a broad range of tasks. Furthermore, we propose two methods to more effectively elicit in-context learning ability in seq2seq models: objective-aligned prompting and a fusion-based approach. Remarkably, our approach outperforms a decoder-only model that is six times larger and exhibits significant performance improvements compared to conventional seq2seq models across a variety of settings. We posit that, with the right configuration and prompt design, seq2seq models can be highly effective few-shot learners for a wide spectrum of applications.", "title":"Exploiting the Potential of Seq2Seq Models as Robust Few-Shot Learners", "authors":[ "Jihyeon Lee", "Dain Kim", "Doohae Jung", "Boseop Kim", "Kyoung-Woon On" ], "id":"Conference", "type":"Poster", "arxiv_id":"2307.14856", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":275 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=5B2K4LRgmz", "bibtext":"@inproceedings{\ngerstgrasser2024is,\ntitle={Is Model Collapse Inevitable? Breaking the Curse of Recursion by Accumulating Real and Synthetic Data},\nauthor={Matthias Gerstgrasser and Rylan Schaeffer and Apratim Dey and Rafael Rafailov and Tomasz Korbak and Henry Sleight and Rajashree Agrawal and John Hughes and Dhruv Bhandarkar Pai and Andrey Gromov and Dan Roberts and Diyi Yang and David L. Donoho and Sanmi Koyejo},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=5B2K4LRgmz}\n}", "abstract":"The proliferation of generative models, combined with pretraining on web-scale data, raises a timely question: what happens when future models are trained on model-generated data? Recent investigations answered that such model-data feedback loops cause performance to progressively degrades with each model-data iteration until fitted models become useless, a phenomenon termed model collapse. However, those studies largely assumed that new data replace old data over time, where a more realistic assumption is that data accumulate over time. In this paper, we ask: what effect does accumulating data have on model collapse? \nWe first empirically study this question by pretraining sequences of language models on text corpora. After confirming that replacing the original real data by each generation's synthetic data does indeed tend towards model collapse, we demonstrate that accumulating synthetic data with real data avoids model collapse; these results hold across a range of sizes, architectures, and hyperparameters. We obtain similar results for other deep generative models: diffusion models for molecule conformation generation and variational autoencoders for image generation. To understand why accumulating data can avoid model collapse, we use an analytically tractable framework introduced by prior work in which a sequence of linear models are fit to previous models' outputs. Previous work used this framework to show that if data are replaced, the test error increases with the number of model-fitting iterations; we extend this argument to prove that if data instead accumulate, the test error has a finite upper bound independent of the number of iterations, meaning model collapse is avoided. \nOur work provides consistent empirical and theoretical evidence that data accumulation avoids model collapse.", "title":"Is Model Collapse Inevitable? Breaking the Curse of Recursion by Accumulating Real and Synthetic Data", "authors":[ "Matthias Gerstgrasser", "Rylan Schaeffer", "Apratim Dey", "Rafael Rafailov", "Tomasz Korbak", "Henry Sleight", "Rajashree Agrawal", "John Hughes", "Dhruv Bhandarkar Pai", "Andrey Gromov", "Dan Roberts", "Diyi Yang", "David L. Donoho", "Sanmi Koyejo" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.01413", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.01413", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":14, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":276 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=4aqq9xTtih", "bibtext":"@inproceedings{\ndong2024promptprompted,\ntitle={Prompt-prompted Adaptive Structured Pruning for Efficient {LLM} Generation},\nauthor={Harry Dong and Beidi Chen and Yuejie Chi},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=4aqq9xTtih}\n}", "abstract":"With the development of transformer-based large language models (LLMs), they have been applied to many fields due to their remarkable utility, but this comes at a considerable computational cost at deployment. Fortunately, some methods such as pruning or constructing a mixture of experts (MoE) aim at exploiting sparsity in transformer feedforward (FF) blocks to gain boosts in speed and reduction in memory requirements. However, these techniques can be very costly and inflexible in practice, as they often require training or are restricted to specific types of architectures. To address this, we introduce GRIFFIN, a novel training-free and calibration-free method that selects unique FF experts at the sequence level for efficient generation across a plethora of LLMs with different non-ReLU activation functions. This is possible due to a critical observation that many trained LLMs naturally produce highly structured FF activation patterns within a sequence, which we call flocking. Despite our method's simplicity, we show with 50% of the FF parameters, GRIFFIN maintains the original model's performance with little to no degradation on a variety of classification and generation tasks, all while improving latency (e.g. 1.29$\\times$ and 1.25$\\times$ speed-ups in Gemma 7B and Llama 2 13B, respectively, on an NVIDIA L40). Code is available at https:\/\/github.com\/hdong920\/GRIFFIN.", "title":"Prompt-prompted Adaptive Structured Pruning for Efficient LLM Generation", "authors":[ "Harry Dong", "Beidi Chen", "Yuejie Chi" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/hdong920\/griffin" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":277 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=4HNAwZFDcH", "bibtext":"@inproceedings{\nstyles2024workbench,\ntitle={WorkBench: a Benchmark Dataset for Agents in a Realistic Workplace Setting},\nauthor={Olly Styles and Sam Miller and Patricio Cerda-Mardini and Tanaya Guha and Victor Sanchez and Bertie Vidgen},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=4HNAwZFDcH}\n}", "abstract":"We introduce WorkBench: a benchmark dataset for evaluating agents\u2019 ability to execute tasks in a workplace setting. WorkBench contains a sandbox environment with five databases, 26 tools, and 690 tasks. These tasks represent common business activities, such as sending emails and scheduling meetings. The tasks in WorkBench are challenging as they require planning, tool selection, and often multiple actions. If a task has been successfully executed, one (or more) of the database values may change. The correct outcome for each task is unique and unambiguous, which allows for robust, automated evaluation. We call this key contribution outcome-centric evaluation. We evaluate five existing ReAct agents on WorkBench, finding they successfully complete as few as 3% of tasks (Llama2-70B), and just 43% for the best-performing (GPT-4). We further find that agents\u2019 errors can result in the wrong action being taken, such as an email being sent to the wrong person. WorkBench reveals weaknesses in agents\u2019 ability to undertake common business activities, raising questions about their use in high-stakes workplace settings. WorkBench is publicly available as a free resource at https:\/\/github.com\/link_updated_upon_acceptance", "title":"WorkBench: a Benchmark Dataset for Agents in a Realistic Workplace Setting", "authors":[ "Olly Styles", "Sam Miller", "Patricio Cerda-Mardini", "Tanaya Guha", "Victor Sanchez", "Bertie Vidgen" ], "id":"Conference", "type":"Poster", "arxiv_id":"2405.00823", "GitHub":[ "https:\/\/github.com\/olly-styles\/workbench" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":278 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=46Zgqo4QIU", "bibtext":"@inproceedings{\nzelikman2024selftaught,\ntitle={Self-Taught Optimizer ({STOP}): Recursively Self-Improving Code Generation},\nauthor={Eric Zelikman and Eliana Lorch and Lester Mackey and Adam Tauman Kalai},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=46Zgqo4QIU}\n}", "abstract":"Several recent advances in AI systems solve problems by providing a \"scaffolding\" program that structures multiple calls to language models to generate better outputs. A scaffolding program is written in a programming language such as Python. In this work, we use a language-model-infused scaffolding program to improve itself. We start with a seed \"improver\" that improves an input program according to a given utility function by querying a language model several times and returning the best solution. We then run this seed improver to improve itself. Across a small set of downstream tasks, the resulting improved improver generates programs with significantly better performance than its seed improver. A variety of self-improvement strategies are proposed by the language model, including beam search, genetic algorithms, and simulated annealing. Since the language models themselves are not altered, this is not full recursive self-improvement. Nonetheless, it demonstrates that a modern language model, GPT-4 in our experiments, is capable of writing code that can call itself to improve itself. We consider concerns around the development of self-improving technologies and evaluate the frequency with which the generated code bypasses a sandbox.", "title":"Self-Taught Optimizer (STOP): Recursively Self-Improving Code Generation", "authors":[ "Eric Zelikman", "Eliana Lorch", "Lester Mackey", "Adam Tauman Kalai" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/microsoft\/stop" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":279 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=3ypWPhMGhV", "bibtext":"@inproceedings{\nchu2024cohesive,\ntitle={Cohesive Conversations: Enhancing Authenticity in Multi-Agent Simulated Dialogues},\nauthor={KuanChao Chu and Yi-Pei Chen and Hideki Nakayama},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=3ypWPhMGhV}\n}", "abstract":"This paper investigates the quality of multi-agent dialogues in simulations powered by Large Language Models (LLMs). Analyzing dialogues and memory over multiple sessions revealed significant issues such as repetition, inconsistency, and hallucination, exacerbated by the propagation of erroneous information. To combat these challenges, we propose a novel Screening, Diagnosis, and Regeneration (SDR) framework that detects and corrects utterance errors through a comprehensive process involving immediate issue identification, evidence gathering from past dialogues, and LLM analysis for utterance revision. By incorporating our SDR framework to Generative Agents (Park et al., 2023), we enhance the diversity, consistency, and factualness of the generated dialogues. This work presents a pioneering approach to enhancing dialogue quality in multi-agent simulations, establishing a new standard for future research in the field.", "title":"Cohesive Conversations: Enhancing Authenticity in Multi-Agent Simulated Dialogues", "authors":[ "KuanChao Chu", "Yi-Pei Chen", "Hideki Nakayama" ], "id":"Conference", "type":"Poster", "arxiv_id":"2407.09897", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":280 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=3nTbuygoop", "bibtext":"@inproceedings{\nwu2024stateflow,\ntitle={StateFlow: Enhancing {LLM} Task-Solving through State-Driven Workflows},\nauthor={Yiran Wu and Tianwei Yue and Shaokun Zhang and Chi Wang and Qingyun Wu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=3nTbuygoop}\n}", "abstract":"It is a notable trend to use Large Language Models (LLMs) to tackle complex tasks, e.g., tasks that require a sequence of actions and dynamic interaction with tools and external environments.\nIn this paper, we propose StateFlow, a novel LLM-based task-solving paradigm that conceptualizes complex task-solving processes as state machines.\nIn StateFlow, we distinguish between \"process grounding\u201d (via state and state transitions) and \"sub-task solving\u201d (through actions within a state), enhancing control and interpretability of the task-solving procedure. A state represents the status of a running process. The transitions between states are controlled by heuristic rules or decisions made by the LLM, allowing for a dynamic and adaptive progression.\nUpon entering a state, a series of actions is executed, involving not only calling LLMs guided by different prompts, but also the utilization of external tools as needed. \n Our results show that StateFlow significantly enhances LLMs' efficiency. For instance, StateFlow achieves 13\\% and 28\\% higher success rates compared to ReAct in InterCode SQL and ALFWorld benchmark, with 5$\\times$ and 3$\\times$ less cost respectively. \nWe also show that StateFlow can be combined with iterative refining methods like Reflexion to further improve performance.", "title":"StateFlow: Enhancing LLM Task-Solving through State-Driven Workflows", "authors":[ "Yiran Wu", "Tianwei Yue", "Shaokun Zhang", "Chi Wang", "Qingyun Wu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yiranwu0\/stateflow" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":281 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=3X2L2TFr0f", "bibtext":"@inproceedings{\nhu2024minicpm,\ntitle={Mini{CPM}: Unveiling the Potential of Small Language Models with Scalable Training Strategies},\nauthor={Shengding Hu and Yuge Tu and Xu Han and Ganqu Cui and Chaoqun He and Weilin Zhao and Xiang Long and Zhi Zheng and Yewei Fang and Yuxiang Huang and Xinrong Zhang and Zhen Leng Thai and Chongyi Wang and Yuan Yao and Chenyang Zhao and Jie Zhou and Jie Cai and Zhongwu Zhai and Ning Ding and Chao Jia and Guoyang Zeng and dahai li and Zhiyuan Liu and Maosong Sun},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=3X2L2TFr0f}\n}", "abstract":"The burgeoning interest in developing Large Language Models (LLMs) with up to trillion parameters has been met with concerns regarding resource efficiency and practical expense, particularly given the immense cost of experimentation. This scenario underscores the importance of exploring the potential of Small Language Models (SLMs) as a resource-efficient alternative. In this context, we introduce MiniCPM, specifically the 1.2B and 2.4B non-embedding parameter variants, not only excel in their respective categories but also demonstrate capabilities on par with 7B-13B LLMs. While focusing on SLMs, our approach exhibits scalability in both model and data dimensions for future LLM research. Regarding model scaling, we employ extensive model wind tunnel experiments for stable and optimal scaling. For data scaling, we introduce a Warmup-Stable-Decay (WSD) learning rate scheduler (LRS), conducive to continuous training and domain adaptation. We present an in-depth analysis of the intriguing training dynamics that occurred in the WSD LRS. With WSD LRS, we are now able to efficiently study data-model scaling law without extensive retraining experiments on both axes of model and data, from which we derive the much higher compute optimal data-model ratio than Chinchilla Optimal. Additionally, we introduce MiniCPM family, including MiniCPM-DPO, MiniCPM-MoE and MiniCPM-128K, whose excellent performance further cementing MiniCPM's foundation in diverse SLM applications. MiniCPM models are available publicly~\\footnote{\\url{https:\/\/github.com\/OpenBMB\/MiniCPM}}.", "title":"MiniCPM: Unveiling the Potential of Small Language Models with Scalable Training Strategies", "authors":[ "Shengding Hu", "Yuge Tu", "Xu Han", "Ganqu Cui", "Chaoqun He", "Weilin Zhao", "Xiang Long", "Zhi Zheng", "Yewei Fang", "Yuxiang Huang", "Xinrong Zhang", "Zhen Leng Thai", "Chongyi Wang", "Yuan Yao", "Chenyang Zhao", "Jie Zhou", "Jie Cai", "Zhongwu Zhai", "Ning Ding", "Chao Jia", "Guoyang Zeng", "dahai li", "Zhiyuan Liu", "Maosong Sun" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/openbmb\/minicpm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":282 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=3TzGD95Jw1", "bibtext":"@inproceedings{\nsu2024timo,\ntitle={Timo: Towards Better Temporal Reasoning for Language Models},\nauthor={Zhaochen Su and Jun Zhang and Tong Zhu and Xiaoye Qu and Juntao Li and Min zhang and Yu Cheng},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=3TzGD95Jw1}\n}", "abstract":"Reasoning about time is essential for Large Language Models (LLMs) to understand the world. Previous works focus on solving specific tasks, primarily on time-sensitive question answering.\nWhile these methods have proven effective, they cannot generalize to a wider spectrum of temporal reasoning tasks.\nTherefore, we propose a crucial question: Can we build a universal framework to handle a variety of temporal reasoning tasks?\nTo that end, we systematically study 38 temporal reasoning tasks.\nBased on the observation that 19 tasks are directly related to mathematics, we first leverage the available mathematical dataset to set a solid foundation for temporal reasoning.\nHowever, the in-depth study indicates that focusing solely on mathematical enhancement falls short of addressing pure temporal reasoning tasks. To mitigate this limitation, we propose a simple but effective self-critic temporal optimization method to enhance the model's temporal reasoning capabilities without sacrificing general task abilities.\nFinally, we develop Timo, a model designed to excel in temporal reasoning at the 7B and 13B scales. Notably, Timo outperforms the counterpart LLMs by 10.0 and 7.6 in average accuracy scores and achieves the new state-of-the-art (SOTA) performance of comparable size. Extensive experiments further validate our framework's effectiveness and its generalization across diverse temporal tasks. The code is available at https:\/\/github.com\/zhaochen0110\/Timo.", "title":"Timo: Towards Better Temporal Reasoning for Language Models", "authors":[ "Zhaochen Su", "Jun Zhang", "Tong Zhu", "Xiaoye Qu", "Juntao Li", "Min zhang", "Yu Cheng" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhaochen0110\/timo" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":283 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=3HTVP34WWE", "bibtext":"@inproceedings{\nwang2024bot,\ntitle={Bot or Human? Detecting Chat{GPT} Imposters with A Single Question},\nauthor={Hong Wang and Xuan Luo and Weizhi Wang and Melody Yu and Xifeng Yan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=3HTVP34WWE}\n}", "abstract":"Large language models (LLMs) like GPT-4 have recently demonstrated impressive capabilities in natural language understanding and generation. However, there is a concern that they can be misused for malicious purposes, such as fraud or denial-of-service attacks. Therefore, it is crucial to develop methods for detecting whether the party involved in a conversation is a bot or a human. In this paper, we propose a framework named **FLAIR**, Finding Large Language Model Authenticity via a Single Inquiry and Response, to detect conversational bots in an online manner. Specifically, we target a single question scenario that can effectively differentiate human users from bots. The questions are divided into two categories: those that are easy for humans but difficult for bots (e.g., counting, substitution, searching, and ASCII art reasoning), and those that are easy for bots but difficult for humans (e.g., memorization and computation). Our approach shows different strengths of these questions in their effectiveness, providing a new way for online service providers to protect themselves against nefarious activities. Our code and question set are available at https:\/\/github.com\/hongwang600\/FLAIR.", "title":"Bot or Human? Detecting ChatGPT Imposters with A Single Question", "authors":[ "Hong Wang", "Xuan Luo", "Weizhi Wang", "Melody Yu", "Xifeng Yan" ], "id":"Conference", "type":"Poster", "arxiv_id":"2305.06424", "GitHub":[ "https:\/\/github.com\/hongwang600\/flair" ], "paper_page":"https:\/\/huggingface.co\/papers\/2305.06424", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":284 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=3GhOWfSLrD", "bibtext":"@inproceedings{\nwang2024will,\ntitle={Will the Real Linda Please Stand up...to Large Language Models? Examining the Representativeness Heuristic in {LLM}s},\nauthor={Pengda Wang and Zilin Xiao and Hanjie Chen and Frederick L. Oswald},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=3GhOWfSLrD}\n}", "abstract":"Although large language models (LLMs) have demonstrated remarkable proficiency in modeling text and generating human-like text, they may exhibit biases acquired from training data in doing so. Specifically, LLMs may be susceptible to a common cognitive trap in human decision-making called the representativeness heuristic. This is a concept in psychology that refers to judging the likelihood of an event based on how closely it resembles a well-known prototype or typical example, versus considering broader facts or statistical evidence. This research investigates the impact of the representativeness heuristic on LLM reasoning. We created ReHeAT (Representativeness Heuristic AI Testing), a dataset containing a series of problems spanning six common types of representativeness heuristics. Experiments reveal that four LLMs applied to ReHeAT all exhibited representativeness heuristic biases. We further identify that the model's reasoning steps are often incorrectly based on a stereotype rather than on the problem's description. Interestingly, the performance improves when adding a hint in the prompt to remind the model to use its knowledge. This suggests the uniqueness of the representativeness heuristic compared to traditional biases. It can occur even when LLMs possess the correct knowledge while falling into a cognitive trap. This highlights the importance of future research focusing on the representativeness heuristic in model reasoning and decision-making and on developing solutions to address it.", "title":"Will the Real Linda Please Stand up...to Large Language Models? Examining the Representativeness Heuristic in LLMs", "authors":[ "Pengda Wang", "Zilin Xiao", "Hanjie Chen", "Frederick L. Oswald" ], "id":"Conference", "type":"Oral", "arxiv_id":"2404.01461", "GitHub":[ "https:\/\/github.com\/mrzilinxiao\/llmheuristicreheat" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":285 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=2wtj0up8rv", "bibtext":"@inproceedings{\nzhou2024enhancing,\ntitle={Enhancing Language Models with Idiomatic Reasoning},\nauthor={Jianing Zhou and Ziheng Zeng and Hongyu Gong and Suma Bhat},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=2wtj0up8rv}\n}", "abstract":"Advancements in Large Language Models (LLMs) have significantly propelled the field of Natural Language Processing (NLP); however, nuanced reasoning in the presence of non-canonical language forms, such as figurative language, remains an intricate challenge. These language forms, integral to human communication, elude standard LLM comprehension due to their inherent non-compositionality, contextual ambiguity, and sparse representation in text corpora. Addressing these challenges, this paper introduces an innovative approach to seamlessly incorporate idiomatic knowledge into pre-trained language models (PTLMs). Our methodology first employs a multi-view data augmentation strategy that uses idiomatic instances representing one property to generate training data for various idiom-related tasks. When combined with a novel parameter-efficient tuning mechanism that accounts for the unique attributes of idiomatic language, we embed task-specific and idiomaticity-aware inductive biases within a PTLM. Integrating a meta-pretraining protocol based on meta-learning principles, further equips the model with enhanced adaptability to diverse downstream idiom-aware tasks. Empirical validation on diverse benchmarks centered around idiom comprehension and reasoning, demonstrates the efficacy of our approach. Notably, our model surpasses various parameter-efficient fine-tuning baselines outperforming the conventional full fine-tuning paradigms, thereby creating more contextually aware and linguistically robust language models.", "title":"Enhancing Language Models with Idiomatic Reasoning", "authors":[ "Jianing Zhou", "Ziheng Zeng", "Hongyu Gong", "Suma Bhat" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":286 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=2oHnsM9M9D", "bibtext":"@inproceedings{\nbrassard2024acorn,\ntitle={{ACORN}: Aspect-wise Commonsense Reasoning Explanation Evaluation},\nauthor={Ana Brassard and Benjamin Heinzerling and Keito Kudo and Keisuke Sakaguchi and Kentaro Inui},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=2oHnsM9M9D}\n}", "abstract":"Evaluating the quality of free-text explanations is a multifaceted, subjective, and labor-intensive task. Large language models (LLMs) present an appealing alternative due to their potential for consistency, scalability, and cost-efficiency. In this work, we present ACORN, a new dataset of 3,500 free-text explanations and aspect-wise quality ratings, and use it to evaluate how LLMs rate explanations. We observed that larger models outputted labels that maintained or increased the inter-annotator agreement, suggesting that they are within the expected variance between human raters. However, their correlation with majority-voted human ratings varied across different quality aspects, indicating that they are not a complete replacement. In turn, using LLMs as a supplement to a smaller group of human raters in some cases improved the correlation with the original majority labels. However, the effect was limited to cases where human raters were scarce, and an additional human rater had a more pronounced effect in all cases. Overall, we recommend against using LLMs as a complete replacement for human raters but encourage using them in configurations that end with targeted human involvement.", "title":"ACORN: Aspect-wise Commonsense Reasoning Explanation Evaluation", "authors":[ "Ana Brassard", "Benjamin Heinzerling", "Keito Kudo", "Keisuke Sakaguchi", "Kentaro Inui" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/a-brassard\/acorn" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":287 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=2nTzomzjjb", "bibtext":"@inproceedings{\njin2024prollm,\ntitle={Pro{LLM}: Protein Chain-of-Thoughts Enhanced {LLM} for Protein-Protein Interaction Prediction},\nauthor={Mingyu Jin and Haochen Xue and Zhenting Wang and Boming Kang and Ruosong Ye and Kaixiong Zhou and Mengnan Du and Yongfeng Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=2nTzomzjjb}\n}", "abstract":"The prediction of protein-protein interactions (PPIs) is crucial for understanding biological functions and diseases. Previous machine learning approaches to PPI prediction mainly focus on direct physical interactions, ignoring the broader context of nonphysical connections through intermediate proteins, thus limiting their effectiveness. The emergence of Large Language Models (LLMs) provides a new opportunity for addressing this complex biological challenge. By transforming structured data into natural language prompts, we can map the relationships between proteins into texts. This approach allows LLMs to identify indirect connections between proteins, tracing the path from upstream to downstream. Therefore, we propose a novel framework ProLLM that employs an LLM tailored for PPI for the first time. Specifically, we propose Protein Chain of Thought (ProCoT), which replicates the biological mechanism of signaling pathways as natural language prompts. ProCoT considers a signaling pathway as a protein reasoning process, which starts from upstream proteins and passes through several intermediate proteins to transmit biological signals to downstream proteins. Thus, we can use ProCoT to predict the interaction between upstream proteins and downstream proteins. The training of ProLLM employs the ProCoT format, which enhances the model's understanding of complex biological problems. In addition to ProCoT, this paper also contributes to the exploration of embedding replacement of protein sites in natural language prompts, and instruction fine-tuning in protein knowledge datasets. We demonstrate the efficacy of ProLLM through rigorous validation against benchmark datasets, showing significant improvement over existing methods in terms of prediction accuracy and generalizability. Our results highlight the potential of LLMs to transform the field of PPI, serving as a robust potential tool for various categories of biological and medical research. The code is available at: https:\/\/anonymous.4open.science\/r\/ProLLM-AB04.", "title":"ProLLM: Protein Chain-of-Thoughts Enhanced LLM for Protein-Protein Interaction Prediction", "authors":[ "Mingyu Jin", "Haochen Xue", "Zhenting Wang", "Boming Kang", "Ruosong Ye", "Kaixiong Zhou", "Mengnan Du", "Yongfeng Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mingyuj666\/prollm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":288 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=2cop2jmQVL", "bibtext":"@inproceedings{\ngandhi2024stream,\ntitle={Stream of Search (SoS): Learning to Search in Language},\nauthor={Kanishk Gandhi and Denise H J Lee and Gabriel Grand and Muxin Liu and Winson Cheng and Archit Sharma and Noah Goodman},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=2cop2jmQVL}\n}", "abstract":"Language models are rarely shown fruitful mistakes while training. They then struggle to look beyond the next token, suffering from a snowballing of errors and struggling to predict the consequence of their actions several steps ahead. In this paper, we show how language models can be taught to search by representing the process of search in language, as a flattened string --- stream of search (SoS). We propose a unified language for search that captures an array of different symbolic search strategies. We demonstrate our approach using the simple yet difficult game of Countdown, where the goal is to combine input numbers with arithmetic operations to reach a target number. We pretrain a transformer-based language model from scratch on a dataset of streams of search generated by heuristic solvers. We find that SoS pretraining increases search accuracy by 25\\% over models trained to predict only the optimal search trajectory. We further finetune this model with two policy improvement methods: Advantage-Induced Policy Alignment (APA) and Self-Taught Reasoner (STaR). The finetuned SoS models solve 36\\% of previously unsolved problems, including problems that cannot be solved by any of the heuristic solvers. Our results indicate that language models can learn to solve problems via search, self-improve to flexibly use different search strategies, and potentially discover new ones.", "title":"Stream of Search (SoS): Learning to Search in Language", "authors":[ "Kanishk Gandhi", "Denise H J Lee", "Gabriel Grand", "Muxin Liu", "Winson Cheng", "Archit Sharma", "Noah Goodman" ], "id":"Conference", "type":"Oral", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/kanishkg\/stream-of-search" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":289 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=1pgfvZj0Rx", "bibtext":"@inproceedings{\ngrabb2024risks,\ntitle={Risks from Language Models for Automated Mental Healthcare: Ethics and Structure for Implementation},\nauthor={Declan Grabb and Max Lamparth and Nina Vasan},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=1pgfvZj0Rx}\n}", "abstract":"Amidst the growing interest in developing task-autonomous AI for automated mental health care, this paper addresses the ethical and practical challenges associated with the issue and proposes a structured framework that delineates levels of autonomy, outlines ethical requirements, and defines beneficial default behaviors for AI agents in the context of mental health support. We also evaluate fourteen state-of-the-art language models (ten off-the-shelf, four fine-tuned) using 16 mental health-related questions designed to reflect various mental health conditions, such as psychosis, mania, depression, suicidal thoughts, and homicidal tendencies. The question design and response evaluations were conducted by mental health clinicians (M.D.s). We find that existing language models are insufficient to match the standard provided by human professionals who can navigate nuances and appreciate context. This is due to a range of issues, including overly cautious or sycophantic responses and the absence of necessary safeguards. Alarmingly, we find that most of the tested models could cause harm if accessed in mental health emergencies, failing to protect users and potentially exacerbating existing symptoms. We explore solutions to enhance the safety of current models. Before the release of increasingly task-autonomous AI systems in mental health, it is crucial to ensure that these models can reliably detect and manage symptoms of common psychiatric disorders to prevent harm to users. This involves aligning with the ethical framework and default behaviors outlined in our study. We contend that model developers are responsible for refining their systems per these guidelines to safeguard against the risks posed by current AI technologies to user mental health and safety.\n\nTrigger warning: Contains and discusses examples of sensitive mental health topics, including suicide and self-harm.", "title":"Risks from Language Models for Automated Mental Healthcare: Ethics and Structure for Implementation", "authors":[ "Declan Grabb", "Max Lamparth", "Nina Vasan" ], "id":"Conference", "type":"Poster", "arxiv_id":"2406.11852", "GitHub":[ "https:\/\/github.com\/maxlampe\/taimh_eval" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":290 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=1eg6UnpYu7", "bibtext":"@inproceedings{\nwu2024prompt,\ntitle={Prompt Public Large Language Models to Synthesize Data for Private On-device Applications},\nauthor={Shanshan Wu and Zheng Xu and Yanxiang Zhang and Yuanbo Zhang and Daniel Ramage},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=1eg6UnpYu7}\n}", "abstract":"Pre-training on public data is an effective method to improve the performance for federated learning (FL) with differential privacy (DP). This paper investigates how large language models (LLMs) trained on public data can improve the quality of pre-training data for the on-device language models trained with DP and FL. We carefully design LLM prompts to filter and transform existing public data, and generate new data to resemble the real user data distribution. The model pre-trained on our synthetic dataset achieves relative improvement of 19.0\\% and 22.8\\% in next word prediction accuracy compared to the baseline model pre-trained on a standard public dataset, when evaluated over the real user data in Gboard (Google Keyboard, a production mobile keyboard application). Furthermore, our method achieves evaluation accuracy better than or comparable to the baseline during the DP FL fine-tuning over the user data from millions of mobile devices, and our final model outperforms the baseline in production A\/B testing. Our experiments demonstrate the strengths of LLMs in synthesizing data close to the private distribution even without accessing the private data, and also suggest future research directions to further reduce the distribution gap.", "title":"Prompt Public Large Language Models to Synthesize Data for Private On-device Applications", "authors":[ "Shanshan Wu", "Zheng Xu", "Yanxiang Zhang", "Yuanbo Zhang", "Daniel Ramage" ], "id":"Conference", "type":"Poster", "arxiv_id":"2404.04360", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":291 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=1ba209BACA", "bibtext":"@inproceedings{\nwu2024agentdocedit,\ntitle={Agent-DocEdit: Language-Instructed {LLM} Agent for Content-Rich Document Editing},\nauthor={Te-Lin Wu and Rajiv Jain and Yufan Zhou and Puneet Mathur and Vlad I Morariu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=1ba209BACA}\n}", "abstract":"Editing content-rich and multimodal documents, such as posters, flyers, and slides, can be tedious if the edits are complex, repetitive, or require subtle skills and deep knowledge of the editing software.\nMotivated by recent advancements in both Large Language Model (LLM) agents and multimodal modeling, we propose a framework that automates document editing which takes as input a linguistic edit request from the user and then performs sequential editing actions to the document the satisfy the request.\nOur proposed method, Agent-DocEdit, first grounds the edit request directly in the underlying document structure to identify the elements that need to be manipulated. Then, we rely on the agent capabilities of LLMs to generate an edit program which calls a set of pre-defined APIs to modify the underlying structure of the document.\nTo improve the generated edit program, we leverage a feedback mechanism incorporating a deterministic code executor and a multimodal LLM.\nWe demonstrate the effectiveness of our proposed modularized LLM editing agent on the DocEdit dataset, where Agent-DocEdit outperforms existing state-of-the-art methods by 70+% in document element grounding and 16+% on final rendition generation.", "title":"Agent-DocEdit: Language-Instructed LLM Agent for Content-Rich Document Editing", "authors":[ "Te-Lin Wu", "Rajiv Jain", "Yufan Zhou", "Puneet Mathur", "Vlad I Morariu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":292 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=1Tny4KgGO2", "bibtext":"@inproceedings{\nxie2024from,\ntitle={From Strategic Narratives to Code-Like Cognitive Models: An {LLM}-Based Approach in A Sorting Task},\nauthor={Hanbo Xie and Hua-Dong Xiong and Robert Wilson},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=1Tny4KgGO2}\n}", "abstract":"One of the goals of Cognitive Science is to understand the cognitive processes underlying human behavior. Traditionally, this goal has been approached by analyzing simple behaviors, such as choices and response times, to try to indirectly infer mental processes. However, a more direct approach is to simply ask people to report their thoughts - for example, by having them Introspect after the fact about the thought processes they used to complete a task. However, the data generated by such verbal reports have been hard to analyze, and whether the reported thoughts are an accurate reflection of the underlying cognitive processes has been difficult to test. Here we take a first stab at addressing these questions by using large language models to analyze verbally reported strategies in a sorting task. In the task, participants sort lists of pictures with unknown orders by pairwise comparison. After completing the task, participants wrote a description of their strategy for completing the task. To test whether these strategy descriptions contained information about people\u2019s actual strategies, we compared their choice behavior with their descriptions of the task. First, we compared the descriptions and choices at the level of strategy, finding that people who used similar sorting algorithms (based on their choices) provided similar verbal descriptions (based on the embeddings of these descriptions in the LLM). Next, we generated code based on their strategy descriptions using GPT-4-Turbo and compared the simulated behaviors from the code to their actual choice behavior, showing that the LLM-generated code predicts choice more accurately than chance other, more stringent, controls. Finally, we also compare the simulated behaviors of generated codes with those from standard algorithms and induct the strategies that this code internally represents. In sum, our study offers a novel approach to modeling human cognitive processes by building code-like cognitive models from introspections, shedding light on the intersection of Artificial Intelligence and Cognitive Sciences.", "title":"From Strategic Narratives to Code-Like Cognitive Models: An LLM-Based Approach in A Sorting Task", "authors":[ "Hanbo Xie", "Hua-Dong Xiong", "Robert Wilson" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":293 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=18iNTRPx8c", "bibtext":"@inproceedings{\nchen2024see,\ntitle={See What {LLM}s Cannot Answer: A Self-Challenge Framework for Uncovering {LLM} Weaknesses},\nauthor={Yulong Chen and Yang Liu and Jianhao Yan and Xuefeng Bai and Ming Zhong and Yinghao Yang and Ziyi Yang and Chenguang Zhu and Yue Zhang},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=18iNTRPx8c}\n}", "abstract":"The impressive performance of Large Language Models (LLMs) has consistently surpassed numerous human-designed benchmarks, presenting new challenges in assessing the shortcomings of LLMs. \nDesigning tasks and finding LLMs' limitations are becoming increasingly important.\nIn this paper, we investigate the question of whether an LLM can discover its own limitations from the errors it makes. \nTo this end, we propose a Self-Challenge evaluation framework with human-in-the-loop.\nStarting from seed instances that GPT-4 fails to answer, we prompt GPT-4 to summarize error patterns that can be used to generate new instances and incorporate human feedback on them to refine these patterns for generating more challenging data, iteratively.\nWe end up with 8 diverse patterns, such as text manipulation and questions with assumptions. \nWe then build a benchmark, SC-G4, consisting of 1,835 instances generated by GPT-4 using these patterns, with human-annotated gold responses.\nThe SC-G4 serves as a challenging benchmark that allows for a detailed assessment of LLMs' abilities. \nOur results show that only 44.96\\% of instances in SC-G4 can be answered correctly by GPT-4. \nInterestingly, our pilot study indicates that these error patterns also challenge other LLMs, such as Claude-3 and Llama-3, and cannot be fully resolved through fine-tuning. Our work takes the first step to demonstrate that LLMs can autonomously identify their inherent flaws and provide insights for future dynamic and automatic evaluation.", "title":"See What LLMs Cannot Answer: A Self-Challenge Framework for Uncovering LLM Weaknesses", "authors":[ "Yulong Chen", "Yang Liu", "Jianhao Yan", "Xuefeng Bai", "Ming Zhong", "Yinghao Yang", "Ziyi Yang", "Chenguang Zhu", "Yue Zhang" ], "id":"Conference", "type":"Poster", "arxiv_id":"2408.08978", "GitHub":[ "https:\/\/github.com\/cylnlp\/Self-Challenge-GPT4" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":294 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=0oiG1KigYN", "bibtext":"@inproceedings{\nadams2024speer,\ntitle={{SPEER}: Sentence-Level Planning of Long Clinical Summaries via Embedded Entity Retrieval},\nauthor={Griffin Thomas Adams and Jason Zucker and No{\\'e}mie Elhadad},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=0oiG1KigYN}\n}", "abstract":"Clinician must write a lengthy summary each time a patient is discharged from the hospital. This task is time-consuming due to the sheer number of unique clinical concepts covered in the admission. Identifying and covering salient entities is vital for the summary to be clinically useful. We fine-tune open-source LLMs (Mistral-7B-Instruct and Zephyr-7B-$\\beta$) on the task and find that they generate incomplete and unfaithful summaries. To increase entity coverage, we train a smaller, encoder-only model to predict salient entities, which are treated as content-plans to guide the LLM. To encourage the LLM to focus on specific mentions in the source notes, we propose SPEER: Sentence-level Planning via Embedded Entity Retrieval. Specifically, we mark each salient entity span with special \"{{ }}\" boundary tags and instruct the LLM to retrieve marked spans before generating each sentence. Sentence-level planning acts as a form of state tracking in that the model is explicitly recording the entities it uses. We fine-tune Mistral and Zephyr variants on a large-scale, diverse dataset of ~167k in-patient hospital admissions and evaluate on 3 datasets. SPEER shows gains in both coverage and faithfulness metrics over non-guided and guided baselines.", "title":"SPEER: Sentence-Level Planning of Long Clinical Summaries via Embedded Entity Retrieval", "authors":[ "Griffin Thomas Adams", "Jason Zucker", "No\u00e9mie Elhadad" ], "id":"Conference", "type":"Poster", "arxiv_id":"2401.02369", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":295 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=0o95CVdNuz", "bibtext":"@inproceedings{\nzhang2024effective,\ntitle={Effective Prompt Extraction from Language Models},\nauthor={Yiming Zhang and Nicholas Carlini and Daphne Ippolito},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=0o95CVdNuz}\n}", "abstract":"The text generated by large language models is commonly controlled by\nprompting, where a prompt prepended to a user\u2019s query guides the model\u2019s\noutput. The prompts used by companies to guide their models are often\ntreated as secrets, to be hidden from the user making the query. They\nhave even been treated as commodities to be bought and sold on market-\nplaces. However, anecdotal reports have shown adversarial users employ-\ning prompt extraction attacks to recover these prompts. In this paper, we\npresent a framework for systematically measuring the effectiveness of these\nattacks. In experiments with 3 different sources of prompts and 11 underly-\ning large language models, we find that simple text-based attacks can in\nfact reveal prompts with high probability. Our framework determines with\nhigh precision whether an extracted prompt is the actual secret prompt,\nrather than a model hallucination. Prompt extraction from real systems\nsuch as Claude 3 and ChatGPT further suggest that system prompts can be\nrevealed by an adversary despite existing defenses in place.", "title":"Effective Prompt Extraction from Language Models", "authors":[ "Yiming Zhang", "Nicholas Carlini", "Daphne Ippolito" ], "id":"Conference", "type":"Poster", "arxiv_id":"2307.06865", "GitHub":[ "https:\/\/github.com\/y0mingzhang\/prompt-extraction" ], "paper_page":"https:\/\/huggingface.co\/papers\/2307.06865", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":2, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":1, "unique_id":296 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=0VLBwQGWpA", "bibtext":"@inproceedings{\nyang2024react,\ntitle={ReAct Meets ActRe: Autonomous Annotation of Agent Trajectories for Contrastive Self-Training},\nauthor={Zonghan Yang and Peng Li and Ming Yan and Ji Zhang and Fei Huang and Yang Liu},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=0VLBwQGWpA}\n}", "abstract":"Language agents have demonstrated autonomous decision-making abilities by reasoning with foundation models. Recently, efforts have been made to train language agents for performance improvement, with multi-step reasoning and action trajectories as the training data. However, collecting such trajectories still requires considerable human effort, by either artificial annotation or implementations of diverse prompting frameworks. In this work, we propose A$^\\mathbf{3}$T, a framework that enables the Autonomous Annotation of Agent Trajectories in the style of ReAct. The central role is an ActRe prompting agent, which explains the reason for an arbitrary action. When randomly sampling an external action, the ReAct-style agent could query the ActRe agent with the action to obtain its textual rationales. Novel trajectories are then synthesized by prepending the posterior reasoning from ActRe to the sampled action. In this way, the ReAct-style agent executes multiple trajectories for the failed tasks, and selects the successful ones to supplement its failed trajectory for contrastive self-training. Realized by policy gradient methods with binarized rewards, the contrastive self-training with accumulated trajectories facilitates a closed loop for multiple rounds of language agent self-improvement. We conduct experiments using QLoRA fine-tuning with the open-sourced Mistral-7B-Instruct-v0.2. In AlfWorld, the agent trained with A$^3$T obtains a 1-shot success rate of 96\\%, and 100\\% success with 4 iterative rounds. In WebShop, the 1-shot performance of the A$^3$T agent matches human average, and 4 rounds of iterative refinement lead to the performance approaching human experts. A$^3$T agents significantly outperform existing techniques, including prompting with GPT-4, advanced agent frameworks, and fully fine-tuned LLMs.", "title":"ReAct Meets ActRe: Autonomous Annotation of Agent Trajectories for Contrastive Self-Training", "authors":[ "Zonghan Yang", "Peng Li", "Ming Yan", "Ji Zhang", "Fei Huang", "Yang Liu" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":297 }, { "bibtex_url":null, "proceedings":"https:\/\/openreview.net\/forum?id=0UK8c2kg7c", "bibtext":"@inproceedings{\nhu2024instructav,\ntitle={Instruct{AV}: Instruction Fine-tuning Large Language Models for Authorship Verification},\nauthor={Yujia Hu and Zhiqiang Hu and Chun Wei Seah and Roy Ka-Wei Lee},\nbooktitle={First Conference on Language Modeling},\nyear={2024},\nurl={https:\/\/openreview.net\/forum?id=0UK8c2kg7c}\n}", "abstract":"Large Language Models (LLMs) have demonstrated remarkable proficiency in a wide range of NLP tasks. However, when it comes to authorship verification (AV) tasks, which involve determining whether two given texts share the same authorship, even advanced models like ChatGPT exhibit notable limitations. This paper introduces a novel approach, termed InstructAV, for authorship verification. This approach utilizes LLMs in conjunction with a parameter-efficient fine-tuning (PEFT) method to simultaneously improve accuracy and explainability. The distinctiveness of InstructAV lies in its ability to align classification decisions with transparent and understandable explanations, representing a significant progression in the field of authorship verification. Through comprehensive experiments conducted across various datasets, InstructAV demonstrates its state-of-the-art performance on the AV task, offering high classification accuracy coupled with enhanced explanation reliability.", "title":"InstructAV: Instruction Fine-tuning Large Language Models for Authorship Verification", "authors":[ "Yujia Hu", "Zhiqiang Hu", "Chun Wei Seah", "Roy Ka-Wei Lee" ], "id":"Conference", "type":"Poster", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Social-AI-Studio\/InstructAV" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "paper_page_exists_pre_conf":0, "unique_id":298 } ]