id
stringlengths 10
10
| title
stringlengths 3
179
| track
stringclasses 1
value | status
stringclasses 3
values | keywords
stringlengths 2
2.39k
| primary_area
stringclasses 21
values | author
stringclasses 501
values | authorids
stringclasses 501
values | aff
stringclasses 1
value | aff_domain
stringclasses 1
value | position
stringclasses 1
value | rating
stringclasses 355
values | confidence
stringlengths 0
19
| soundness
stringclasses 642
values | contribution
stringclasses 596
values | presentation
stringclasses 782
values | rating_avg
float64 0
9
| confidence_avg
float64 0
5
| soundness_avg
float64 0
4
| contribution_avg
float64 0
4
| presentation_avg
float64 0
4
| corr_rating_confidence
float64 -1
1
| project
stringclasses 1
value | github
stringclasses 1
value | Review
listlengths 2
10
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4sDicVEy6M | What Do You See in Common? Learning Hierarchical Prototypes over Tree-of-Life to Discover Evolutionary Traits | main | Active | deep learning;interpretability;prototype-based neural network;phylogeny;computer vision | applications to physical sciences (physics, chemistry, biology, etc.) | 5;6;6;6;6 | 3;2;3;3;4 | 2;3;3;3;2 | 3;2;3;2;3 | 3;3;3;2;3 | 5.8 | 3 | 2.6 | 2.6 | 2.8 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please kindly refer the the weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The motivation is interesting and well-justified. The hierarchical structure of biological data presents significant challenges for distinguishing species and identifying evolutionary traits.\n2. The techniques employed in HComP-Net appear technically feasible. The use of contrastive loss for learning clustered features has proven effective in self-supervised learning, while orthogonality loss helps capture diverse features.\n3. The visualization results are clear and impressive.\n4. The paper is well-structured and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper applies deep learning techniques to uncover evolutionary traits in biological data. It leverages contrastive and orthogonality losses to facilitate hierarchical prototype learning. Additionally, the paper introduces over-specificity and discriminative losses to guide and constrain model training. The proposed method demonstrates improved performance over baseline methods across multiple benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It seems that the over-specificity and discriminative losses play opposing roles in the direction of model optimization, which raises the question of whether these two losses might interact and lead to abnormal model convergence. It would be beneficial if the authors could provide some theoretical or experimental analysis on this issue.\n2. This is a minor point, but the overall method appears to be a combination of multiple techniques, making the flowchart somewhat complex and redundant."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper investigates a highly intriguing task: identifying visual features preserved during species evolution. I believe this task is inherently challenging due to the limited and often insufficient quality of training data, making it difficult to obtain stable, semantically interpretable visual features. In this work, the design of the loss function and the associated explanations are intuitive and easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work a novel framework called Hierarchy aligned Commonality through Prototypical Networks (HComP-Net) aimed at discovering evolutionary traits among species by learning hierarchical prototypes over the tree of life. It addresses the challenges of existing prototype-based methods that often produce over-specific prototypes at internal nodes, which can hinder the identification of common traits shared by descendant species. HComP-Net employs a unique over-specificity loss, a discriminative loss to ensure prototypes are absent in contrasting species, and a masking module to maintain classification performance. Through empirical analysis on various datasets, including birds, fishes, turtles, and butterflies, the authors demonstrate that HComP-Net effectively learns accurate, semantically consistent, and generalizable prototypes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The first comment is related to the choice of using visual data to identify common features in species evolution. Given the scarcity of high-quality biological images, especially in the context of vast evolutionary networks, and the inherent issues of imbalance and interference in such images, I wonder if it might be more precise to analyze common traits directly from textual descriptions or anatomical data. Could you please elaborate on the rationale behind prioritizing visual data for this task?\n\n2. The paper introduces several loss functions aimed at ensuring the diversity and effectiveness of the learned prototypes. I would be very interested to see ablation studies on these loss functions to better understand their individual impact.\n\n3. In Figure 4, when comparing the part consistency between HComP-Net and HPNet, different bird images are used. I am curious to know if this choice is justified and, if so, what the reasoning behind it is. Would it not be more appropriate to use the same images for a clearer comparison?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The paper discusses many similarities with ProtoPNet and refers to it often, e.g. “For HPnet, we used the same hyperparameter settings and training strategy as used by ProtoPNet for the CUB-200-2011 dataset”, “We follow the same training strategy as provided by ProtoPNet for the CUB-200-2011 dataset.”. Why ProtoPNet is not used in the comparative experiments (also other non-hierarchical models are used there)?\n- How can the paper help to “advance our understanding of evolution”? E.g. There is a high chance that for the common species (whose pictures are available in large-scale datasets) the same features that were already used to make such a classification will be highlighted. Could the authors present a use case, in which the solution could be successfully used in practice to contribute to the understanding of evolution?\n- How are the representative images (also could be perceived as prototypical images) chosen from the dataset to allow for proper explanations (some images can be taken e.g. from a non-typical angle and do not show the prototypical features well and make the explanations difficult)?\n- Is it possible (and if yes, how) to use this solution in other hierarchical problems (not in the area of biology)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The idea presented in the paper is interesting and original. The idea is quite simple (which is a plus). I liked that the authors aimed to present a method that can help to facilitate the analysis of different species in biology. \n- The paper is well-written, and also presents a lot of nice and clean graphics and examples that make the content understandable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a new framework that can be used for learning biological prototypes within hierarchies while avoiding the learning\nof over-specific features at internal nodes of the genetic tree. The authors perform tests with different datasets including mostly the pictures of birds, fishes and butterflies. The authors focus on the quantitative and qulitative evaluation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The motivation of the paper says that there are many image datasets in the biology, so ML can be used to provide some new visual suggestions for common traits in species belonging to a common group. Nevertheless, such suggestions seem to be more important for some newly discovered species (and not the ones that are already well-known), and for such species there is a possibility of not having so many images. This can decrease the practicality of the method. Statements such as “Furthermore, HComP-Net demonstrates a unique ability to generate novel hypotheses about evolutionary traits, showcasing its potential in advancing our understanding of evolution” are too bold in my opinion.\n- Another issue with a possible practical use of the method is that the proposed solution to provide semantically meaningful information requires human annotation, which can be very subjective. The authors mention this limitation in the appendix, however they do not give any solution for a mitigation. \n- minor: some typos/grammatical mistakes can be found in the paper, e.g. “that are not just shared across all it descendant species but are also” -> “that are not just shared across all ITS descendant species but are also”"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) The CNN backbone used was ConvNeXt-tiny architecture; why the fine-grained accuracy from this architecture is not included in Table 1? Will it be better/worse when compared to other methods on the Table?\n\n2) In terms of semantically meaningful prototypes, could you discuss the possibility of obtaining similar findings using Explainable AI techniques?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Originality: The research builds on the problem initially formulated in HPNet (Haze et al., 2019), specifically addressing the challenge of over-specific prototypes. The authors introduced over-specificity and discriminative loss functions, enabling HComP-Net to learn prototypes that adhere to the hierarchical structure of a phylogenetic tree. This approach enhances interpretability and brings a fresh perspective to prototype learning.\n\nQuality: Given the identified problem of over-specific prototypes, the authors demonstrate the efficacy of their methods across fine-grained classification tasks, also showing advancements in interpretability. The proposed model performs consistently well in these tasks, validating its quality and effectiveness.\n\nClarity: The paper is well-organized, with clear explanations of the background, literature, methodology, experimental setup, and results. This clarity enhances the readability and accessibility of the research, making its contributions understandable and well-supported.\n\nSignificance: This work highlights the potential of interpretable representation learning driven by structured hierarchical knowledge. By using a phylogenetic tree for guidance, the model provides insights into trait evolution and aligns closely with the biology-inspired data structure, marking a contribution at the intersection of AI and biology."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work presents an extended approach for hierarchical prototype learning by training a neural network to discover hierarchical prototypes using structured information from a phylogenetic tree. Building upon HPNet (Haze et al., 2019), this approach, termed HComP-Net, contrasts with traditional models that use flat structures by incorporating loss functions such as over-specificity loss and discriminative loss. These functions enable HComP-Net to learn prototypes that align with the hierarchical structure of the tree, enhancing interpretability and consistency. Empirical results highlight HComP-Net’s ability to produce accurate, semantically coherent prototypes transferable to unseen species. Tested on a dataset of 190 bird species and additional organisms, the model performs better than baseline models. Additionally, HComP-Net has been shown to generate visual hypotheses on evolutionary traits, offering insight into traits across various levels of the phylogenetic tree. This research underscores the potential of interpretable representation learning using structured hierarchical prior knowledge."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) On the Need for a Separate Masking Module\nThe fact that an additional masking module is required suggests that the initial contributions, particularly the over-specificity and discriminative loss functions, may not fully address the issue of over-specific prototypes. This need points to a limitation in the proposed loss functions' effectiveness in preventing prototypes from becoming overly tailored to specific species. Ideally, a more robust solution would directly manage prototype specificity through the loss functions alone, reducing reliance on extra modules that could complicate the model and potentially impact interpretability or scalability.\n\n2) On the Role of the Phylogenetic Tree in Classification\nIn section 5.1, the authors suggest that achieving high classification accuracy is not the primary goal. However, providing the model with additional information, like a phylogenetic tree during training and inference, could indeed aid classification performance. The tree may allow the model to leverage hierarchical relationships, which could enhance classification accuracy by using shared traits among related species. Thus, there seems to be a disconnect between the claim of not prioritizing classification accuracy and the model's design, which inherently includes information that could enhance it."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Why does the over-specificity loss adopt a specific log-tanh loss form? Doesn’t it simply establish an arbitrary criterion for identifying overly specific prototypes? This choice requires further explanation and discussion."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The method details are well-demonstrated, with a clear overview and examples that make the content easy to follow.\n\nThe study presents a novel approach to improving the interpretability of prototypical networks, allowing for more accurate representation of parent-child relationships.\n\nIt enhances the specialization of deep networks applied in scientific discovery, particularly in phylogenetic analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses a scientific problem: discovering evolutionary traits in biology from images. To tackle this, it proposes the Hierarchy-Aligned Commonality through Prototypical Network (HComP-Net). The primary objective is to reduce over-specific prototypes that lose common features expected to be observed in all species sharing a common ancestor. To achieve this, the approach applies contrastive, orthogonality, and discriminative losses to the prototypes, and introduces over-specificity loss and masking to mitigate over-specific prototypes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Two limits of experimental demonstration. \n1. The primary contribution lies in incorporating parent-child relationships to reduce over-specific prototypes within the contrastive losses, while the architecture does not appear to include specific structures for establishing a prototype hierarchy.\nIn my understanding, proposing a hierarchical structure is a contribution of HPNet, not HCompPNet. This point should be clarified in title and method description. \n2. The exclusion of certain related works leaves me a question about novelty on practical impact. For instance, PIPNet, a recent and closely related method employing self-supervised learning (2023), is not included in the comparison. It only uses HPNet from 2019. However, semantic gaps has relevance to over-specificity, and authors also mention strong motivation from PIPNet. The reason why the network is excluded needs more detailed explanation or comparison in experiments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024what,\ntitle={What Do You See in Common? Learning Hierarchical Prototypes over Tree-of-Life to Discover Evolutionary Traits},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4sDicVEy6M},\nnote={under review}\n}"
},
"abstract": {
"value": "A grand challenge in biology is to discover evolutionary traits---features of organisms common to a group of species with a shared ancestor in the tree of life (also referred to as phylogenetic tree). With the growing availability of image repositories in biology, there is a tremendous opportunity to discover evolutionary traits directly from images in the form of a hierarchy of prototypes. However, current prototype-based methods are mostly designed to operate over a flat structure of classes and face several challenges in discovering hierarchical prototypes, including the issue of learning over-specific prototypes at internal nodes. To overcome these challenges, we introduce the framework of Hierarchy aligned Commonality through Prototypical Networks (HComP-Net). The key novelties in HComP-Net include a novel over-specificity loss to avoid learning over-specific prototypes, a novel discriminative loss to ensure prototypes at an internal node are absent in the contrasting set of species with different ancestry, and a novel masking module to allow for the exclusion of over-specific prototypes at higher levels of the tree without hampering classification performance. We empirically show that HComP-Net learns prototypes that are accurate, semantically consistent, and generalizable to unseen species in comparison to baselines."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"deep learning",
"interpretability",
"prototype-based neural network",
"phylogeny",
"computer vision"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9c41d5e1c49689ab4c83247c9ad9d777f3a49966.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "What Do You See in Common? Learning Hierarchical Prototypes over Tree-of-Life to Discover Evolutionary Traits"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4sJ2FYE65U | Neural Multi-Objective Combinatorial Optimization via Graph-Image Multimodal Fusion | main | Active | Neural Multi-Objective Combinatorial Optimization;Multimodal Fusion;Deep Reinforcement Learning | other topics in machine learning (i.e., none of the above) | 5;6;6;6;8 | 2;3;3;3;3 | 2;3;3;3;3 | 2;3;3;3;3 | 3;3;3;4;4 | 6.2 | 2.8 | 2.8 | 2.8 | 3.4 | 0.612372 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What's the training loss of the GIMF?\n\n2. Can GIMF obtain a Pareto set of solutions?\n\n3. I noticed the authors used multi-modal fusion, but I don't quite understand this part. Does it mean that each subproblem requires training a single model and then performing model fusion at the end?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The writing is good.\n\n2. The experiments are detailed, and the results are competitive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to fully leverage the intrinsic features of problem instances by proposing a novel graph-image multimodal fusion framework for solving multi-objective combinatorial optimization (MOCO). The authors introduce the concept of \"image\" for MOCO to better capture the spatial structure of problem instances, enhancing the learning process. They also propose a problem-size adaptive resolution strategy to improve generalization. Finally, the paper presents a multimodal fusion mechanism with modality-specific bottlenecks to efficiently integrate graph and image information."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I believe the role of the image concept in CO is questionable. To some extent, using images in CO results in information loss and requires more space to represent. As far as I know, many Euclidean TSP models, like [1, 2], use positions directly as input, which requires less space and provides more precise information.\n\n2. Compared to typical neural MOCO methods, GIMF uses sparse matrix images as input, resulting in larger neural network sizes and an inability to handle larger-scale routing problems."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "In Table 1 and Table 2, the reported times of GIMF-P and GIMF-C are sometimes smaller than PMOCO and CHN, what is the reason for this phenomenon?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The major contribution of this paper is its integration of both graph and image modalities to enhance the representation learning for MOCO problems. The construction of coordinate images and the use of PSAR strategy are innovative steps that address the limitations of relying solely on graph information. The proposed MSB in multimodal fusion mechanism is also a novel contribution.\n\n2. The paper is well-organized and written in a clear and concise manner. The introduction effectively sets the stage by outlining the challenges in MOCO and the motivation behind the GIMF framework. The preliminary section clearly describes the definition of the MOCO problem and related concepts, as well as the graph transformer for MOCO. The methodology section is detailed, providing a clear explanation of the image construction process, the PSAR strategy, and the multimodal fusion mechanism.\n\n3. The significance mainly comes from its novelty.Specifically, leveraging a multimodal approach that incorporates image-modal information, which has the potential to improve many existing neural MOCO methods. The paper is also likely to inspire further research in constructing and learning from images of MOCO problems.\n\n4. The experimental results suggest that GIMF does not obviously increase computational time of the neural MOCO basebone. The major innovations PSAR and MSB are validated by ablation study."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a novel graph-image multimodal fusion (GIMF) framework that aims to enhance neural multi-objective combinatorial optimization (MOCO) methods. The GIMF framework integrates graph and image information of problem instances, which is designed to overcome the limitations of existing neural MOCO methods that rely solely on graph-modal information. The main contribution of the proposed method is the coordinate image construction, which provides complementary information to the graph representation.To improve the model's generalization across different problem sizes, a Problem-size Adaptive Resolution (PSAR) strategy is proposed during the image construction process, which helps maintain a stable density for both the image and patches. A multimodal fusion mechanism with Modality-Specific Bottlenecks (MSB) is designed to efficiently couple graph and image information. \n\nThe GIMF framework is implemented with two state-of-the-art neural MOCO backbones, namely CNH and PMOCO. Experimental results on classic MOCO problems demonstrate that GIMF can improve neural MOCO methods by providing image-modal information and exhibits superior generalization capability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The proposed method could improve the performance of CNH and PMOCO, as well as their augment variants. However, sometimes the improvement seems marginal. In Table 1 and Table 2, the reported improvements are all mostly less than 0.001, and sometimes are as small as 0.0001. Meanwhile, the reported best results can not significantly outperform SOTA baselines."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Is this analogous to a game played on a chessboard? Are the authors providing definitions and citations related to reinforcement learning? Are you aiming to transform the graph problem into a gameplay problem on a chessboard?\n- Has reinforcement learning for MOCO in chessboard games been well studied elsewhere? If MOCO, such as TSP, is defined within the context of a chessboard game, isn’t it relatively straightforward? Does this require a graph modality, or can the image modality defined by the authors alone be sufficient to address the MOCO problems?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The main novelty of this paper lies in defining the image modality alongside the graph modality, offering a new perspective for addressing challenges in MOCO.\n- This approach enhances conventional heuristic algorithms by leveraging the combined information from both modalities."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a novel Graph-Image Multimodal Fusion (GIMF) framework designed to enhance multi-objective combinatorial optimization (MOCO) methods. By integrating both graph and image information from problem instances, the framework effectively addresses the limitations associated with relying solely on graph-modal information, particularly in the context of bi- and tri-objective traveling salesman problems (TSP)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- If the image modality has been defined, why still continue to use the graph modality? How could one approach solving an MOCO problem using only the image modality? Additionally, could you provide ablation studies to support this?\n- Since the graph can also be viewed as a transition matrix, what is the relationship between reinforcement learning and the authors' algorithm?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. What does \"$\\bm{\\pi_i}$\" represent in the formula for calculating $\\nabla\\mathcal{L}(\\bm{\\theta})$ on line 136, or should it be changed to \"$\\pi_i$\"?\n\n2. What is the meaning of the line from $\\pi_t$ to $h_c$ in Figure 2? The authors should supplement the relationship between $\\pi_t$ and $h_c$ in the main text.\n\n3. Why choose the dimension of patch as $w=h=16$? The authors should provide an explanation or conduct ablation experiments."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThis paper is well-organized and clearly written.\n2.\tThe proposed method demonstrates novelty.\n3.\tExperiments show that GIMF performs better on classic MOCO problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a generic graph-image multimodal fusion (GIMF) framework that integrates graph and image information of the problem instances to enhance neural MOCO. The framework consists of three main components: (1) a constructed coordinate image (2) a problem-size adaptive resolution strategy and (3) a multimodal fusion mechanism. Experimental results demonstrate its effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Some details and parameter settings were not explained clearly (see Questions below)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "N/A."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "S1. The GIMF framework successfully combines graph and image information, enriching representation learning for MOCO problems. \n\nS2. The PSAR strategy and modality-specific bottlenecks for multimodal fusion are well-justified and empirically validated.\n\nS3. Extensive experiments are conducted."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel approach for MOCO through graph-image multimodal fusion. The framework incorporates a constructed coordinate image and efficient multimodal fusion. Experimental results on MOCO problems show the advance of the proposed GIMF."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. The improvement achieved by the proposed methods appears to be marginal.\n\nW2. The computational cost of the proposed GIMF framework seems considerably higher than state-of-the-art methods like EMNH. It seems that the performance gains come at the cost of efficiency. \n\nW3. Constructing images seems relatively straightforward for TSP problems, but how does this approach generalize to other real-world scenarios? For some tasks, image construction may be challenging—how do the authors envision addressing this limitation?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper proposes a graph-image multimodal fusion framework for neural multi-objective combinatorial optimization."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024neural,\ntitle={Neural Multi-Objective Combinatorial Optimization via Graph-Image Multimodal Fusion},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4sJ2FYE65U},\nnote={under review}\n}"
},
"abstract": {
"value": "Existing neural multi-objective combinatorial optimization (MOCO) methods still exhibit an optimality gap since they fail to fully exploit the intrinsic features of problem instances. A significant factor contributing to this shortfall is their reliance solely on graph-modal information. To overcome this, we propose a novel graph-image multimodal fusion (GIMF) framework that enhances neural MOCO methods by integrating graph and image information of the problem instances. Our GIMF framework comprises three key components: (1) a constructed coordinate image to better represent the spatial structure of the problem instance, (2) a problem-size adaptive resolution strategy during the image construction process to improve the cross-size generalization of the model, and (3) a multimodal fusion mechanism with modality-specific bottlenecks to efficiently couple graph and image information. We demonstrate the versatility of our GIMF by implementing it with two state-of-the-art neural MOCO backbones. Experimental results on classic MOCO problems show that our GIMF significantly outperforms state-of-the-art neural MOCO methods and exhibits superior generalization capability."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neural Multi-Objective Combinatorial Optimization",
"Multimodal Fusion",
"Deep Reinforcement Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a2bbd6277481ae289d269a16e8ccb6fc25bdb1a0.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Neural Multi-Objective Combinatorial Optimization via Graph-Image Multimodal Fusion"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4sJIgdErt1 | Unified Framework for Causal Discovery and Long-term Forecasting in Non-stationary Environments | main | Withdraw | causal discovery;long-term forecasting | causal reasoning | Har Simrat Singh;Biwei Huang | ~Har_Simrat_Singh1;~Biwei_Huang1 | 0 | 0 | 0 | 0 | 0 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nsingh2024unified,\ntitle={Unified Framework for Causal Discovery and Long-term Forecasting in Non-stationary Environments},\nauthor={Har Simrat Singh and Biwei Huang},\nyear={2024},\nurl={https://openreview.net/forum?id=4sJIgdErt1}\n}"
},
"abstract": {
"value": "Non-stationary data is prevalent in various real-world domains such as climate science, economics, and neuroscience, presenting significant challenges for tasks like forecasting and causal discovery from observational data. Existing approaches often operate under the assumption that the data is stationary. In this work, we introduce a unified framework that combines long-term forecasting and causal discovery with non-linear relations in a non-stationary setting. Specifically, we assume that the nonlinear causal relations in the observed space can be transformed into linear relations in the latent space via projections. In addition, we model the non-stationarity in the system as arising from time-varying causal relations. The proposed model demonstrates that adopting a causal perspective for long-term forecasting not only addresses the limitations of each task but also makes the causal process identifiable, enhances interpretability, and provides more reliable predictions. Moreover, our approach reformulates causal discovery into a scalable, non-parametric deep learning problem. Through experiments on both synthetic and real-world datasets, we show that our framework outperforms baseline methods in both forecasting and causal discovery, underscoring the benefits of this integrated approach."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Har_Simrat_Singh1",
"~Biwei_Huang1"
]
},
"authors": {
"value": [
"Har Simrat Singh",
"Biwei Huang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"causal discovery",
"long-term forecasting"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "singh|unified_framework_for_causal_discovery_and_longterm_forecasting_in_nonstationary_environments"
},
"pdf": {
"value": "/pdf/ac2aaade2cdf5d96e8869e7411d4caba8a274376.pdf"
},
"presentation": null,
"primary_area": {
"value": "causal reasoning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Unified Framework for Causal Discovery and Long-term Forecasting in Non-stationary Environments"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
||||||||||
4sJJixGIZX | Online Continual Graph Learning | main | Active | continual learning;online learning;graph neural network | transfer learning, meta learning, and lifelong learning | 3;5;5;6 | 4;4;4;4 | 2;2;2;3 | 1;2;2;2 | 2;2;2;3 | 4.75 | 4 | 2.25 | 1.75 | 2.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see above."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. One of the paper's main strengths is the formal introduction of the Online Continual Graph Learning (OCGL) framework.\n2. The authors develop a benchmarking environment specifically for OCGL, including multiple datasets and evaluations of various continual learning methods. \n3. The experimental setup and the detailed analysis provided in the paper are thorough and well-constructed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel framework, OCGL, which addresses the challenges of applying continual learning principles to graph-structured data in an online setting. It innovatively formulates the problem of learning from a continuously evolving graph, emphasizing the necessity to manage computational complexity and avoid catastrophic forgetting—a common issue where a model loses previously learned information upon learning new data. To facilitate research in this area, the authors develop a benchmarking environment with tailored datasets to evaluate various continual learning methods adapted to graph settings. They also propose practical solutions such as neighborhood sampling to maintain computational efficiency as the graph grows."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed problem is novel, however, the detailed appliable scenario for such OCGL framework should be further explained, especially on graph data. \n2. The baselines chosen in this paper are all Continual learning methods. More methods for the online learning setting should be included.\n3. Also, as a benchmark paper, it would be beneficial to introduce more new datasets.\n4. The contribution of this paper seems limited to me. The authors introduced a new problem setting OCGL for graph learning and presented a benchmarking environment for OCGL, but did not propose a novel method to solve this problem. Although I understand benchmarking papers are also important to the research community, I believe that the contribution in this case may not be significantly sufficient for inclusion in this conference.\n5. The third contribution, using random sampling to address the complexity of multi-hop aggregation, is a very easy-to-get idea, and seems trivial to me."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In the Mini-batching part of Section 3.1, what does it mean by 'L>1 is not in contrast with the growing mechanism of the graph'? I could understand what the authors want to express should be that the entire graph may be required for aggregating multi-hop information, but the writing here seems confusing.\n\n2. It is a little bit confusing whether the proposed strategy allows the model to access the entire graph that contains previous nodes. It is stated that the up-to-date graph Gt is stored in a Past Information Store (PIS) system, but only allow limited use of information from PIS. It is unclear what kind of usage is deemed as 'limited'. Additionally, it is also stated that the PIS is different from a 'eventual memory buffer'. This is also confusing. If the PIS contains the completely graph with all previous information, then what is the role of the 'eventual memory buffer', and why we still need such a buffer?\n\n3. In the 'training details' part in the experiment section, when talking about the batch size, how is each batch used? Given a new task with N data, will the model be trained on the N data for several epochs, and in each epoch, the batches are fed into the model sequentially?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Online continual graph learning has not been fully explored, and this work makes some contribution in this direction. \n\n2. Compared to existing continual graph learning works, this work adopts a more practical hyperparameter selection strategy that only use a few tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to formulate the setting of online continual graph learning considering the efficiency of batch processing and graph topology, and proposes a set of benchmark datasets for online continual graph learning. Additionally, from the technical perspective, the authors address the challenge of GNN memory usage. \n\nWithin the context of online continual graph learning, the graphs are defined as a time series, in which the graph snapshot at each time stamp t contains the nodes and edges collected from the starting time till t. Each new snapshot is created when a new node is added. The newly attached information includes the new node, its neighbors, and the node features."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main weakness is the inconsistency between the proposed setting and the actual experiments. Although the paper describes an online learning setting, the task construction in experiments is still the same as the continual graph learning setting with task boundaries. As mentioned in the paper, 'the graph grow with nodes from two new classes at a time', then the incremental manner is the same as a normal class incremental learning instead of an online learning setting. I would recommend that the experiments should be consistent with the proposed setting, in which each new snapshot could contain one node or a mini-batch of new nodes, but not necessarily a new task containing new classes.\n\n2. The adopted baselines are a little bit out of date. Besides, only TWP is specially designed for graph data, while the others don't consider the graph structures. Admittedly the authors have discussed why some baselines are not adopted, but the mentioned ones are all proposed no later than 2021. Therefore, it is not convincing enough that the adopted methods can represent the state-of-the-art performance. I would recommend that the recent continual graph learning works proposed from 2022 to 2024 could be thoroughly investigated, discussed, and compared whenever appropriate."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The introduction of the Online Continual Graph Learning (OCGL) framework extends continual learning to dynamic, graph-structured data.\n\n2. The paper provides a thorough evaluation of multiple continual learning methods, adapting them for online graph learning.\n\n3. The proposed neighborhood sampling strategy effectively addresses the computational and memory challenges of multi-hop neighborhood aggregation in GNNs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces the Online Continual Graph Learning (OCGL) framework to handle non-stationary streaming data in graph structures. It benchmarks several continual learning methods on four datasets, adapting them for the online graph learning scenario. The authors propose a neighborhood sampling strategy to address the issue of neighborhood expansion in Graph Neural Networks (GNNs) and reduce computational complexity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The benchmarks focus mainly on node classification tasks, and extending the framework to more diverse graph-based applications (e.g., edge prediction, link prediction) could strengthen the paper's contributions.\n\n2. The paper primarily compares traditional continual learning methods adapted for the Online Continual Graph Learning (OCGL) framework. It does not include comparisons with more recent state-of-the-art continual graph learning methods proposed in the recent three years, such as MSCGL[1] and UGCL[2].\n\n [1] J. Cai, X. Wang, C. Guan, Y. Tang, J. Xu, B. Zhong, and W. Zhu, ''Multimodal continual graph learning with neural architecture search,'' in Proceedings of the ACM Web Conference, 2022, pp.1292–1300.\n\n [2] T. D. Hoang, D. V. Tung, D.-H. Nguyen, B.-S. Nguyen, H. H.Nguyen, and H. Le, ''Universal graph continual learning,'' Transactions on Machine Learning Research, 2023.\n\n3. While the sampling strategy improves computational efficiency, it can negatively impact model accuracy. \n\n4. The paper predominantly concentrates on experimental evaluation and lacks an in-depth theoretical analysis of the proposed method's properties, such as convergence, computational complexity, and theoretical bounds on forgetting."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- the studied problem is an important research question\n\n- I like that the authors tried to take a more systematic approach toward the problem"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an Online Continual Graph Learning (OCGL) framework designed for learning in dynamic graph environments where data arrives in a streaming fashion. The authors address challenges of catastrophic forgetting in graph-based continual learning, particularly when working with graph neural networks (GNNs) that rely on neighborhood information, which can lead to high memory and computational costs. The proposed OCGL framework is evaluated on four node classification datasets, using modified continual learning methods suited for online learning. They propose neighborhood sampling as a strategy to address neighborhood expansion challenges, which could otherwise lead to prohibitive costs in dynamic graph settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. the technical content of the paper does not address the research question proposed by the authors. For example\n a. one of the claimed contributions is the formulation of the so-called online GCL. However, I do not see any formal formulation of the problem. Only a generic description is provided. For example, to fulfil the claim, one would naturally expect to see the data model, the definition and format of the learner, and the properties and requirements for an effective learner in this scenario. None of this information is provided.\n b. another claim is that online GCL is a new learning paradigm and different from GCL. One would expect to see a detailed comparison between these two. How are they different exactly? How much is the difference?\n\n2. there are some statements that are not factually correct. For example, continual learning is inherently an online setting. An ideal continual learning algorithm should adaptively learn from the new data without the need to access previous data. However, this can be proved theoretically impossible. Therefore, many continual learning algorithms compromise by allowing partial access to historical data. Even for online systems, storing historical data is also allowed. Regarding the task boundary, there have been many studies that looked at the continual learning setting without a clear task boundary. These studies have been under the terms such as \"task-free continual learning\" and \"domain-free continual learning\"[1]. Furthermore, it is not clear what exactly task-boundary means in the paper.\n\n3. the proposed technique is standard and the paper has no conceivable novelty or contribution. The neighbourhood explosion problem is a standard issue in GNN training even for the case of training GNN on static graphs, and neighborhood sampling has been de-facto in training GNN.\nThe issue of changing graph structure in GCL has been documented and studied in [2].\n\n[1] \"Cglb: Benchmark tasks for continual graph learning.\" Advances in Neural Information Processing Systems 35 (2022): 13006-13021.\n\n[2] \"Towards robust graph incremental learning on evolving graphs.\" International Conference on Machine Learning. PMLR, 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024online,\ntitle={Online Continual Graph Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4sJJixGIZX},\nnote={under review}\n}"
},
"abstract": {
"value": "The aim of Continual Learning (CL) is to learn new tasks incrementally while avoiding catastrophic forgetting. Online Continual Learning (OCL) specifically focuses on learning efficiently from a continuous stream of data with shifting distribution. While recent studies explore Continual Learning on graphs exploiting Graph Neural Networks (GNNs), only few of them focus on a streaming setting. Many real-world graphs evolve over time and timely (online) predictions could be required. However, current approaches are not well aligned with the standard OCL literature, partly due to the lack of a clear definition of online continual learning on graphs. In this work, we propose a general formulation for online continual learning on graphs, emphasizing the efficiency of batch processing while accounting for graph topology, providing a grounded setting to analyze different methods. We present a set of benchmark datasets for online continual graph learning, together with the results of several methods in CL literature, adapted to our setting. Additionally, we address the challenge of GNN memory usage, as considering multiple hops of neighborhood aggregation can require access to the entire growing graph, resulting in prohibitive costs for the setting. We thus propose solutions to maintain bounded complexity for efficient online learning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"continual learning",
"online learning",
"graph neural network"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d9ddedd8b36c954be891e37b0a5ef0ffc09f29fe.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/7161559470fcdcdf01efae8038e7fff391441842.zip"
},
"title": {
"value": "Online Continual Graph Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4tiTQ33sDH | Unlocking the Power of GANs in Non-Autoregressive Text Generation | main | Active | Language GANs;Non-Autoregressive Model;Text Generation | generative models | 3;3;3;5 | 2;5;3;4 | 2;2;3;2 | 2;2;3;3 | 2;2;3;3 | 3.5 | 3.5 | 2.25 | 2.5 | 2.5 | 0.258199 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please check the weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work is pioneering in applying GANs within a non-autoregressive structure for text generation, presenting novel solutions like Position-Aware Self-Modulation and Dependency FFN to tackle inherent limitations in GAN-based text generation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel model called Adversarial Non-autoregressive Transformer (ANT) aimed at enhancing the efficiency and performance of Generative Adversarial Networks (GANs) in text generation. Unlike conventional GANs that rely on autoregressive (AR) structures, ANT leverages a non-autoregressive (NAR) framework, allowing for parallel computation and significantly reducing latency in both training and inference. Key contributions include the introduction of Position-Aware Self-Modulation to enhance representation diversity and Dependency Feed Forward Network (Dependency FFN) to improve dependency modeling. Experimental results show ANT's competitive performance with AR models in terms of quality while achieving lower latency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The datasets chosen for experimental validation are relatively simple, lacking common tasks like translation and summarization, which weakens the persuasiveness of the results.\n2. The issue described in line 57, \"the dynamic weight assignment process becomes unstable during the fragile training of GANs,\" lacks references, in-depth analysis, or detailed description of the phenomenon, making it difficult to thoroughly understand this problem."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "refer to the comments"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The authors propose Position-Aware Self-Modulation (PASM), which provides more diverse and effective latent variable representations, enhancing the model's ability to capture the diversity of different words in sentences.\n- To improve the dependency among the decoding procedure, the authors propose Dependency Feed Forward Network (DFFN), which can lead to better performance.\n- The authors conduct extensive experiments to validate the effectiveness of their proposed model, comparing it against mainstream models and demonstrating its competitive performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the application of Generative Adversarial Networks (GANs) in non-autoregressive (NAR) text generation, addressing the limitations of existing GAN-based text generation models that typically rely on autoregressive structures. The authors identify two main issues with current NAR models: the lack of diversity in latent variable representations and the instability of attention mechanisms during GAN training. To tackle these problems, they introduce two useful techniques: Position-Aware Self-Modulation (PASM) and Dependency Feed Forward Network (DFFN). The experimental results demonstrate that ANT achieves comparable performance to the baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The title of the paper is \"Unlocking the Power of GANs xxx.\" Generally, the strength of GANs lies in the training of the generator and discriminator through a game-theoretic mechanism. However, the main focus of this paper is not on GANs but rather on non-autoregressive text generation. I do not believe the power of GANs lies in non-autoregressive modeling.\n- While the authors compare their model, ANT, to several state-of-the-art models, it appears they have selectively chosen only strong non-autoregressive baselines that utilize GANs. Other baseline methods, such as Huang et al. (ICML 2022), should also be discussed to strengthen the claims regarding the model's superiority.\n- Additionally, the experiments are conducted primarily on specific tasks and datasets, which are somewhat outdated. It would be valuable to assess how well the model generalizes to other text generation tasks, such as summarization, dialogue generation, and long-form text generation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide empirical evidence or further analysis to support the claim that Position-Aware Self-Modulation significantly improves generation diversity compared to standard practices in NAR models?\n2. How does the Dependency Feed Forward Network provide a clear advantage over traditional FFNs in the context of GAN training, and what experimental results demonstrate this, beyond the marginal improvement shown in Figure 5?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well-structured and provides clear explanations of the proposed methods and their implications, making it easy to follow the authors' reasoning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the Adversarial Non-autoregressive Transformer (ANT), a GAN-based model for efficient text generation. It proposes two main contributions: Position-Aware Self-Modulation and Dependency Feed Forward Network (Dependency FFN). The study claims that ANT achieves comparable performance to mainstream models with significantly lower latency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper may lack a comprehensive comparison with state-of-the-art non-autoregressive models, which is crucial for establishing the significance of the proposed ANT model: Glancing Transformer (Qian et al, 2021), Fully-NAT (Gu et al. 2021), DA-Transformer (Huang et al. 2022), SUNDAE (Nikolay Savinov et al. 2022), etc. \n2. While the paper claims that Position-Aware Self-Modulation enhances generation diversity, it appears to be a common practice to input identical [MASK] tokens plus positional embeddings in NAR models, which also achieve strong performance during decoding. The paper does not provide direct evidence to show that the similar representation approach hinders the model's generation ability or that Position-Aware Self-Modulation offers a significant improvement over this standard practice. This lack of evidence makes it difficult to assess the true impact of this contribution.\n3. The Dependency Feed Forward Network is presented as a solution to the instability of word dependencies during GAN training. However, the provided evidence in Figure 5 shows only a marginal improvement, with the gap in FED not exceeding 0.001. Such a small difference raises questions about the practical significance of this improvement, especially considering the computational overhead it might introduce."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses section"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This work pioneers the development of language GANs based on non-autoregressive (NAR) structures, addressing the high latency issues inherent in autoregressive (AR) models. By generating all words in parallel, the Adversarial Non-autoregressive Transformer (ANT) achieves high-efficiency generation, significantly reducing both training and inference times.\n\nThe introduction of Position-Aware Self-Modulation and Dependency Feed-forward Network (Dependency FFN) addresses critical challenges in GAN-based NAR models. Position-Aware Self-Modulation enhances the diversity of hidden representations, leading to the generation of more varied and high-quality words in sentences. Dependency FFN improves the stability and accuracy of dependency modeling, resulting in more grammatically coherent outputs compared to traditional attention mechanisms."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduced the Adversarial Non-autoregressive Transformer (ANT), a pioneering study of building language GANs based on non-autoregressive (NAR) structures to address the exposure bias problem and reduce latency in training and inference. ANT tackles two key issues: the lack of diversity in latent variable representations by proposing Position-Aware Self-Modulation, and the inaccurate word dependency modeling in Transformers by adopting a Dependency Feed Forward Network. Experimental results show that ANT achieves performance comparable to mainstream models in a single forward pass, with promising applications in latent interpolation and semi-supervised learning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The baselines selected for comparison in the paper are quite outdated, with the chosen non-autoregressive (NAR) models being from 2018, 2019, and 2021. Given the rapid advancements in the field of natural language processing (NLP), it is crucial to compare the proposed model against the most recent and state-of-the-art NAR models to provide a more accurate assessment of its performance. The absence of comparisons with the latest models raises concerns about the relative effectiveness and competitiveness of the proposed approach.\n\nThe experiments conducted in the paper are limited to the COCO and EMNLP datasets, which do not provide a comprehensive evaluation of the model's capabilities. To thoroughly assess the performance and robustness of the proposed NAR model, it is essential to test it on a wider range of datasets, including those for machine translation (e.g., WMT), natural language inference (e.g., SNLI), and text summarization. Evaluating the model on these additional datasets would offer valuable insights into its effectiveness across different NLP tasks, particularly in handling longer texts, which is a critical aspect of many real-world applications. The current dataset selection limits the generalizability and applicability of the findings.\n\nIf these aspects were addressed with more comprehensive experiments, it would significantly improve the evaluation and increase the overall score of the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024unlocking,\ntitle={Unlocking the Power of {GAN}s in Non-Autoregressive Text Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4tiTQ33sDH},\nnote={under review}\n}"
},
"abstract": {
"value": "Generative Adversarial Networks (GANs) have been studied in text generation to tackle the exposure bias problem. Despite their remarkable development, they adopt autoregressive structures so suffering from high latency in both training and inference stages. Although GANs have potential to support efficient generation by adopting non-autoregressive (NAR) structures, their explorations in NAR models are extremely limited. In this work, we conduct pioneering study of building language GANs based on NAR structures. We identify two issues that constrain the performance of GAN-based NAR models. Firstly, existing methods of incorporating latent variables provide highly similar representations which cannot describe the diversity of different words in sentences. We tackle this problem by proposing Position-Aware Self-Modulation, providing more diverse and effective representations. Secondly, the attention mechanism in Transformer cannot accurately build word dependencies in the unstable training of GANs, and we adopt Dependency Feed Forward Network to enhance the model capacity in dependency modeling. Armed with these two facilities, we propose a GAN-based NAR model, Adversarial Non-autoregressive Transformer (ANT). The experimental results demonstrate that ANT can achieve comparable performance with mainstream models in a single forward pass and has great potential in various applications like latent interpolation and semi-supervised learning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Language GANs",
"Non-Autoregressive Model",
"Text Generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8b144a25b2e52968b9926e21981469074fa6f33f.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Unlocking the Power of GANs in Non-Autoregressive Text Generation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4ua4wyAQLm | Local Patterns Generalize Better for Novel Anomalies | main | Active | Global Patterns; Local Patterns; Image-Text Alignment Module; Cross-Modality Attention; Temporal Sentence Generation; State Machine Module | applications to computer vision, audio, language, and other modalities | 5;5;6;6 | 4;3;3;3 | 3;3;4;3 | 3;3;3;3 | 2;3;4;4 | 5.5 | 3.25 | 3.25 | 3 | 3.25 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "* How to further improve the generalization of local patterns without relying on visual-linguistic models?\n\n* How does the method ensure adaptability to low-resolution videos in different datasets and real-world application scenarios?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* By using image-text alignment and cross-modality attention, this method successfully extracts local patterns that remain consistent across varying visual data, enhancing its ability to detect novel anomalies.\n\n* The State Machine Module (SMM) and motion estimation integrate temporal clues, effectively strengthening the detection capabilities by including sequential information for more accurate anomaly detection.\n\n*By combining visual and textual features in identifying local patterns, the model benefits from enhanced robustness and accuracy across different visual domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel framework for video anomaly detection, aiming to improve generalization for detecting new, unseen anomalies by focusing on local patterns rather than global event patterns. Traditional video anomaly detection (VAD) methods often struggle with unseen anomalies, as they primarily analyze global patterns. This framework utilizes image-text alignment and cross-modality attention to identify and refine local patterns while enhancing them with temporal information. Core components include the Image-Text Alignment Module (ITAM), Cross-Modality Attention Module (CMAM), and State Machine Module (SMM). The proposed approach demonstrates superior performance on several benchmark datasets, suggesting it can generalize better to novel anomalies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The method relies on the detection effect of visual-linguistic modeling (VLM), whereas multi-object image processing may ignore contextual information and affect performance. The authors need to provide more analysis on the ablation of the foundational models.\n\n* The need for multiple layers of modules (e.g., ITAM, CMAM, SMM) to work jointly results in a complex training process that consumes more time and resources. Please provide a comparison of the spatio-temporal complexity analysis with previous methods to demonstrate the practical effectiveness of the method.\n\n* In low-resolution scenes, the generated text description loses detail information, which affects the anomaly detection effect."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The paper mentions limitations related to the reliance on VLM-based object detectors. How can this limitation be addressed in future work?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed two-stage framework for identifying local patterns is novel and well-motivated. The use of image-text alignment and cross-modality attention is interesting and potentially useful."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel framework for video anomaly detection (VAD) that focuses on identifying local patterns to better generalize to unseen anomalies. The framework employs a two-stage process: first, it uses image-text alignment to locate local patterns that are consistent across visual data variances; second, it refines these patterns using cross-modality attention. To further enhance the model, the authors introduce temporal clues through a State Machine Module (SMM) and temporal motion estimation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The paper lacks a clear discussion of the computational complexity of the proposed framework. Given the use of large language models (LLMs) and other complex modules, it is important to address the efficiency of the approach.\n2) What's the role of State Machine Module (SMM) in temporal sentence generation, there need more detailed explanation of the SMM and its role.\n3) How does the proposed method handle situations with significant occlusions or viewpoint changes, which are common in real-world surveillance videos?\n4) The two-stage process for extracting spatial local patterns using image-text alignment and cross-modality attention is not explained in enough detail. The paper lacks a clear, step-by-step explanation of how these complex processes work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The Global and Local Pattern representations in Figure 1 are hand-drawn, which limits their reliability. Are there any real feature visualization images available instead? Using actual visualizations could better illustrate the motivation and effectiveness of the proposed method, particularly in showing whether it yields more distinguishable local patterns. Figure 1 alone does not provide enough information to convey the method’s motivation and impact.\n2. Utilizing Qwen for cropping bounding box regions based on prompts could significantly impact efficiency.\n3. Is the introduction of the Qwen-Chat model the primary source of performance improvement? My concern is that the proposed method incorporates numerous external models, and it remains unclear whether these additions are the main contributors to the observed performance gains.\n4. Could smaller models be used to replace these large multimodal models? If so, would this result in a significant decrease in performance?\n5. Could you provide statistical results on runtime and efficiency? Does the proposed method have a significant impact on operational efficiency?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed framework is well-structured, with thoughtfully implemented methods. \n2. Experimental results confirm that the approach achieves state-of-the-art performance on established benchmark datasets for video anomaly detection. \n3. The method focuses on fine-grained anomaly features and employs text-image alignment to effectively capture local patterns. \n4. It incorporates Long-range Memory technology, specifically HiPPO, into video anomaly detection."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel framework for video anomaly detection (VAD) that prioritizes identifying local patterns over conventional global patterns. The authors contend that local patterns generalize better to novel anomalies that were not encountered during training. Their proposed approach follows a two-stage process involving image-text alignment and cross-modality attention to efficiently capture and model local patterns. Additionally, the framework includes a State Machine Module (SMM) to integrate temporal dynamics, enabling enhanced anomaly detection by leveraging both spatial and temporal cues. Experimental results show that this approach achieves state-of-the-art performance on well-established benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The core of the proposed SMM module comes from works like HiPPO, so it seems that the proposed SMM is directly applying these modules to the VAD task. \n2. Both the Image-Text Alignment Module and Cross-Modality Attention Module are based on pre-existing techniques, which limits the methodological innovation.\n3. Is the observed performance improvement attributed to the additional large vision-language models, such as Qwen-VL and BLIP2? The comparison may not be entirely fair. It would be beneficial if the authors could provide evidence or experimental results to clarify whether these powerful external models are the primary contributors to the performance gains.\n4. The motivation for the work lacks clarity. How do the image-text alignment and cross-modality attention modules achieve “identification of local patterns that are consistent across domains and generalize well”? Additionally, how do they contribute to “generalizing model representations to novel anomalies”?\n5. Certain claims may require further validation, such as the statement: “the complementary relation between visual and textual features remains underexplored.”\n6. The paper lacks runtime and efficiency analysis. The code introduction is incomplete, and several experimental details are missing, such as the specific version and scale of Qwen-VL used."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "My understanding of this field is unprofessional. So I will further follow the opinions of other reviewers."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. The two-stage training method of this paper is reasonable. And allow for more fine-grained local features.\n2. This paper gives a lot of visualizations to make it easier to understand the specific content.\n3. The paper has achieved good performance, and the ablation experiment is given."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This article is about video anomaly detection (VAD). This paper proposes a framework for recognizing local patterns, which can be generalized to new samples and dynamic modeling of local patterns. This paper proposes image-text alignment and cross-modal attention. Generalizable representations are built by focusing on textual information features that filter out unnecessary differences in visual data. In addition, time motion estimation complements spatial local models to detect anomalies characterized by new spatial distributions or unique dynamics. A large number of experiments have verified the effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Not giving motivation for each part of the method. In my opinion, a good paper should give a specific reason and then introduce the method.\n2. The efficiency of the model is worth discussing. You have proposed a lot of model modules. How much more reasoning time will they add to the network?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024local,\ntitle={Local Patterns Generalize Better for Novel Anomalies},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ua4wyAQLm},\nnote={under review}\n}"
},
"abstract": {
"value": "Video anomaly detection (VAD) aims at identifying novel actions or events which are unseen during training. Existing mainstream VAD techniques typically focus on the global patterns of events but struggle to generalize to novel samples. In this paper, we propose a framework that identifies the local patterns which generalize to novel samples and models the dynamics of local patterns. The capability of extracting spatial local patterns is achieved through a two-stage process involving image-text alignment and cross-modality attention. Generalizable representations are built by focusing on text-informative features that filter out unnecessary visual data variances. To enhance spatial local patterns with temporal clues, we introduce a State Machine Module (SMM) that combines tokens from different moments to improve sentence generation within cross-modality attention. Furthermore, temporal motion estimation complements spatial local patterns to detect anomalies characterized by novel spatial distributions or distinctive dynamics. Extensive experiments on popular benchmark datasets demonstrate the achievement of state-of-the-art performance. Code is available at https://anonymous.4open.science/r/Local-Patterns-Generalize-Better-1E30/."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Global Patterns; Local Patterns; Image-Text Alignment Module; Cross-Modality Attention; Temporal Sentence Generation; State Machine Module"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d6c777f6b718a52624a5914f78b2bbfb2c7dda69.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Local Patterns Generalize Better for Novel Anomalies"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4ub9gpx9xw | Walk the Talk? Measuring the Faithfulness of Large Language Model Explanations | main | Active | large language models;faithful explanations;explainability;safety;counterfactual reasoning | interpretability and explainable AI | 5;6;8;8 | 4;4;4;3 | 2;3;3;3 | 3;3;3;3 | 3;4;4;3 | 6.75 | 3.75 | 2.75 | 3 | 3.5 | -0.555556 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Here the causal concept effect is considered the ground truth in some sense. Then would it make sense to directly explain the model prediction using the causal concept effect?\n- For the dataset level faithfulness, instead of averaging question level faithfulness, why not directly measure PCC of all examples in the dataset?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Inspecting more finegrained faithfulness is a novel contribution and it allows us to gain better understanding of specific biases in model explanations.\n- The paper proposes a principled method to quantify faithfulness based on counterfactual examples.\n- The finding that safety alignment can make the model hide true reasons (e.g., gender bias) in its explanation (thus is only a form of shallow alignment) is very interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to measure the faithfulness of LLM generated natural language explanations in terms of specific concepts mentioned in it. Specifically, faithfulness is measured by the correlation between the causal effect of a concept (measured by counterfactual predictions) and the likelihood of the concept being mentioned in the explanation. This analysis produces several interesting results, e.g., the model doesn’t mention gender in its explanation despite gender having large causal effect on its prediction; safety alignment can affect model explanation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- It is unclear how the explanations are generated, e.g., are these CoT from zero shot or few shot prompting? Is explanation generated before or after model prediction? It would be interesting to analyze how different prompting methods change the result or improve faithfulness.\n\nMinor:\n- 152: distinct -> disentangled might be a more precise word\n- 194: typo: x in the equation should be in vector form"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. One of the decisions in the paper is to use an auxiliary model to extract concepts, propose a list of alternative values for each concept. Why is this necessary and is there any assumption or requirement that helped settling for a GPT-4o as the auxiliary LLM? The authors could better motivate the selection of GPT-4o in their paper, perhaps by including a small human study comparing the effectiveness of different models in extracting concepts and creating the list of alternate values. The authors should also consider including the prompt used to extract concepts and concept values in the Appendix.\n2. In line 218, the authors mention the use of auxiliary LLM to “list distinct concepts in the context of x”. What kind of verifications were performed to ensure that the extracted concepts and their list of concepts were meaningful? The authors should consider adding a list of extracted concepts and their list of values to the Appendix. They should also consider adding more details about the validation (e.g., manual validation or llm-as-a-judge approach).\n3. Similarly, to the two questions above, in line 224-225, the authors mention “to generate each counterfactual, we instruct [auxiliary LLM] to edit the question x by changing the value of [concept] ..., while keeping everything else the same”. However there seems to be no validation of this claim. Did the authors validate that the perturbed input x was indeed minimally distant from x? If not, the authors should consider including such analysis, perhaps by showing the minimum edit distance or n gram overlap between the modified and original inputs.\n4. Why did the authors select a linear correlation coefficient as opposed to a non-linear coefficient?\n5. In Figure 1 (right), we observe that different behavioral concepts end in different regions of the scatter plot (there are orange points in the top and orange points around EE in [-0.5, -1.5]. 
Is there any insight or pattern that justify why there are different clusters? Could it be that the model is less prone to use some concepts for specific demographics?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Causally inspired definition and methodology to assess the faithfulness of LLM-generated explanations.\n- Empirical validation of proposed methodology in two different question answering datasets.\n- The finding that GPT-3.5 produces more faithful explanations (at a dataset-level) than the more recent and advanced models (GPT-4o and Claude 3.5 sonnet) is interesting. They also show that unfaithful explanations by GPT-3.5 is more harmful than GPT-4o\n- The analysis concerning the impact of different types of interventions (i.e., remove concept vs swap it with a different value) is interesting, revealing the brittleness of safety guards employed in GPT-3.5 and GPT-4o."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "**Summary**:\nThis paper adopts a causal inference approach to define and evaluate the faithfulness of LLM-generated explanation in the context of two question answering tasks. The obtained results X.\n\n**Main contributions**: The main contributions are the definition and methodology proposed to assess the faithfulness of explanations.\n\n**Methodology**:\n- Key to the methodology is the assumption that a model is faithful if its explanations consist of only concepts that are impactful for the decision (i.e., have large causal effects) (lines 183-185).\n- The authors first compute the causal effect associated with each concept in the explainability (CE). An auxiliary LLM is used to determine the _explainable_ concepts and produce counterfactual perturbations for each input x. CE is then estimated by contrasting the distributional differences between LLM responses when given the modified inputs vs the original inputs.\n- Then the authors determine the prevalence of each concept appearing in the explanation (EE).\n- Finally, the authors determine the linear alignment between CE and EE (dubbed causal concept faithfulness) for each example using the pearson correlation coefficient. Dataset-level faithfulness score is the average over the examples.\n\n**Writing**: Overall the writing is clear and easy to follow! The authors did a good job in exposing the ideas. Consider the following comments to further increase clarity:\n- Add information about when the experiments were run with each model.\n- lines 321-323: you describe the colors for each of the concept categories. However there seems to be a mismatch between the category color in the image and the color described in text."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Insufficient validation of the proposed approach**: the authors mention that their method can be used to quantify and discover interpretable patterns of unfaithfulness. However, there is no guarantee that the methodology detects truly unfaithful concepts. To further ensure the correctness of the approach, it would be nice to show linear agreement between CE and EE in a controlled setting where LLMs are known to only produce faithful explanations (e.g., unbiased setting).\n2. **Important parameters of the experiments are not clear in the paper, which may affect reproducibility of the experiments**. The authors could consider providing additional details about the exact number of perturbations generated for each example, the number of generations used to estimate P(Y|X) (during the definition of CE), the decoding hyperparameters (including the temperature and max number of tokens). Additional details should also be provided about how each response y is parsed – this is particularly relevant given that the evaluated models are known to produce nuanced and semantically equivalent texts.\n2. **Small evaluation set and concerns about generalizability**: for the two question-answering, the authors estimate the causal effects (CE) and explanation-implied (EE) effects for 30 examples (using 50 generations per each dataset). However, it is unclear how robust these results are and whether these would generalize to larger models. Perhaps the authors could show how the faithfulness score varies as the number of datapoints increases, therefore, providing an idea of the variability and stability of the scores.\n3. **Univariate counterfactuals**: if I understood correctly, the proposed framework focuses on perturbing the sentences one concept at a time, irrespective of the correlations between features. However, this fails to account for the correlations between different features (e.g., name of schools or organizations is related to socio-demographic features). 
\n4. **Generalizability to open-source models**: the paper carries analyses on two OpenAI models (GPT-3.5, GPT-4o) and one Anthropic model (Claude-3.5-sonnet). Could be interesting to contextualize the faithfulness of existing open source models (e.g., Llama 3.1) and draw comparisons with closed-source ones."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In Figure 1, it appears as though the correlation between EE and CE would be significantly lower if done independently for each concept, and then averaged. My question is: is calculating faithfulness on a per-concept basis is possible with your method?\n- And a related question, given that pearson correlation only measures to what extent points lie on *a* line, and not on *the* line y=x, is it the most appropriate metric for you use case, did you consider others?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Precise definition of what is meant by faithfulness.\n- 'causal concept faithfulness' as proposed will be useful for the explainability community.\n- The paper is written well, while being information-dense."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "A formulation of faithfulness is presented called causal concept faithfulness. According to this formulation, when a model produces a natural language explanation of its behaviour that appeals to certain concepts (e.g. gender), altering the input to have a different concept value (e.g. changing the gender of person mentioned in the text), should also alter the behaviour. Thus, under this formulation, a model is faithful if and only if it appeals to exactly those concepts that—if altered—would actually change its behaviour. To measure faithfulness the correlation between two metrics is used: (1) the probability that a concept is mentioned in an explanation; and (2) the actual change after altering the inputs, the KL divergence between the model's output distribution before and after alteration is used. \nTo avoid having to measure these values on very large datasets, the authors propose to use a Bayesian hierarchical model, which 'partially pools' information across interventions for related concepts.\nExperiments are performed on two tasks. The first is a task designed to elicit unfaithful explanations. Models perform poorly w.r.t. two out of three concepts. In the second task, models are shown to have limited faithfulness when explaining their answers for medical question answering."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The use of GPT4o to generate counterfactual inputs is not evaluated independently."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Can a faithful explanation be biased? For example, suppose if both CE and EE are high for the example in Table 1, and if the model would have answered Male: 26% Female: 74%, with Explanation References: Traits/Skills: 15% Age: 0% Gender: 85%. This is a clear case of gender bias, but can we say the explanation is faithful here referring to Definition 2.3?\n2. How can we understand the observations if the models memorized these benchmark datasets during their training stage? Does this imply that the model’s reasoning process is so vulnerable to bias that it can even disregard a previously memorized correct answer?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "I really enjoyed reading the paper! It addresses a highly relevant topic of faithfulness in the current landscape of AI. The writing is engaging and clear. \n\nOne of its standout features is its approach to a critical issue: it offers a concrete and measurable method for assessing explanation faithfulness in large language models, an area that has been difficult to define in previous research. By introducing the concept of causal concept faithfulness, the authors provide a way to evaluate how \"honest\" a model's explanations are, while also revealing specific patterns of misleading explanations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates ‘unfaithfulness’ of SOTA LLMs. In this paper, the authors introduce a new metric for measuring the faithfulness of a model, namely, causal concept faithfulness that not only quantifies but also reveals semantic patterns of unfaithfulness. To uncover these patterns, they put this method to the test on two tasks - a social bias task and a medical QA task to demonstrate how decisions made by the models change along with the provided explanations to justify the wrong decisions made by the model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**High level comments**\n1. It remains uncertain whether biases impact all types of reasoning tasks uniformly or if certain domains are more affected than others.\n2. Moreover, the experiments do not specify how these findings may apply beyond classification tasks to biases that could affect other generative tasks.\n\n***Minor comment***\nLine 321 typo -> should be orange for behavior, red for identity"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce a novel method for measuring the faithfulness of explanations given by LLMs."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024walk,\ntitle={Walk the Talk? Measuring the Faithfulness of Large Language Model Explanations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ub9gpx9xw},\nnote={under review}\n}"
},
"abstract": {
"value": "Large language models (LLMs) are capable of generating *plausible* explanations of how they arrived at an answer to a question. However, these explanations can misrepresent the model's \"reasoning\" process, i.e., they can be *unfaithful*. This, in turn, can lead to over-trust and misuse. We introduce a new approach for measuring the faithfulness of LLM explanations. First, we provide a rigorous definition of faithfulness. Since LLM explanations mimic human explanations, they often reference high-level *concepts* in the input question that purportedly influenced the model. We define faithfulness in terms of the difference between the set of concepts that the LLM's *explanations imply* are influential and the set that *truly* are. Second, we present a novel method for estimating faithfulness that is based on: (1) using an auxiliary LLM to modify the values of concepts within model inputs to create realistic counterfactuals, and (2) using a hierarchical Bayesian model to quantify the causal effects of concepts at both the example- and dataset-level. Our experiments show that our method can be used to quantify and discover interpretable patterns of unfaithfulness. On a social bias task, we uncover cases where LLM explanations hide the influence of social bias. On a medical question answering task, we uncover cases where LLMs provide false claims about which pieces of evidence influenced its decisions."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large language models",
"faithful explanations",
"explainability",
"safety",
"counterfactual reasoning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/54c32c06783e40072d28371abc4308858c27cd4f.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Walk the Talk? Measuring the Faithfulness of Large Language Model Explanations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4v4RcAODj9 | DUALFormer: A Dual Graph Convolution and Attention Network for Node Classification | main | Active | Graph Transformers;Node Classification | learning on graphs and other geometries & topologies | 3;5;6;6 | 5;3;5;4 | 2;2;3;3 | 2;2;3;3 | 3;3;4;4 | 5 | 4.25 | 2.5 | 2.5 | 3.5 | -0.246183 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "N.A."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper is well-motivated.\n2. The proposed method is simple and effective.\n3. The inclusion of theoretical analysis strengthens the work.\n4. Extensive experiments show the effectiveness, scalability and robustness.\n5. This paper is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces DUALFormer, a novel Graph Transformer model designed to address scalability challenges and improve local-global information fusion. The approach is both simple and theoretically grounded. Extensive experiments demonstrate DUALFormer’s effectiveness, scalability, and robustness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed method can be interpreted as \"attention on attributes\". I wonder how is it different from the standard self attention. Especially why it can perform better on node classification? And when it is expected to perform better and when not?\n2. Can you provide further analysis, such as case studies, to further explain the semantic meanings of the \"attention on attributes\"?\n3. Can you provide further analysis and empirical studies to show that the GNNs after the graph Transform can indeed learn the localities in graphs?\n\nI will raise my score if my concerns are properly addressed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The first question concerns the reasonableness of applying softmax to the global correlations between features.\n\n - In standard self-attention, $ \\mathbf{O} = \\exp(\\text{sim}(\\mathbf{Q}, \\mathbf{K}))\\mathbf{V} $ (Eq. 6).\n - Through linearized attention, $ \\mathbf{O} = \\phi(\\mathbf{Q}) \\phi(\\mathbf{K})^\\top \\mathbf{V} $ (Eq. 11), where each element in $ \\phi(\\mathbf{Q}) \\phi(\\mathbf{K})^\\top $ is non-negative, representing attention weights (global dependencies between nodes).\n - By the commutative property of matrix multiplication, $ \\mathbf{O} = \\phi(\\mathbf{Q}) (\\phi(\\mathbf{K})^\\top \\mathbf{V}) $, so we can interpret $ (\\phi(\\mathbf{K})^\\top \\mathbf{V}) $ as a correlation matrix (with elements that can be positive or negative).\n\n However, in Eq. 13, $ \\mathbf{V} \\text{softmax}(\\mathbf{Q}^\\top \\mathbf{K}) $, i.e., $ \\mathbf{Q} \\text{softmax}(\\mathbf{K}^\\top \\mathbf{V}) $, differs from $ \\phi(\\mathbf{Q}) (\\phi(\\mathbf{K})^\\top \\mathbf{V}) $ because elements in $\\text{softmax}(\\mathbf{K}^\\top \\mathbf{V}) $ are all non-negative, unlike those in $ (\\phi(\\mathbf{K})^\\top \\mathbf{V})$. Could you clarify these differences and explain why it is reasonable to replace $ \\phi(\\mathbf{Q}) (\\phi(\\mathbf{K})^\\top \\mathbf{V}) $ with $ \\mathbf{Q} \\text{softmax}(\\mathbf{K}^\\top \\mathbf{V}) $?\n\n- The second question pertains to the interpretation of the proposed global attention. The method appears to aggregate information along the feature dimension, unlike previous approaches that gather global information across all or most nodes in a graph. For a one-dimensional feature, $ \\mathbf{V} \\text{softmax}(\\mathbf{Q} \\mathbf{K}^T) $ in Eq. 13 reduces to $ \\mathbf{V} \\cdot \\alpha $, where $ \\alpha $ is a scalar and $ \\mathbf{V} \\in \\mathbb{R}^{n} $. How can this be understood as gathering information from a global perspective?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The writing is generally clear and accessible, making the paper readable and easy to follow.\n- The proposed method is both understandable and implementable, yet effective. It performs well on several datasets.\n- The paper includes diverse experimental analyses, such as node classification, node property prediction, ablation studies, and parameter sensitivity analyses. Furthermore, the authors offer theoretical guarantees to support the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "To address the scalability limitations of graph transformers (GTs) and the challenge of balancing local and global information, this paper introduces DualFormer, a novel GT architecture. DualFormer calculates global attention along the feature dimension, enabling the model to perform effectively and efficiently on large graphs while maintaining strong performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The motivation for the study is not fully convincing. Further details are provided in the questions below.\n- Since the paper emphasizes the method’s scalability, additional experiments on larger graphs would reinforce this claim. Suggested datasets include *Roman-Empire*, *Question[1]*, *Wiki*, and *ogbn-papers100M*. Moreover, the GNN baselines in Tables 2 and 3 are outdated, which may reduce the persuasiveness of the results. For instance, the statement, “Most GTs consistently show superior performance over GNNs across all datasets” (line 451), would be more convincing if compared with recent GNN baselines, such as *ChebNetII[2]* and *OptBasis[3]*, to present a more comprehensive evaluation.\n- Minor Issues: There are a few typographical errors, such as \"abov\" (line 182). Consistent notation throughout the paper is also preferable. For instance, in line 168, there is a \"$\\times$\" symbol between a scalar and a matrix, but not in line 216. Additionally, line 191 includes a \"$\\cdot$\" between matrices, whereas line 167 does not.\n\n[1] A critical look at the evaluation of GNNs under heterophily: Are we really making progress? In ICLR 2023.\n\n[2] Convolutional Neural Networks on Graphs with Chebyshev Approximation, Revisited. In NeurIPS 2022.\n\n[3] Graph Neural Networks with Learnable and Optimal Polynomial Bases. In ICML 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See the above 'Weaknesses'"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1) The motivation for the dual design of local and global modules in this paper is clear and interesting.\n2) The model DUALFormer is simple and efficient with a solid theoretical foundation. \n3) The paper offers extensive experimental validation across various datasets. \n4) The paper is well-organized and easy to read."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces DUALFormer, a graph transformer that tackles the challenges of the scalability and trade-off between local and global expressivity faced by current models. The motivation is to model the global dependencies among nodes by approximately characterizing the correlations between features. DUALFormer adopts a simple, intuitive design that includes local graph convolutional networks operating on the node dimension and a global self-attention mechanism operating on the feature dimension. The effectiveness and efficiency of the proposed DUALFormer are demonstrated in experimental evaluations across node classification and node property prediction tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The paper has some minor errors that need fixing. For example, Table 2 misses the mean value for the GraphGPS model on the Citeseer dataset. \n2) To enhance readability, Equation 13 should be split into two or three equations. \n3) The model DUALFormer places the GNN layers, such as the SGC layers, after the attention layers. What is the rationale behind this design? Is it possible to reverse this order? \n4) Figure 4 shows that the model utilizing APPNP outperforms the one using SGC in the Cora and Pubmed datasets. What accounts for this performance difference?\n5) The effect of certain hyper-parameters, such as the parameter $\\alpha$ in Equation 13, on performance has yet to be unverified. \n6) The paper does not mention any plans to open-source the code."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I have the following questions:\n1. As the authors claim in Eq. 13, the proposed method only captures the feature-to-feature correlations. In my opinion, it is not the global information on the graph since it is unable to capture the relations between nodes. Why do authors claim the proposed method can capture the global information on the graph?\n2. According to the paper, the efficiency is the most important contribution of the proposed method. I think the authors express this point in a wrong way. Firstly, the authors claim that the computational complexity of the proposed method is $O(n)$ which is obviously wrong. Based on Eq. 14, the calculation involves the adjacency matrix. Hence, the computational complexity of this part is $O(E)$ and it is cannot be ignored since $|E|>|N|$ (even $|E|>>|N|$ on some graphs). Then, the authors only compare the time cost of each epoch to demonstrate the efficiency which is not reasonable. I think the total training time cost is the most important metric to demonstrate the efficiency of a method. So, the authors should report the overall training cost of each method for efficiency study, especially on large-scale graphs. Maybe authors can refer to the settings in NAGphormer. For instance, can the proposed method achieve more efficient and more powerful performance than NAGphormer on Aminer, Reddit and Amazon2M?\n3. As shown in Section 4.2, DUALFormer relies on the sampling strategy to perform on large-scale graphs, just like advanced linear graph Transformers. Hence, I think the GPU memory comparison is questionable since it is largely related to the batchsize. Do authors set the same batch for each method?\n4. The analysis of the $\\alpha$ is missing. According to Table 5, the performance of DUALFormer could be sensitive to the value of $\\alpha$. So, the parameter analysis of $\\alpha$ should be added into the experiment section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper is easy to follow.\n2. The authors provide the theoretical analysis.\n3. The results on various datasets seem to be promising."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper develop a new architecture based on GNNs and modified Transformers. The authors conduct expensive experiments as well as theoretical analysis to show the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The comparison of efficiency study seems to be not reasonable.\n2. The key contributions of the proposed method are not clear.\n3. The complexity analysis of the proposed method seems to be wrong. "
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dualformer,\ntitle={{DUALF}ormer: A Dual Graph Convolution and Attention Network for Node Classification},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4v4RcAODj9},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph Transformers (GTs), adept at capturing the locality and globality of graphs, have shown promising potential in node classification tasks. Most state-of-the-art GTs succeed through integrating local Graph Neural Networks (GNNs) with their global Self-Attention (SA) modules to enhance structural awareness. Nonetheless, this architecture faces limitations arising from scalability challenges and the trade-off between capturing local and global information. On the one hand, the quadratic complexity associated with the SA modules poses a significant challenge for many GTs, particularly when scaling them to large-scale graphs. Numerous GTs necessitated a compromise, relinquishing certain aspects of their expressivity to garner computational efficiency. On the other hand, GTs face challenges in maintaining detailed local structural information while capturing long-range dependencies. As a result, they typically require significant computational costs to balance the local and global expressivity. To address these limitations, this paper introduces a novel GT architecture, dubbed DUALFormer, featuring a dual-dimensional design of its GNN and SA modules. Leveraging approximation theory from Linearized Transformers and treating the query as the surrogate representation of node features, DUALFormer \\emph{efficiently} performs the computationally intensive global SA module on feature dimensions. Furthermore, by such a separation of local and global modules into dual dimensions, DUALFormer achieves a natural balance between local and global expressivity. In theory, DUALFormer can reduce intra-class variance, thereby enhancing the discriminability of node representations. Extensive experiments on eleven real-world datasets demonstrate its effectiveness and efficiency over existing state-of-the-art GTs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Transformers",
"Node Classification"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/2c2ea3c30784e242967919a718d627d9952bc717.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "DUALFormer: A Dual Graph Convolution and Attention Network for Node Classification"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4v4nmYWzBa | REVISITING MULTI-PERMUTATION EQUIVARIANCE THROUGH THE LENS OF IRREDUCIBLE REPRESENTATIONS | main | Active | deep weight spaces;permutation equivariance;irredicible representations. | learning on graphs and other geometries & topologies | 3;6;6;6 | 3;4;3;4 | 2;3;4;3 | 2;2;2;3 | 2;4;3;3 | 5.25 | 3.5 | 3 | 2.25 | 3 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see Weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper introduces a fresh perspective on equivariant layer characterization by applying irreducible representations and Schur’s lemma to obtain simplified derivations of established models, such as DeepSets, 2-IGN, and Deep Weight Space (DWS) networks.\n\n2. The theoretical foundations are well-developed. The work provides a complete characterization of equivariant layers in the context of unaligned symmetric sets, which is an interesting theoretical contribution."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an alternative approach for characterizing equivariant linear layers in neural networks that process permutation and related group representations. The paper derives a simpler method for obtaining existing models such as DeepSets, 2-IGN, and Deep Weight Space networks, based on irreducible representations and Schur’s lemma. The proposed framework also considers unaligned symmetric sets, that build upon equivariance to the wreath product of groups."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The presentation and flow of the paper could be improved. The claims and results are challenging to follow, which may limit the broader audience’s ability to appreciate the work.\n\n2. The paper’s contributions lack clarity. The paper offers an irreducible-based derivation for existing results and characterizes equivariant functions on unaligned symmetric elements, but the impact and relevance of these contributions remain unclear. It is not evident how these results benefit the design of novel architectures or enhance our understanding of current ones. This limits the significance of the work and may fall short of ICLR’s standards.\n\n3. The empirical evaluation is limited, and the results are not compelling. Using synthetic data for anomaly detection does not sufficiently demonstrate the method’s practical applicability, as the task is relatively unchallenging and does not show the strengths of the proposed approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Can the method be generalized to higher-order $k$-IGN in a principled manner? Can you briefly describe the claim that ``using irreducibles could lead to new equivariant models with intermediate irreducible features of lower dimensions''?\n\n* Can you conduct more experiments on real-world and large-scale datasets, and include more baseline? In addition, can you intuitively explain why non-Siamese layers help in these tasks?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper offers the irreducible representations perspective for deriving classical models like DeepSets, 2-IGN and DWS networks. Some derivations are simpler than the original ones. The writing is clear and easy to follow. I check with the details and they are sound."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies equivariant linear layers for representations of permutations and related groups from a novel irreducible representations perspective. The authors provide an alternative derivation for models including DeepSets, 2-IGN, and Deep Weight Space (DWS) networks. The theory is then extended to unaligned symmetric sets, showing that there is a vast number of additional non-Siamese layers in certain settings. Experiments show that additional non-Siamese layers improve the performance in tasks like graph anomaly detection, weight space alignment, and learning Wasserstein distances."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* While the new derivations align with original methods, the resulting models are not new. The concept of ``irreducible representation'' is also well studied, so the contribution of the paper lies mainly in bridging two topics, which is interesting but natural. In particular for equivariant graph layers, the authors only provide derivations for 2-IGN. As admitted in the limitation section, the paper does not involve higher-order $k$-IGN. The author should explain whether their method is broadly applicable for these networks based on tensor representations, or need case-by-case derivations.\n\n* Although this is a theoretical paper, the experiments could be improved. More baselines and more real-world tasks are strongly encouraged."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. I find the methodology presented in L135-155 valuable to the research community due to its generalizability beyond the provided examples, most of which are already characterized. For this reason, would it be possible to add a *brief* discussion on the generalization of this methodology to strengthen the impact of this contribution and broaden its relevance to a wider community? See the following for more specific questions.\n2. Computing a basis compatible with the irreducible representation decomposition can be challenging. Does this difficulty limit the methodology’s generalization? Are there similar technical challenges for characterizing $k$-IGN layers for $k > 2$?\n3. Can this methodology be applied to other groups beyond $S_n$ and wreath products? If so, could you briefly provide a few examples?\n4. Representations of the symmetric group are relevant in machine learning and its irreducible representation are absolutely irreducible. In contrast, other relevant groups, such as finite cyclic groups, have real irreducible representations that are not absolutely irreducible. Could the framework presented here be extend to these cases? What potential challenges do you envision in extending to non-absolutely irreducible representations?\n5. Could you elaborate on the future directions for $k$-IGNs presented in the conclusions (L537-539)?\n\n**Minor Issues (No Impact on Recommendation):**\n- L073: I recommend specifying \"$2$-IGNs\" for transparency.\n- L183: Is the presentation of $P_\\tau$ unnecessary?\n- L340: The wreath product of groups is introduced but not defined in detail; as this operation is uncommon in machine learning literature, additional explanation would benefit Section 5. 
Also, consider demonstrating that equation 7 forms a linear representation of this group, perhaps in the appendix.\n- L420: Typo, “is prove”.\n- L379 and L1030: I cannot understand why $\\mathcal{V}^k$ is an irreducible representation of $\\mathcal{G}^k$; is it instead irreducible for $\\mathcal{G} \\wr S_n$?\n- L1040: The closing curly bracket is missing."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- Clear presentation and notation, supported by rigorous proofs.\n- The methodology is both valuable and simple, with potential to generalize beyond the examples presented.\n- A novel and complete characterization of representations for unaligned symmetric elements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel methodology for characterizing equivariant linear layers for permutation representations, utilizing classical results from representation theory. Specifically, it provides an alternative characterization of equivariant linear layers for DeepSets, $2$-IGNs, and DWSNets, as well as the first comprehensive characterization of equivariant linear layers for unaligned symmetric elements. Importantly, the authors identify novel non-Siamese layers and empirically assess their impact."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Lacks discussion on extending the approach to groups and representations beyond the few presented.\n- In particular, an appropriate discussion on characterizing the more expressive layers of $k$-IGNs for $k>2$ is missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "I would like the authors to comment on the above points regarding novelty and significance of experiments. \n\nMy current opinion is that the work is exceptionally well-written, and bears several contributions to the geometric deep learning literature. However, I am concerned with the novelty and significance, as outlined above. Still, I am leaning towards accepting the paper, but would like to hear from the authors about my points of criticism."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "-The paper is exceptionally well written. The language is clear and concise, the sections are structured, and the mathematical formalism/notation is elegant. \n\n-The problem considered is a fundamental one in machine learning literature. Constructing (linear) equivariant maps lies at the heart of geometric deep learning, which has been successful in several applications. \n\n-The proposed solution is general, as it applies, in principle, to any input/output group representation. Several existing frameworks are phrased under the same paradigm, contributing with structure and clarity to the geometric deep learning literature."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper considers the problem of constructing linear equivariant layers for groups acting (linearly) on input and output spaces. Specifically, it proposes to exploit the decomposition into irreducible group representations and then appealing to Schur’s Lemma, which reduces the problem to choosing coefficients for pairs of isomorphic representations. Several specific instances are analyzed, such as permutation groups in the context of graph neural networks, groups acting on weights of deep networks, and wreath products acting on products of representations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I believe that the proposed approach via Schur’s Lemma comes with disadvantages. To begin with, using Schur’s Lemma to construct equivariant linear maps is not novel in the geometric deep learning community. It is a rather well-known technique – see, for example, Behboodi et al., Section 3.2. This is a major concern, since Schur’s Lemma represents a core point of this work; the other contributions amount to rephrasings of known frameworks from the literature under the lenses of Schur’s Lemma. Moreover, Schur’s Lemma has some restrictions. First, it requires the decomposition into irreducible representations to be known a priori, which is not always the case. Such decomposition is challenging to compute algorithmically for general groups and representations. Second, Schur’s Lemma applies naively only to complex representations (i.e., over $\\mathbb{C}$). As the authors mention, this is not an issue for permutation groups (appendix B), but it can be for other groups. It is still possible to apply Schur’s Lemma to arbitrary real representations of arbitrary groups, but this involves subtleties – see Behboodi et al., Section 8. \n\nI also find the experimental section rather weak. The experiments reported only consider ideal equivariant tasks, i.e., scenarios where the ground-truth function is equivariant. The experimental results show that adding equivariant layers to the network improves (generalization) performance, as compared to non-equivariant architectures. This is not surprising, since in these cases the inductive bias given by equivariance aligns perfectly with the structure of the task. In typical real-world scenarios (e.g., image classification), the (highly-noisy) ground-truth function is instead not exactly equivariant, or it is not equivariant on all the input data. In my opinion, it would be more informative and less trivial to test the models on these types of real-world tasks. 
The equivariance bias is often still beneficial in terms of generalization – as works in geometric deep learning have extensively shown – but empirical investigations are required to assess this carefully. \n\nMinor typos: \n\n-The paragraph title on line 86 is not capitalized, while the one on line 100 is. \n\n-The tables in section 6 exceed the margins of the paper.\n\n\nBehboodi et al., A PAC-Bayesian Generalization Bound for Equivariant Networks, NeurIPS 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Characterization of all permutation equivariant linear mappings using irreducibles approach on several use-cases."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024revisiting,\ntitle={{REVISITING} {MULTI}-{PERMUTATION} {EQUIVARIANCE} {THROUGH} {THE} {LENS} {OF} {IRREDUCIBLE} {REPRESENTATIONS}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4v4nmYWzBa},\nnote={under review}\n}"
},
"abstract": {
"value": "This paper explores the characterization of equivariant linear layers for representations of permutations and related groups. Unlike traditional approaches,\nwhich address these problems using parameter-sharing, we consider an alternative\nmethodology based on irreducible representations and Schur’s lemma. Using this\nmethodology, we obtain an alternative derivation for existing models like DeepSets,\n2-IGN graph equivariant networks, and Deep Weight Space (DWS) networks. The\nderivation for DWS networks is significantly simpler than that of previous results.\nNext, we extend our approach to unaligned symmetric sets, where equivariance\nto the wreath product of groups is required. Previous works have addressed this\nproblem in a rather restrictive setting, in which almost all wreath equivariant layers\nare Siamese. In contrast, we give a full characterization of layers in this case and\nshow that there is a vast number of additional non-Siamese layers in some settings.\nWe also show empirically that these additional non-Siamese layers can improve\nperformance in tasks like graph anomaly detection, weight space alignment, and\nlearning Wasserstein distances."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"deep weight spaces",
"permutation equivariance",
"irredicible representations."
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/37b7ce376b8cc287941bdd0cb02b074f952702fb.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/0d5a43a77051139145697ec92923f0c11bccab19.zip"
},
"title": {
"value": "REVISITING MULTI-PERMUTATION EQUIVARIANCE THROUGH THE LENS OF IRREDUCIBLE REPRESENTATIONS"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4vPC6Aj6N7 | Multi-Agent Reinforcement Learning from Human Feedback: Data Coverage and Algorithmic Techniques | main | Active | multi-agent reinforcement learning;reinforcement learning with human feedback;dataset coverage | reinforcement learning | 5;5;6;6 | 4;4;2;3 | 3;2;3;3 | 2;3;3;2 | 2;3;3;3 | 5.5 | 3.25 | 2.75 | 2.5 | 2.75 | -0.904534 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why was VDN specifically chosen as the base MARL algorithm, given its known limitations in representation capacity? How would the proposed approach perform with more advanced MARL algorithms like MAPPO, IPPO, or QMIX?\n2. Given that the experiments were conducted only on MPE environments (Spread-v3, Tag-v3, Reference-v3), how would the method perform on more complex MARL benchmarks? What challenges do you anticipate, and how sensitive might performance be to the choice of hyperparameters $\\alpha$ and $\\beta$?\n3. What policy was used to generate responses for collecting preference feedback?\n4. How was the preference feedback collected? Was it synthetic, based on true environment rewards, or did it come from real human preferences? These details are crucial for reproducibility, a deeper understanding of the approach, and identifying potential biases in the preference data.\n5. The inherent dependence between the policy used to train the reward model and the policy being learned is not addressed in the paper. For instance, in the single-agent setting (see [4]), this dependence can be significant. How does the proposed approach handle this issue?\n6. How does the quality of the learned reward function vary with different levels of expertise and sparsity in preference feedback?\n\n[1] Yu, Chao, et al. \"The surprising effectiveness of ppo in cooperative multi-agent games.\" Advances in Neural Information Processing Systems 35 (2022): 24611-24624.\n\n[2] De Witt, Christian Schroeder, et al. \"Is independent learning all you need in the starcraft multi-agent challenge?.\" arXiv preprint arXiv:2011.09533 (2020).\n\n[3] Rashid, Tabish, et al. \"Monotonic value function factorisation for deep multi-agent reinforcement learning.\" Journal of Machine Learning Research 21.178 (2020): 1-51.\n\n[4] Chakraborty, Souradip, et al. 
\"PARL: A Unified Framework for Policy Alignment in Reinforcement Learning from Human Feedback.\" The Twelfth International Conference on Learning Representations."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* This paper makes novel contributions to RLHF within multi-agent systems by framing the task as finding a Nash equilibrium in general-sum games and introducing innovative techniques for reward regularization and dataset distribution-based pessimism.\n* The theoretical results are comprehensive and well-justified, effectively supporting the paper’s claims.\n* The paper is generally well-written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the important and timely problem of multi-agent reinforcement learning from human feedback (MARLHF). The authors examine both theoretical and practical aspects of MARLHF, demonstrating that single policy coverage is insufficient and emphasizing the need for unilateral dataset coverage. To address the issues of sparse and spiky reward learning typical in standard RLHF, they propose two primary techniques: (1) mean squared error regularization to promote uniform reward distribution, and (2) an additional reward term based on state-action pair density within the dataset to introduce pessimism, using an imitation learning-based approach for density modeling. The final policy is then trained using the VDN algorithm. Overall, this MARLHF approach represents a significant step toward preference-based reinforcement learning in multi-agent systems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The empirical validation of the approach is limited, as the paper only includes experiments on three simple MPE environments. Since the authors utilized JAXMARL, testing on more realistic and complex environments from the JAXMARL API, such as Overcooked, Hanabi, or StarCraft, would strengthen the paper’s claims.\n* The comparison with MARL baselines is insufficient, focusing only on VDN despite its known limitations in representation capacity. Conducting ablation studies with other MARL algorithms, such as MAPPO[1], IPPO[2], and QMIX[3], would provide more validations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How would your approach need to be modified to handle inconsistent or non-transitive preferences that often occur with real human feedback?\nWhy do you call the paper MARLHF when there is clearly no HF?\nThe practical implementation differs significantly from the theoretical algorithm - can you explain this gap and discuss whether any theoretical guarantees carry over?\nGiven the relative simplicity of the tasks, why were only 5 random seeds used for the experiments?\nWhy weren't statistical significance tests performed to validate the comparative results?\nHow well does your approach scale with increasing numbers of agents? \nIn cases where mixed-skill policies outperform pure expert policies, can you verify that this reflects genuine improvement rather than issues with reward modeling?\nHave you tested MARL algorithms other than VDN?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "In terms of the proofs, there is a simple but convincing proof by counterexample provided for theorem 1 (not contradiction, as stated).\nThere is an explicit bounds found on the Nash-gap. \n\nHyperparameters used in the training are provided, multiple seeds are used and results that don’t support the desired conclusion are presented. Multiple environments are tested, and clear ablation studies are done.\n\nThe paper makes an interesting theoretical contribution by establishing fundamental results about Multi-Agent Reinforcement Learning from Human Feedback (MARLHF). The authors prove why single-policy coverage is insufficient and demonstrate that unilateral coverage is both necessary and sufficient for learning Nash equilibria. These theoretical foundations are presented with clear proofs that are well constructed. These theoretical results then explicitly inform the design of the framework which is clearly stated and explained.\n\nThe empirical work is comprehensive and well-designed, testing their approach across three distinct multi-agent scenarios that each present different challenges (cooperative target coverage, coordinated pursuit, and communication-dependent navigation). The experiments validate both the theoretical insights about dataset coverage and the effectiveness of their algorithmic innovations. Their ablation studies are thorough and give clear evidence for the value of their MSE regularization and dataset distribution-related penalties. The authors also introduce a practical standardization technique for hyperparameter tuning that works across different environments.\n\nThe clarity of the experimental setup makes the work also highly reproducible"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the problem of trying to learn human preferences (this behaviour is better than that behaviour) in a multi agent RL setup. In this case satisfactory learning means a Nash-equilibrium is reached between all policies. The authors positions the paper as an initial study into Multiagent Reinforcement Learning from Human Feedback.\n\nThe paper shows how pure expert policies are not always the best for maximising overall score, and that mixing in less expert policies in some cases causes an overall higher score to be reached in the MARLHF case. This is proved, theoretically. They also show that it is often easier to learn what policies score higher by having unilaterally divergent policies acting in the environment, where a single agent is using a sub-optimal policy. The authors call this approach unilateral coverage. By having this unilateral agent in the environment it becomes simpler to observe what policies may be truly optimal within the environment. In addition upper complexity bounds are established for Nash Equilibrium in effective MARLHF.\n\nThe process to implement this approach is to learn a reward function from a preference dataset while mitigating extrapolation errors with a pessimism term and then determining a final policy. Human Feedback is itself simulated using the Bradley-Terry-Luce model to rank solutions.\n\nThe authors make 2 particular contributions to implement their insights:\nApplying MSE regularisation to the training data to distribute rewards more evenly across timesteps, which helps to avoid temporal concentration. This essentially takes the sparse reward signals from the Bradley-Terry-Luce model and spread them out to produce reward over more timesteps.\nDataset distribution-based penalties are used to constrain exploration to known regions of the state space\n\nTheir empirical evaluation spans three multi-agent scenarios: cooperative target coverage, coordinated pursuit, and communication-dependent navigation. 
They show that incorporating imperfect policies is helpful for learning higher scoring policies during training. In harder tasks, unilateral coverage and diversity become more important and more diverse datasets led to lower variance in training outcomes. The authors also introduce a principled standardization technique for hyperparameter tuning across environments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main weakness is that despite the paper's title and framing, there is no actual human feedback involved in any of the experiments. Instead, the authors simulate preferences using the Bradley-Terry-Luce model based on known reward functions from the environments. This is a significant limitation because real human preferences are likely to be much noisier, inconsistent, and potentially non-transitive compared to their simulated preferences. The paper would be more accurately titled as \"Multi-Agent Reinforcement Learning from Simulated Preferences\" or similar, and should more explicitly acknowledge this limitation and discuss how their approach might need to be modified for real human feedback.\n\nWhile thorough, the theoretical results rely heavily on assumptions that may not hold in practice. The paper assumes linear Markov games and works with known feature mappings, but doesn't discuss enough how these assumptions might limit real-world applicability. Additionally, although the paper proves that their theoretical algorithm converges to Nash equilibria, the practical implementation uses different algorithms (VDN-based) with no theoretical guarantees. This gap between theory and practice is not sufficiently discussed. The paper also doesn't explore whether the Nash equilibrium is actually desirable in all cases - in some scenarios, other solution concepts might better align with human preferences. This again is one of the major weaknesses with the unclear framing.\n\nThe experimental evaluation, while systematic, is limited to relatively simple environments in the Multi-Agent Particle Environment (MPE) framework. These environments, while useful for testing basic concepts, are far simpler than real-world multi-agent scenarios. The paper doesn't adequately discuss how their approach might scale to more complex environments or to scenarios with larger numbers of agents. 
Their results showing that mixed-skill policies can outperform pure expert policies raise questions about whether their reward modeling approach is capturing the true objectives of the tasks. \n\nAnother important weakness in the paper's empirical evaluation is the absence of statistical significance testing. Although results with means and standard deviations across 5 random seeds are given, they don't perform any statistical analysis to validate the conclusions. This is particularly problematic given the small sample size - with only 5 seeds, the reliability of their comparisons is questionable. The paper lacks hypothesis tests. This makes it difficult to determine if the reported differences between approaches are statistically significant, especially in cases where the differences appear small relative to their standard deviations. For example, in Spread-v3, it's unclear whether the difference between \"Mix-Unilateral\" (-20.98 ± 0.56) and \"Mix-Expert\" (-21.11 ± 1.16) is meaningful. The lack of statistical rigor undermines the strength of the paper's empirical conclusions and the claims made about the benefits of their approaches."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. This paper analyzes the RLHF setting; however, the definition of the performance metric remains unchanged from the RL setting without KL regularization. Could the authors provide further clarification on this?\n\n2. Could the authors highlight the novel aspects of the current theoretical analysis that differentiate it from the offline MARL setting?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The theoretical analysis presented in this paper is solid and clear, providing a sound theoretical bound for the proposed method to solve MARLHF. Additionally, the authors conduct various experiments to demonstrate the effectiveness of the proposed method, even when applied to offline datasets lacking uniform coverage."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study introduces Multi-Agent Reinforcement Learning from Human Feedback (MARLHF) to find Nash equilibria from preference-based data with sparse feedback. A key technique in this paper is to use the MSE regularization for uniform rewards and a pessimism-based penalty—to improve stability and performance, enabling more effective preference-based multi-agent systems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The discussion section on related works is incomplete. The authors should provide a more thorough discussion of recent advancements in MARL and offline RLHF ([1]-[4]). Additionally, the paper emphasizes the importance of incorporating reward regularization in the objective function for the current task. However, similar ideas have been adopted in different contexts and should be discussed carefully ([3]-[6]).\n\n2. The current experiments primarily showcase different variants of the proposed methods and include an ablation study. Could the authors include more baseline methods for comparison? Additionally, incorporating more tasks (e.g., five tasks) would strengthen the findings and provide greater convincing power for readers.\n\n3. The theoretical analysis currently focuses solely on the linear function approximation setting, which may not be realistic given the use of neural networks in the experiments. Could the authors extend the analysis to accommodate general function approximations, or clarify how the experimental setup meets the requirements of linear function approximation?\n\n4. In Line 300, it seems that someone even left comments colored in blue, which may leak the information of the authors. It is suggested that the authors should double-check the submitted draft to avoid this careless mistake.\n\n5. In Line 276, the reference to \"an approximate Nash equilibrium policy\" in the theorem lacks clarity, as it does not illustrate the approximation error in relation to the size of the offline dataset. The authors should expand on the implications of the derived bound and compare their results with existing theoretical findings in the offline RL and MARL literature.\n\n\n[1] Wang, Yuanhao, et al. \"Breaking the curse of multiagency: Provably efficient decentralized multi-agent rl with function approximation.\" The Thirty Sixth Annual Conference on Learning Theory. PMLR, 2023.\n\n[2] Xiong, Nuoya, et al. 
\"Sample-Efficient Multi-Agent RL: An Optimization Perspective.\" The Twelfth International Conference on Learning Representations.\n\n[3] Liu, Zhihan, et al. \"Provably mitigating overoptimization in rlhf: Your sft loss is implicitly an adversarial regularizer.\" arXiv preprint arXiv:2405.16436 (2024).\n\n[4] Cen, Shicong, et al. \"Value-Incentivized Preference Optimization: A Unified Approach to Online and Offline RLHF.\" arXiv preprint arXiv:2405.19320 (2024).\n\n[5] Mete, Akshay, et al. \"Reward biased maximum likelihood estimation for reinforcement learning.\" Learning for Dynamics and Control. PMLR, 2021.\n\n[6] Xie, Tengyang, et al. \"Bellman-consistent pessimism for offline reinforcement learning.\" Advances in neural information processing systems 34 (2021): 6683-6694."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Figure 1: $\\pi_{ref}$, while mentioned in the caption, doesn't seem to be appearing in the figure. Do you mean $\\pi_b$?\n- What does the blue text mean in Lines 300-301?\n- Table 2: The claim in the capture, namely, \"in more challenging environments, such as Tag-v3, dataset diversity plays a substantially more significant role\", seems inconsistent with the data in the table, where both the mean and the variance of the return of Tag-v3 reach their best in the Pure-Expert dataset which has the least diversity.\n- Table 2: The claim in Lines 419-420, namely, \"In more challenging tasks, as reflected by higher MSE, the importance of unilateral coverage and diversity becomes more pronounced.\", does not seem very obvious from the table, where the diversified and the mix-unilateral dataset achieve the best performance when (Spread-v3 for Mix-unilateral and Reference-v3 for Diversified) the corresponding MSE is low.\n- Table 3: Why does setting $\\beta$ to a magnitude as large as 100 yield such good results? Doesn't the penalty term completely dominate the loss? Further, it seems strange to me that setting $\\beta$ across such a wide range (from 1 to 100) can yield almost the same result, especially when the dataset is the diversified one which contains a large fraction of low-return trajectories.\n- Figure 2: What does the x-axis represent?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- I am not an expert in RLHF, but to my best knowledge, this is the first work for aligning multi-agent systems with human feedback.\n- The theoretical claims are concise and seems to be practically useful.\n- The experiments are well designed for the purpose of verifying the proposed theoretical claims and empirical techniques."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper seeks to establish theoretical foundations and make empirical validations for the new research field, Multi-Agent Reinforcement Learning from Human Feedback (MARLHF). The core theoretical contribution is proving that single-policy coverage is insufficient for learning approximate Nash equilibrium policies and that unilateral policy coverage is sufficient to do so. The empirical contribution lies in two techniques, namely, reward regularization which smoothens the reward distribution, and dataset distribution-based pessimism which handles the extrapolation errors. The experiments are designed to verify the correctness of the theoretical claims and the effectiveness of the empirical techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The experiments are conducted on a limited range of tasks, which may not be sufficient to verify the generality of the theoretical claims and empirical techniques.\n\nAs far as I can tell, there are no other obvious weaknesses of this paper. Potential weaknesses concerning the consistency between the experiment results and the corresponding conclusions are listed as questions below."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We study Multi-Agent Reinforcement Learning from Human Feedback (MARLHF) with preference-only offline data."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024multiagent,\ntitle={Multi-Agent Reinforcement Learning from Human Feedback: Data Coverage and Algorithmic Techniques},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4vPC6Aj6N7},\nnote={under review}\n}"
},
"abstract": {
"value": "We initiate the study of Multi-Agent Reinforcement Learning from Human Feedback (MARLHF), exploring both theoretical foundations and empirical validations. We define the task as identifying Nash equilibrium from a preference-only offline dataset in general-sum games, a problem marked by the challenge of sparse feedback signals. Our theory establishes the upper complexity bounds for Nash Equilibrium in effective MARLHF, demonstrating that single-policy coverage is inadequate and highlighting the importance of unilateral dataset coverage. These theoretical insights are verified through comprehensive experiments. To enhance the practical performance, we further introduce two algorithmic techniques. \n(1) We propose a Mean Squared Error (MSE) regularization along the time axis to achieve a more uniform reward distribution and improve reward learning outcomes. \n(2) We propose an extra penalty based on dataset distribution to incorporate pessimism, enhancing stability and effectiveness during training.\nOur findings underscore the multifaceted approach required for MARLHF, paving the way for effective preference-based multi-agent systems."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"multi-agent reinforcement learning",
"reinforcement learning with human feedback",
"dataset coverage"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0606799dd9e8bc840831f00ae37ad50960381867.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/ad5a85e9f3ecce7bed7cd27bb74d1a95a575ef9b.zip"
},
"title": {
"value": "Multi-Agent Reinforcement Learning from Human Feedback: Data Coverage and Algorithmic Techniques"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4vm6Nn2DW9 | Exploring Temporal Semantic for Incomplete Clustering | main | Active | Temporal semantic;incomplete clustering;human motion segmentation | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;6;6 | 3;3;3;3 | 2;2;3;3 | 2;2;3;3 | 2;2;4;3 | 4.5 | 3 | 2.5 | 2.5 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Why do you not include any temporal clustering methods (possibly after interpolation)? How do temporal clustering methods perform on this type of data?\nWhich of the methods in related work are applicable to your evaluation scenario? You could consider including a table that lists core properties, indicating which of them are met by which competitor, instead of (long) textual descriptions."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "S1. The paper addresses an important problem of handling missing values.\nS2. The empirical evaluation makes use of five different benchmarking data sets for motion segmentation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a method for clustering of sequential data with missing values. It incorporates temporal constraints to model the expected continuity across time. It characterises the method theoretically, and evaluates on different motion segmentation benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. The presentation of the paper is weak, lacking discussion, which makes it hard to follow. Concretely, the scope of the paper is unclear initially, going from clustering of missing features to subspace clustering and back. After that, the method and its properties are presented, without discussing how it relates to existing work, what motivates each step, and whether there are design alternatives.\nW2. The discussion of related work fails to convey which methods are related in terms of applicability to the problem under study or in terms of method similarities, and which are less closely related. Instead, the related work section lists technical aspects of different methods, without any assessment as to their suitability for the problem under study.\nW3. The empirical study is weak. In the experimental evaluation, only methods for clustering with missing data are studied, but none for clustering temporal data. As the datasets under study are characterised by strong temporal signals, the competitors are thus very weak baselines.\nW4. The paper fails to provide sufficient information about the setup in the experiments, and some details about the method are confusing. For example, for the experiment in Fig. 2, linear interpolation is used prior to running the method. This seems to contradict the purpose of the method of being able to handle missing data. Also, it is unclear if linear interpolation was also used prior to running the competitors. This should be clarified in the description, and experiments comparing with and without interpolation should be conducted.\nW5. The accuracy in several of the experiments is very high, close to 80%, even when half of the data is missing - indicating that the problem might be too easy for any temporal method (as stated above, none of them are considered here). The experimental evaluation should thus include temporal clustering methods as well, possibly using interpolation if necessary (as in W4). 
More challenging datasets, where missing data has a stronger impact on accuracy should be studied in order to understand the robustness of the method.\nW6. It is unclear how quickly the method converges in general, only one example is provided for one of the datasets. The paper should provide convergence results across datasets and runs."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "(Q1) Why are the Figure 3 to Figure 6 only reported on a single dataset while ignoring the other 4 selected datasets? \n\n(Q2) Please introduce the data in a more structured way before section 3. A clearer definition should also be introduced to show what do you mean by ‘temporal semantics’ and why they are important, instead of talking about concept in a high-level way.\n\n(Q3) Please also include the description of temporal semantic in algorithm 1 to explicitly make people know how it works."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "(S1) The work proposes a novel angle to cluster human motion data considering data temporal sequence.\n(S2) It outperforms the previous baselines when adapted with the ‘missing entries’ data processing proposed by this work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduced a novel clustering framework called Exploring Temporal Semantic for Incomplete Clustering (ETC-IC), which leverages temporal information within sequential data to enhance the clustering accuracy. Unlike previous works, ETC-IC clusters data without requiring prior imputation, making the results less sensitive to missing data attribute issues. This work valid ETC-IC on 5 human motion benchmarks and the proposed model consistently surpasses current SOTA methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(W1) Experiment -- The experiments are not sufficiently well designed. Firstly, they adapt the proposed MAR and MNAR directly to the previous baselines without comparing the baselines’ original performances. Secondly, one of the core concept in this paper: missing entries, are manually designed and generated rather than an off-the-shelf nature existing in the dataset. The results should also show how the model behaves on samples without missing entries to show the model’s general capacity. Thirdly, the quantitative analysis is poor, where Figure 5 only showcases some samples while Figure 7 is a case study rather than quantitative analysis.\n\n(W2) Presentation -- Moreover, the presentation of the experiments is with many errors. Firstly, both Figure 1 and Figure 8 report results on 5 datasets, but the figure caption and paper introduce there are only 4 datasets instead of 5. Secondly, the introduction to Mocap is poor, where it doesn’t introduce clearly what are the sequence data. Thirdly, Fig 2 (b) and Fig 3 (b) are with low quality. There is a clear obstruction between plot lines and bottom-left frames. Fourthly, the ablation study is reported in a Table but presented as a Figure (Figure 8). Also, the caption is on top of figure 8 while all the other captions for Figures stay in the bottom. \n\n(W3) Lack of guidance -- Besides the experiments, it’s also very hard to comprehend the authors formal derivation, as there are very few intuitive explanations. Without a clear guidance and introduction to the data, the author directly introduces the formulas without elaborating how the symbols are connected to the data in this work. Later on, in the experiment, only a simple ablation study in Figure 8 shows the effectiveness of ‘temporal semantics’, while it doesn’t analyze the effectiveness of theorem1 to 4 respectively, leaving it unknown which part of the theorem truly works, and which part might fail. 
Also, the theorem 3 is missing, where it is replaced by ‘proposition 3’."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is the objective function convex, and if so, suggest adding a proof of convergence analysis instead of just giving a figure 6?\n2. Did the authors use five datasets or four? Why is it four at one time and five at another?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper presents a clustering framework distinguished by its remarkable adaptability in addressing the inherent challenges posed by incomplete sequential data.\n2. We introduce an innovative temporal semantic consistency constraint, which markedly enhances the efficacy of subspace clustering for sequential data.\n3. We provide a rigorous theoretical analysis, enabling an equivalent approach even in the presence of missing data, whilst effectively exploring temporal semantics."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces ETC-IC framework, which possesses the capability to seamlessly integrate temporal information while concurrently addressing the challenge of missing data. Firstly, to manage the issue of missing entries, ETC-IC employs an algebraic subspace analysis and develop a theoretically grounded alternative, thereby ensuring accurate clustering even in the presence of incomplete data. Secondly, ETC-IC explores the temporal semantics inherent in sequential data by aligning data points and their temporal assignments through a temporal semantic consistency constraint, thereby ensuring that data points with similar temporal semantics are clustered together. The handling of missing data and the exploration of temporal semantics are unified within a single cohesive framework, thereby demonstrating the adaptability and versatility of the proposed method in addressing incomplete sequential data as required."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In this paper, while the algorithm has already been exhaustively described and experimentally validated, it is recommended to include an analysis of the algorithm's time complexity to further enhance the completeness.\n2. it is recommended to incorporate additional evaluation metrics to further strengthen the assessment of its performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1.\tIt is recommended to provide a summary of the entire work through a framework figure.\n2.\tThe model is evaluated solely on the human motion dataset, and thus this work is validated within the human motion domain. Consequently, the current title is not appropriate, authors should revise it to ‘human motion learning’ or evaluate this model in a broader context.\n3.\tIn the ABLATION STUDY section, the authors should provide a detailed explanation of the ablation experiment setup and non-temporal semantics to validate the module's effectiveness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper is well written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Comments:\nThe manuscript presents a novel clustering framework, ETC-IC, intended to tackle the issue of clustering sequential data with incomplete features. The topic is of significant contemporary relevance, given the increasing focus on data with missing attributes in the clustering domain.\n\nWeaknesses:\n1.\tIt is recommended to provide a summary of the entire work through a framework figure.\n2.\tThe model is evaluated solely on the human motion dataset, and thus this work is validated within the human motion domain. Consequently, the current title is not appropriate, authors should revise it to ‘human motion learning’ or evaluate this model in a broader context.\n3.\tIn the ABLATION STUDY section, the authors should provide a detailed explanation of the ablation experiment setup and non-temporal semantics to validate the module's effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses:\n1.\tIt is recommended to provide a summary of the entire work through a framework figure.\n2.\tThe model is evaluated solely on the human motion dataset, and thus this work is validated within the human motion domain. Consequently, the current title is not appropriate, authors should revise it to ‘human motion learning’ or evaluate this model in a broader context.\n3.\tIn the ABLATION STUDY section, the authors should provide a detailed explanation of the ablation experiment setup and non-temporal semantics to validate the module's effectiveness."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024exploring,\ntitle={Exploring Temporal Semantic for Incomplete Clustering},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4vm6Nn2DW9},\nnote={under review}\n}"
},
"abstract": {
"value": "Clustering data with incomplete features has garnered considerable scholarly attention; however, the specific challenge of clustering sequential data with missing attributes remains largely under-explored. Conventional heuristic methods generally address this issue by first imputing the missing features, thereby making the clustering results heavily reliant on the quality of imputation. In this paper, we introduce a novel clustering framework, termed ETC-IC, which directly clusters incomplete data with rigorous theoretical guarantees, whilst concurrently leveraging temporal semantic consistency to enhance clustering performance. Empirical evaluations demonstrate that the proposed model consistently surpasses current state-of-the-art methods in clustering human motion data."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Temporal semantic",
"incomplete clustering",
"human motion segmentation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d407c1ecb06074b35dc5a8a2ce58fe4a1a68a4bc.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Exploring Temporal Semantic for Incomplete Clustering"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4vzGQcVUG8 | Provable weak-to-strong generalization via benign overfitting | main | Active | benign overfitting;spiked covariance models;overparameterized models;interpolation;pseudolabeling;weak-to-strong generalization;alignment | learning theory | 5;6;6;8 | 2;3;3;2 | 3;3;4;3 | 2;3;4;3 | 2;2;3;2 | 6.25 | 2.5 | 3.25 | 3 | 2.25 | -0.229416 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The main setup is quite confusing to me. The paper first states that \"$f_{weak} \\in \\mathbb{R}^d$\" is the object we learn. Normally, the model is a function, not a vector, so this was not immediately clear. It is defined later in line 347 how we learn $ f $, which is quite far from where it was introduced (line 184). It would be better to define that we train $f$ by MNI earlier.\n\n2. In line 201, it says, \"As a consequence of our main results in Section 3, we will show that the above desiderata are achievable in a simple toy model; see Theorem 3.3 for a formal statement.\" However, Theorem 3.3 only considers desiderata 1.2 and 2.1, not the entirety of the desiderata.\n\n3. What is \"$t$\" in Equation (3) of Theorem 3.1?\n\n4. The notation $ u, p, q, r $ used is not very intuitive, and it makes the result difficult to interpret. Is there a simpler way to rephrase the result?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The math appears correct to me; the problem is significant, and desiderata 1 and desiderata 2 make sense."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates weak-to-strong generalization in the setting of an overparameterized spiked covariance model with Gaussian covariates. The paper identifies an asymptotic phase transition between successful and unsuccessful generalization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper is rather technical, and the clarity could be improved significantly to make it more readable. (see questions)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Suggestions:\n\n1. Reduce the introduction - it currently spans 2 pages. \n2. Figure 1 is useless.\n3. The section on data model was not particularly needed. Page 5 and 6 can be compressed into 1 or 2 paragraphs.\n4. Include some experiments in main body.\n\nIn general the paper is quite verbose, it can be compressed substantially and content moved back into main body."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Exact characterization of the regime where weak to strong generalization occurs in terms of parameters of the covariance matrix of strong and weak features."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The papers identifies a specific setting under which weak to strong generalization occurs. Consider a strong model that learns a classifier on strong features of the data by supervised learning on $m$ weak labels given by a weak model that was trained on weak features on $n$ clean labels. Then weak to strong generalization implies that \n\nCondition 1): The strong model has perfect classification accuracy whereas the weak model has close to random accuracy. \n\nCondition 2): The generalization is due to weak labels, i.e. if the strong model was only trained on $n$ clean labels, there is no generalization.\n \nThe setting is as follows: A learner observes features distributed according to a Gaussian distribution, $x \\sim N(0, \\Lambda)$ where $\\Lambda$ is diagonal covariance matrix following a bilevel ensemble parameterization \n\\begin{equation}\\lambda_j = \\lambda_F = \\frac{ad}{s} \\text{ for } 1 \\leq j \\leq s \\text{ otherwise } \\lambda_j = \\lambda_U = \\frac{(1-a)d}{d-s}\\end{equation}\nwhere $d = n^p, s= n^r, a = n^{-q}$ and $p > 1; q, r >0; q+ r < p$. For multiclass setting, classes are further scaled as $k = c_k n^t$ for some $t<r$. The strong model observes features given by some $p, q, r$ and weak model observes features characterized through $p_{weak}, q_{weak}, r_{weak}$. In particular the strong features $x_{strong}$ and weak features $x_{weak}$ are given as \n$$ x_{strong} = N(0, \\lambda_F I_{[s]} + \\Lambda_U I_{[d]/[s]}) $$\n$$ x_{weak} = N(0, \\lambda_{F, weak} \\Pi_S + \\Lambda_{U, weak} \\Pi_T)$$\nfor some subsets $S \\subseteq [s], T \\subseteq [d]/[s]$ and $\\Pi_S$ denotes projection onto axis aligned subspace indexed by $S$. $\\lambda_{F, weak} = \\frac{a_{weak}d_{weak}}{s_{weak}}$ and $\\Lambda_{U, weak} = \\frac{(1-a_{weak})d_{weak}}{d_{weak}-s_{weak}}$.\n\n The true labels are given by $y = \\text{sign}(x_1)$ for binary classification and $y = \\arg\\max_k (x_1, \\dots x_K)$ for $K$ way classification. 
\n\n\n \nIn this parameterized setting, the authors show that there is a particular regime of number of weak labels $m$ provided by the weak model (for certain regimes of $p, q, r, p_{weak}, q_{weak}, r_{weak}$) where weak to strong generalization occurs (condition 1) holds). The conditions (for binary classification) are given by (assuming $m = n^u$)\n\n1. $u + \\min(1 -r, p + 1 - 2(q + r)) > q_{weak}+r_{weak} > (p_{weak} + 1)/ 2$\n2. $p + 1 > (q + r + q_{weak} + r_{weak})$\n3. $u < (p + 1 + q + r - (q_{weak} + r_{weak})/ 2)$ \n\nFurther the classification error of strong learner trained on $n$ cleaned labels is shown to be depend as \n$$1/2 - 1/\\pi \\arctan (\\Theta(n^{p+1 - 2(q+r)}))$$\n\nThus they claim one can identify regimes under which condition 2) also holds (possibly when $p+1 - 2(q+r) << 1$) although no details are provided).\n\nFurther they provide an informal claim and details in appendix that there exists some regime for multi class setting."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Most of the important details are pushed into appendix. The main body only contains one useful theorem which identifies a certain condition where condition 1) of weak to strong generalization holds. Setting for condition 2) and multi class settings are merely mentioned as claims. The main body also does not provide proof sketch or provide insights into the proof of the theorem."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "21) What the the word \"represent\" mean in Desiredata 1.(ii).\n2) What is the significance of the bi-level-ensemble? \n3) What is $t$ in Theorem 3.1?\n4) Is there a reason for choosing a halfspace for the ground truth? Does this analysis extend to other concepts. Is there a similar notion for regression (rather than classification)?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1) This work addresses the important problem of obtaining theoretical justification for a frequently encountered empirical phenomenon\n2) The lower tail for max of correlated gaussians is an interesting result."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, the authors provide theretical justification for the empirically observed phenomenon of weak to strong generalization. In this setting, a weak learner is used to created labelled examples (from unlabelled training data) that is used to further train a stronger model. The intuition is that the weak learner has learnt some useful information about the ground truth and hence the pseudolabels it generates will actually enable generalization. The authors prove that this weak to strong generalization has two phases: (1) when the number of pseudolabelled examples is less than some threshold, the strong learner behaves like a random guesser, (2) beyond the threshold the strong learner achieves perfect generalization. A technically interesting tool that they use is a new lower tail for the max of correlated gaussians which could be of independent interest."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "See questions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses above. It would be nice if the authors can address these."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper studies a phenomenon that has been empirically observed and thus relevant to practice\n- The results and proof techniques seem non-trivial and interesting"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies a toy-model for weak-to-strong generalization. They show that under the assumptions of their toy-model, two asymptotic phases occur for the student: (1) it is able to successfully generalize or (2) the student resorts to effectively random guessing. The authors also try to extend their results to weak-to-strong multiclass classification and derive new lower tail inequalities for the max of correlated gaussians."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I find the organization and presentation a bit confusing and hard to parse. In particular, Theorem 3.3 is hard to interpret without referencing the Desiradatum outlined in Section 2. Ideally, Theorem 3.3 should be standalone and at the very least, the variables in Theorem 3.3 like $\\tau_{weak}, p_{weak}, ...$ should be defined. In addition, and in my opinion, the notation and current presentation of the result doesn't really make it seem like this is a \"simple, toy model\", given how many free variables there are to keep track of. One possible fix is to give more intuition and less notation about the toy-model in the main text, and push the details into the Appendix. For example, I think it would be really helpful to have an informal, non-rigorous theorem summarizing the main result in the Main Contributions section.\n\nIn addition, I am not sure what to take away from this paper. It is nice that you found a toy example, where you can provide rigorous evidence of the empirical phenomena of weak-to-strong generalization. However, I am not convinced this toy model is realistic/relevant to practice, even after reading the Modeling assumptions in the Discussion. In short, it would be nice if the authors can answer:\n- **why** one should care about finding a \"simple, concrete theoretical setting where we can provably exhibit different phases of weak-to-strong generalization?\" \n- what can I take away from this result?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We use recent advances in benign overfitting for classification to prove weak-to-strong generalization in a toy setting."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024provable,\ntitle={Provable weak-to-strong generalization via benign overfitting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4vzGQcVUG8},\nnote={under review}\n}"
},
"abstract": {
"value": "The classic teacher-student model in machine learning posits that a strong teacher supervises a weak student to improve the student's capabilities.\n We instead consider the inverted situation, where a weak teacher supervises a strong student with imperfect pseudolabels. \n This paradigm was recently brought forth by \\citet{burns2023weak} and termed \\emph{weak-to-strong generalization}. \n We theoretically investigate weak-to-strong generalization for binary and multilabel classification in a stylized overparameterized spiked covariance model with Gaussian covariates where the weak teacher's pseudolabels are asymptotically like random guessing.\n Under these assumptions, we provably identify two asymptotic phases of the strong student's generalization after weak supervision: (1) successful generalization and (2) random guessing. \n Our techniques should eventually extend to weak-to-strong multiclass classification. \n Towards doing so, we prove a tight lower tail inequality for the maximum of correlated Gaussians, which may be of independent interest.\n Understanding the multilabel setting reinforces the value of using logits for weak supervision when they are available."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"benign overfitting",
"spiked covariance models",
"overparameterized models",
"interpolation",
"pseudolabeling",
"weak-to-strong generalization",
"alignment"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e0e9c3071424318e0cf247d02889aac6e0842694.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Provable weak-to-strong generalization via benign overfitting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4w99NAikOE | IterComp: Iterative Composition-Aware Feedback Learning from Model Gallery for Text-to-Image Generation | main | Active | Compositional text-to-image generation;Feedback learning for diffusion model | generative models | 5;6;6;6;6 | 3;4;4;4;3 | 2;3;3;3;3 | 2;3;2;3;3 | 3;3;3;3;3 | 5.8 | 3.6 | 2.8 | 2.6 | 3 | 0.612372 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work claims to be the first work to introduce a reward-controlled framework in the concept composition generation, which is somehow novel in this field.\n2. This work is presented well with complete theoretical details and proof.\n3. The quantitative experimental results show better performance compared to SOTAs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes IterComp, an iterative composition-aware reward-controlled framework. It introduces a model gallery and constructs a high-quality composition-aware model preference dataset. Utilizing a new iterative feedback learning framework, IterComp progressively enhances both the reward models and the base diffusion model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The qualitative comparison in Fig. 4 is confused. There seems to be a marginal improvement in Line 3 and Line 4. The authors should make the difference between the qualitative examples clear to be recognized.\n2. The qualitative examples are not enough to evaluate the model performance since the cases in the paper are somehow complex. The authors can provide more examples for a single case. Also, there is a need to evaluate the stability of the proposed model.\n3. The comparison with InstanceDiffusion is confusing. As a layout-guided method, InstanceDiffusion needs detailed layout inputs. It is not fair to provide only one box if the case includes two or more instances, as indicated in the third line of Fig. 4. As the authors attempt to compare with layout-to-image methods, a SOTA method named MIGC [1] is also not included.\n\n[1] Zhou, Dewei, et al. \"Migc: Multi-instance generation controller for text-to-image synthesis.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Novel and practical approach: The paper presents a simple yet effective way to combine multiple models' strengths for compositional generation without increasing computational complexity.\n\nStrong empirical results: The method shows clear improvements over existing approaches, with comprehensive evaluations on both compositional accuracy and image quality metrics.\n\nWell-structured technical contribution: The paper provides clear theoretical analysis with detailed proofs, and the iterative feedback learning framework is well-designed and easy to implement."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces IterComp, a novel framework that enhances compositional text-to-image generation by aggregating preferences from multiple diffusion models through iterative feedback learning. The approach demonstrates superior performance in both compositional accuracy and image quality, while maintaining efficient inference speed. The main strength lies in its ability to combine different models' advantages without adding computational overhead, though the long-term stability of the iterative learning process could be further explored."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper mentioned that RPG is challenging to achieve precise generation, but Tab2 did not compare with RPG, and I checked that RPG's performance on T2I-Compbench is better than that of the paper.\n\nIt is necessary to test the results of FLUX-dev directly on the t2i compbench to see how much improvement the method proposed in this paper has. I currently suspect that the improvement may not be very significant."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. As mentioned in W.1, how long does the training loop take (including the iterative feedback learning)?\n2. Could I use this method to improve a specific concept generation (e.g., a human-object interaction)? How much time does it take from collecting synthetic data to finalizing the model training?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper presents a novel framework combining preferences from multiple diffusion models to enhance compositional text-to-image generation and address the relationship understanding in diffusion models.\n2. The qualitative/quantitative results show comparable improvements in compositionality.\n3. A composition-aware dataset is collected which provide diverse preferences that inform the reward models. (will it be released in the future?)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper propose a framework that aggregates composition-aware model preferences from multiple models and employs an iterative feedback learning approach to enhance T2I compositionality and general performance. The qualitative and quantitative results show their SOTA compositional generation capabilities compared to previous works."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. There is limited discussion of the computational resources required to manage multiple reward models, which may affect the scalability in large-scale applications. Although the authors claim that their model has fast inference speed, the cost of model training and data collection is not clear. This makes me feel it is less likely than DPO to be widely used in practice.\n2. The user study only demonstrates the user preferences lacking the deep analysis of attribute binding and object relationship, which are critical to model performance. 16 samples is also too small to evaluate such a complex task."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In addition to the weakness mentioned above, I have a question regarding stability. \n\nBased on my experience, RLHF in diffusion models can often be unstable. I’m curious whether your method consistently produces stable results or if there’s a risk of occasionally poorer outcomes. I’m concerned that the iterative training process might lead to overfitting on biases present in the reward models, potentially reducing overall robustness.\n\nI hope the authors can make up for the weaknesses mentioned and address these questions."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-structured, making it accessible and easy for readers to follow.\n2. Clear formulas are provided for each component, effectively removing any ambiguity.\n3. Mathematical proofs substantiate the validity of the proposed method.\n4. The authors conduct detailed, extensive experiments to support their approach.\n5. Illustrative images are included, enhancing clarity and understanding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study addresses improving the compositional text-to-image generation capability of less powerful diffusion models. The authors contribute in two main areas. First, they decompose the capabilities of various diffusion models into three domains—attribute binding, spatial relationships, and non-spatial relationships—and rank model outputs accordingly to guide reinforcement learning with reward models specific to each domain. Second, they introduce an iterative training process that utilizes the fine-tuned diffusion model outputs to progressively enhance the reward models.\n\nThrough multiple experiments, the study demonstrates the proposed method’s effectiveness, enabling early-stage models to achieve comparable generative abilities with reduced inference time. The authors also verify the effectiveness and general applicability of each design component across different models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. About the number of texts used for attribute binding, \"500 prompts\" in Line 185 is inconsistent with \"1500\" in Table 1. Which is correct?\n2. Although the experiments are detailed, some comparisons appear incomplete. The reinforcement learning from human feedback (RLHF) approach leverages outputs from advanced models like FLUX and SD3 for training, yet direct comparisons with these models are not provided. Including these comparisons would better highlight the method's effectiveness.\n3. An additional experiment focusing on the first-round fine-tuned model—trained solely with human-annotated data—would be valuable. This would clarify the necessity and impact of the iterative training approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Training time considerations:\n - Diffusion-DPO operates in the latent space without requiring image decoding. In contrast, IterComp requires image decoding for the reward models (though this could be avoided by training the reward model with latent space inputs), likely resulting in slower training. Additional commentary on the training scheme would be valuable.\n* Potential code and paper discrepancy:\n - The paper describes an iterative feedback mechanism that optimizes both reward models and the base model. However, examination of `feedback_train.py` reveals that only unet parameters (base model) are passed to the optimizer. This suggests that only the base model is being optimized, while reward models remain static. This difference requires clarification.\n* Question regarding test-time adaptation:\n - Could the iterative feedback mechanism be applied as test-time adaptation of the base model? Similar to Slot-TTA [1], the base model could be optimized using reward models to improve compositional quality. The process would work as follows: for a given prompt, the base model generates an image, which is then evaluated by reward models. The base model's parameters would be updated to maximize these rewards. This process could be repeated for several iterations. This approach would eliminate the need for training the base model, allowing it to adapt to any prompt at test time through multiple iterations. Comments on this possibility would be valuable.\n\n[1] Test-time Adaptation with Slot-Centric Models, Prabhudesai et al., ICML 2023"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The collected dataset can lead to better models and new research directions. Furthermore, researchers can follow a similar approach to collect their own data. The data can also be used for other RLHF methods, such as diffusion-DPO.\n* The iterative feedback mechanism seems to be a novel way to optimize both reward models and the base model.\n* The code is shared, which allows reviewers to follow the details of the proposed method.\n* The performance gain from IterComp appears significant, as evaluated through user studies, quantitative analysis, and qualitative assessment."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a new dataset for compositional generation where experts evaluate the outputs of multiple pre-trained models. The dataset consists of three aspects of compositionality: attribute binding, spatial relationships, and non-spatial relationships, along with 52.5K image-rank pairs, which can be used for training feedback, ranking, or reward models.\n\nThe second contribution of the paper is using the collected dataset to train reward models for each of the three key aspects. A multimodal model (BLIP) is used as a feature extractor for both the prompts and the generated images, and the extracted features are projected by MLPs to output the reward. The goal is to predict how good the given image-prompt pairs are by training the model similar to contrastive learning (moving toward winning examples and away from losing examples).\n\nThe third contribution is improving the compositional ability of a base model (SDXL is selected but it should not be limited to that) by optimizing it using the trained reward models. The base model is trained to maximize the reward model's output so that its outputs are better aligned with the reward models, which are trying to enforce compositionality. Furthermore, the paper proposes an iterative update mechanism for both reward models and the base model. Reward models are updated to predict the rankings generated by experts while the base model is updated to maximize the outputs of reward models. Through this process, both the base model and reward models are improved for their specific tasks.\n\n**Important note:** This review's technical content and analysis are my original work. Large Language Models were used solely to improve grammar and writing style, without altering any meanings or substantive feedback."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Table 5 should be in the main part instead of the Appendix, as it simply demonstrates that the proposed method outperforms previous methods.\n* Several experiments are missing:\n - The paper combines all reward models simultaneously, likely leading to improved compositional performance. However, reviewers would benefit from seeing the individual effect of each reward model.\n - While SDXL is chosen as the base model, testing other models would help reviewers understand how reward models affect different base architectures.\n - It would be valuable to examine Diffusion-DPO's performance when trained on the collected dataset. Currently, Diffusion-DPO is trained on the pick-a-pic dataset, which is larger but lacks compositional details. These results would be necessary to evaluate the proposed method's effectiveness using consistent standards.\n* Expert ranking may inadvertently include aesthetic information [This observation is meant to prompt discussion rather than highlight a weakness, as the underlying cause remains unclear]:\n - When IterComp is applied, the aesthetic score improves, suggesting that reward models can interpret image aesthetics, since reward maximization leads to better aesthetic scores. This could be because either expert rankings are influenced by image aesthetics (beyond compositional attributes) or because models with better composition naturally generate more aesthetic images (for instance, FLUX, being the best compositional model, likely produces more aesthetically pleasing outputs).\n* Some experimental conditions may be misleading:\n - The paper uses 40 inference steps for all models to ensure fairness. However, some models can generate samples with fewer steps; for example, FLUX-dev uses 28 steps by default."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024itercomp,\ntitle={IterComp: Iterative Composition-Aware Feedback Learning from Model Gallery for Text-to-Image Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4w99NAikOE},\nnote={under review}\n}"
},
"abstract": {
"value": "Advanced diffusion models like Stable Diffusion 3, Omost, and FLUX have made notable strides in compositional text-to-image generation. However, these methods typically exhibit distinct strengths for compositional generation, with some excelling in handling attribute binding and others in spatial relationships. This disparity highlights the need for an approach that can leverage the complementary strengths of various models to comprehensively improve the composition capability. To this end, we introduce IterComp, a novel framework that aggregates composition-aware model preferences from multiple models and employs an iterative feedback learning approach to enhance compositional generation. Specifically, we curate a gallery of six powerful open-source diffusion models and evaluate their three key compositional metrics: attribute binding, spatial relationships, and non-spatial relationships. Based on these metrics, we develop a composition-aware model preference dataset comprising numerous image-rank pairs to train composition-aware reward models. Then, we propose an iterative feedback learning method to enhance compositionality in a closed-loop manner, enabling the progressive self-refinement of both the base diffusion model and reward models over multiple iterations. Detailed theoretical proof demonstrates the effectiveness of this method. Extensive experiments demonstrate our significant superiority over previous methods, particularly in multi-category object composition and complex semantic alignment. IterComp opens new research avenues in reward feedback learning for diffusion models and compositional generation."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Compositional text-to-image generation",
"Feedback learning for diffusion model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/fcfed353ef28cebc24d7813167dfea80074dab09.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/7791136c5f27b8ad1a58d37e5da5cb8c43370cef.zip"
},
"title": {
"value": "IterComp: Iterative Composition-Aware Feedback Learning from Model Gallery for Text-to-Image Generation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4wk2eOKGvh | Test-Time Ensemble via Linear Mode Connectivity: A Path to Better Adaptation | main | Active | test-time adaptation;domain adaptation;linear mode connectivity | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 5;6;6;6 | 4;4;4;3 | 3;4;3;3 | 2;3;2;3 | 2;4;3;4 | 5.75 | 3.75 | 3.25 | 2.5 | 3.25 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses above."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Writing quality is good. The paper is well-structured, and clearly written. \n2. Good insights. This paper explores TTA as a domain generalization problem, uncovering linear connectivity within TTA models. This perspective suggests that domain generalization techniques could enhance model representations for TTA tasks. \n3. SOTA performance. The proposed method achieves the state-of-the-art performance via the integration with different TTA models in various scenarios.\n4. Ablations. Ablation experiments are provided to verify the effectiveness of the proposed modules."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel test-time ensemble approach that can be seamlessly integrated with existing TTA models to enhance adaptation. Specifically, the proposed framework reduces domain gaps through two ensemble strategies: weight averaging of TTA models and dropout. Additionally, a knowledge distillation strategy is employed to mitigate both noise and bias for improving model robustness under different distribution shifts. \nExtensive experiments are conducted in different TTA scenarios to demonstrate the superiority of the proposed method over existing baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although the results presented in Table 3 show the performance improvement achieved by the proposed framework in the continual TTA scenario, it is unclear how the method enhances baseline performance in later adaptation stages. Additionally, I would like to know if the proposed method addresses the issue of catastrophic forgetting in this context. \n\nAdditional question:\nIs it possible to extend the proposed benchmark construction method to dense prediction tasks, such as semantic segmentation? It would be very meaningful if it can be applied to various tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why are the results of CoTTA in Continual TTA with non-i.i.d. conditions only 2.2% and 3.4%?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. TTE utilizes an ensemble strategy to dynamically enrich model representations during online test-time adaptation (TTA), which is an interesting approach.\n2. TTE constructs an ensemble network by averaging the parameter weights of different TTA models, and this weight averaging captures model diversity, improving representation quality without increasing the computational burden of managing multiple models.\n3. TTE further promotes the diversity of representations within TTA models by combining with dropout, and proposes a debiased and noise-resistant knowledge distillation scheme to stabilize the learning of TTA models in the ensemble.\n4. The experiments are extensive and the results are superior to other compared methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a method called Test-Time Ensemble (TTE), which uses an ensemble strategy to dynamically enrich model representations during online test-time adaptation (TTA). TTE constructs an ensemble network by averaging the parameter weights of different TTA models, which are continuously updated using test data. This weight averaging technology captures model diversity and improves representation quality without increasing the computational burden of managing multiple models. TTE further combines dropout to promote diverse collaboration of representations within TTA models, and also proposes a debiased and noise-resistant knowledge distillation scheme to stabilize the learning of TTA models in the ensemble. TTE can be seamlessly integrated with existing TTA methods, enhancing their adaptive capabilities in various challenging scenarios."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The TTE method involves multiple hyperparameters, such as the momentum coefficient, dropout ratio and temperature, which may affect the stability and generalization of the method. Further research on how to reduce the dependence on hyperparameters is crucial.\n2. TTE integrates many well-established techniques such as ensemble, dropout, knowledge distillation, which have been utilized in TTA or few shot learning. The combination has weakened the novelty of the paper, and the unique contributions of the paper should be clarified.\n3. How to conduct the weight-space ensemble without adding computational complexity? Will the technique increase storage consumption?\n4. The knowledge distillation-based debiasing and anti-noise strategies proposed in the paper may not be able to completely solve the problem of noisy test data. How to solve the scenario that the pseudo labels are incorrect?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Here, I am adding the questions that I find relevant for improving the work quality; some of them were already discussed in the Weaknesses section:\n\n- Could you clarify this a bit more in section 3.2? How is it important for the method?\n\n- Do you think the batch size can impact the results, especially the ones provided for the continual TTA?\n\nPlease consider answering the points on the weaknesses as well. Furthermore, I am open to discussion, and I think that the work has a good potential for the community."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The paper is well-written, easy to follow, and detailed. I liked how the authors presented the work and motivated toward the problem. Furthermore, the results are motivating, and the idea seems easy to implement on top of different methods (as demonstrated by the authors), which can be beneficial for the community if the authors also provide the full code for reproducibility."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces the Test-Time Ensemble (TTE), a method designed for TTA using the theory of weights space ensemble, which can be used on top of different TTA methods. The authors show different results for TTA over corruptions with different baselines, and the method seems to work pretty well. Furthermore, the authors also provided results for continual TTA, which is interesting."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Personally, I did not see many problems with the paper, but I would suggest the authors proofread again to avoid problems such as the following typo \"with lager and more complex\" -> \"with larger and more complex\" in the introduction or \"Adaptvie momentum\" -> \"Adaptive momentum.\" \n\nIf the authors work on the following points, it will improve a lot the quality of the work: \n- I am not so convinced by section 3.2, DE-BIASED AND NOISE-ROBUST KNOWLEDGE DISTILLATION. Could you clarify this a bit more in this section? And maybe make it more clear in the paper.\n\n- In Equation 5, there is no hyperparameter to balance the terms. I think it should be included, right? \n\n- For Table 3, with continual TTA, the authors compared all other baselines with TTE with DeYO, but not TTE with other variants as well, and for continual TTA, I don't understand how the performance can still perform well in the direction of the adaptation without degrading too much as we can see in the other approaches (for example DeYO goes from 28.1 to 3.7 and then 7.2), for me it only makes sense if you ensemble with zero-shot model as well (or reset the model weights) but if this is the case it should be done for all other baselines as well.\n \n- For Table 4, column V2 seems strange, as almost all results in a) are 68.9, even DeYO and DeYO with TTE. Could you also add more results with other batch sizes? (the batch size can play an important role in different algorithms, which can benefit DeYO and not the others. For instance, I recommend taking a look at the paper \"Bag of Tricks for Fully Test-Time Adaptation, IEEE/CVF Winter Conference on Applications of Computer Vision. 2024\", which shows the role of batch size in some of the TTA algorithms.\n\n- I would suggest revisiting some of the baselines for Tab 1. I would also consider a baseline with other methods of the local ensemble as well, such as SWA with TTA, and for Tab 4. I would also add other methods with TTE (maybe in the supp. material)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weakness."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) This paper proposes a new problem, the test-time ensemble problem, which is different from previous test-time adaptations. I believe this problem has some practical applications.\n\n(2) The paper proposes some simple baseline methods that can effectively address the problem."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers a new problem, test-time ensemble (TTE), which aims at using multiple models generated during TTA. This paper first formulates the test-time ensemble problem. The paper also proposes the weight average and dropout as the baseline methods to evaluate the performances. \n\nThe contributions can be summarized as: \n\n(1) The author revealed that TTA models exhibit linear mode connectivity, an intriguing insight that simplifies and enhances the adaptation process.\n\n(2) The author introduced Test-Time Ensemble (TTE), a novel and computationally efficient approach that not only enriches model representations but also stabilizes TTA optimization through de-biased and noise-robust knowledge distillation.\n\n(3) TTE integrated effortlessly with existing TTA methods, enhancing adaptation in diverse scenarios and showing potential for applicability to future TTA methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The analysis of test-time adaptation does not inspire the new methods. The moving average ensemble methods are popularly adapted in self-supervised learning and ensemble methods. I consider the Linear Mode Connectivity theory should tell the reason and the situation that the models generated during test-time adaptation. \n\n\n(2) Limited technical novelty: this paper proposes the two-branch structure and leverage the weight average to improve the performance. Similar techniques are implemented in https://github.com/huggingface/pytorch-image-models. I do not see anything new compared to what have been proved in image classification.\n\n(3) Unclear description. In section 3, the de-biased distillation subsection does not describe clearly where the bias comes from. I suggest the author should explain the bias again. Also, I can not understand what the connection between the spike phenomena of the accuracy curve and the bias."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Leveraging linear mode connectivity to enhance test-time adaptation through ensemble methods."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024testtime,\ntitle={Test-Time Ensemble via Linear Mode Connectivity: A Path to Better Adaptation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4wk2eOKGvh},\nnote={under review}\n}"
},
"abstract": {
"value": "Test-time adaptation is a valuable approach for online adjustment of pretrained models to handle distribution shifts in test data. While existing research has focused primarily on optimizing stability during adaptation with dynamic data streams, less attention has been given to enhancing model representations for improved adaptation capability. This paper addresses this gap by introducing Test-Time Ensemble (TTE), which leverages two key ensemble strategies: 1) averaging the parameter weights of assorted test-time adapted models and 2) incorporating dropout to further promote representation diversity. These strategies encapsulate model diversity into a single model, avoiding computational burden associated with managing multiple models. Besides, we propose a robust knowledge distillation scheme to prevent collapse during adaptation, ensuring stable optimization. Notably, TTE integrates seamlessly with existing TTA approaches, advancing their adaptation capabilities. In extensive experiments, integration with TTE consistently outperformed baseline models across various challenging scenarios, demonstrating its effectiveness and general applicability."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"test-time adaptation",
"domain adaptation",
"linear mode connectivity"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e4cc84ce42f742d5f02b7bc5bbbcd737a4198da0.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Test-Time Ensemble via Linear Mode Connectivity: A Path to Better Adaptation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4wmf3Ffhl2 | A Dynamic Model of Performative Human-ML Collaboration: Theory and Empirical Evidence | main | Active | Human-AI Collaboration;Human-Computer Interaction;Dynamic Systems;performative prediction;strategic behavior;human-in-the-loop;dynamic learning;deployment strategies | other topics in machine learning (i.e., none of the above) | 3;5;5;5 | 3;3;2;3 | 3;3;2;3 | 1;2;2;3 | 2;3;2;2 | 4.5 | 2.75 | 2.75 | 2 | 2.25 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* When are the conditions in Definition 1 expected to hold? Examples of suitable utility functions in binary classification and scalar regression can be very helpful.\n* Could notation in eq. (1) be clarified? Specifically, $Y_{H_{t-1}}$ seems to appear both as an argument of the function, and as a variable sampled from $D_{t-1}$.\n* How was Appendix Figure 6 (L403) generated?\n* In the theoretical analysis, how would results change if the training set in each step was finite?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The paper addresses a well-motivated topic.\n* The empirical analysis is grounded in data from real human subjects.\n* Results seem to provide interesting insights into ML-assisted decision-making contexts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "* This paper examines Human-ML collaboration under performative prediction settings through theoretical analysis and an empirical experiment on ML-assisted combinatorial knapsack problems.\n* In the setup, users interact with a predictive system in discrete time steps. At each time step $t$, a model $M_t$ predicts a label $Y_{M_t}$ based on features $X$. This prediction serves as decision support for a human decision-maker, who then makes their own prediction $Y_{H_t}$. Pairs $(X,Y_{H_t})$ are used to train the subsequent model $M_{t+1}$, and it is assumed that $M_{t+1}$ perfectly aligns with its training distribution. Definition 1 introduces utility $\\mathbb{U}(X, Y)$ for prediction-label pairs, defining its properties axiomatically. It then defines the collaborative characteristic function which captures one-step utility improvement, and $\\mathbb{L}_{\\Delta\\mathbb{U}}(s,t)$ as the trajectory of expected utilities for a system whose initial utility is $s$. Propositions 1 and 2 show that utility trajectories converge under monotonicity assumptions.\n* The empirical section evaluates the impact of model-based advice on human solutions for the 0-1 knapsack problem. Human participants interact with an ML-supported system to solve knapsack problems, possibly receiving predictions of the optimal solution. Six models with varying accuracy were trained before the experiment using synthetic optimal solutions, and each experimental group received distinct models and possibly different monetary incentives. Results indicate that incentivization schemes had no significant impact on solution quality, while decision-support quality correlated with human solution quality. Collaborative learning trajectories were presented based on these results."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The paper claims to provide an empirical evaluation of performative prediction but seems to lack essential elements of this setup. Specifically, prediction models were trained on synthetic data before the experiment, and the experiment does not include \"feedback loops\" which are a defining component of performative prediction.\n* The theoretical analysis applies to a limited form of performative prediction, assuming that utility trajectories are determined solely by population-wide average utility, without taking the structure of the predictor into account. Functions like the collaborative learning path are interesting, but it is not clear whether the definitions are applicable in more general scenarios.\n* The empirical approach uses an atypical learning task: predicting a binary solution vector for a combinatorial 0-1 knapsack problem based on random synthetic instances and optimal solutions. The paper notes a possible analogy to multi-task classification, but it’s unclear how results extend to conventional ML tasks on non-synthetic data."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Can the author open-source the dataset provided by real human?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Originality: A substantive assessment of the strengths of the paper, touching on each of the following dimensions: originality, quality, clarity, and significance.\nQuality: The paper is supported by a robust empirical study involving 1,408 participants working on the knapsack problem. The statistical analyses performed provide strong support for the conclusions drawn, particularly regarding human improvements over ML predictions. Additionally, the paper critically examines the impact of monetary incentives on decision quality, contributing valuable insights to the field.\nClarity: The paper's motivation and conclusion are clear.\nSignificance: The paper gives some suggestions about the consideration of human behavior and the selection of the dataset to train the model."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the dynamic model of performative human-ML collaboration from both theoretical and empirical perspectives. The paper introduces the notion of the Collaborative Characteristic Function, which connects the predicted label and the unknown ground truth. The paper conducts an empirical study that involves real humans on the knapsack problems. Experimental results show that humans tend to improve the model's performance, and humans may submit worse results than the predictions made by models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.Abstractness of Problem Domains: The study does not focus on specific classification or regression tasks, which makes the findings somewhat abstract. A more concrete application would enhance the practical relevance of the research.\n2.Limited Application Scope: The research primarily concentrates on the knapsack problem, neglecting more realistic scenarios, such as medical diagnosis. Exploring applications in critical areas like healthcare would significantly increase the paper's impact and relevance.\n3.Participant Preference Variability: While the study involves 1,408 participants, it lacks a detailed analysis of their preference differences. Understanding how individual preferences might affect decision-making is essential, as these variations could lead to suboptimal choices in certain instances.\n4.Simulation of Human Behavior: Beyond conducting real experiments with participants, the paper does not explore the potential for simulating human behavior. Employing simulations could reduce the costs associated with extensive human experimentation while still providing valuable insights into collaborative decision-making dynamics."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\tThe paper initially presents that current human-ML collaborative systems face three crucial challenges, but the subsequent text does not detail the innovations made in addressing or alleviating these three issues. I hope to see a clear exploration of how the paper addresses or alleviates each of these challenges in the introduction.\n2.\tA deeper discussion on incentive mechanisms: Provide more discussion on the ineffectiveness of incentive mechanisms to help readers understand the potential reasons for this phenomenon.\n3.\tThe contributions are trivial, making it difficult for readers to understand the key points of this paper. I hope the author can rewrite their contributions.\n4.\tIn Definition 1, the definitions of Ymin and Y' are not specified. In Definition 5, the definition of x1…xn should be placed in the main text."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper has several strengths, as follows:\n1.\tA new dynamic framework is proposed for considering the deployment of ML models in human-ML collaborative systems.\n2.\tThe involvement of participants in real-world scenarios enhances the credibility of the research. The design of the empirical study allows for clear identification of the actual ground truth, providing evidence for the research results.\n3.\tThe findings of the paper have practical significance, aiding companies in optimizing the training and deployment strategies of ML models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper primarily presents a new dynamic framework for thinking about the deployment of ML models in performative human-ML collaborative systems, helping to understand how ML influences human decision-making processes. This research is intriguing and has practical value."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe paper is hard to follow, the complexity may make it difficult for readers to understand.\n2.\tThe research focuses primarily on the knapsack problem scenario, which may limit the generalizability of the results. It is recommended that the authors consider validation in different types of problems to enhance applicability.\n3.\tThe paper mentions the failure to find a positive impact of incentive mechanisms on human decision quality, and the explanation for this phenomenon is insufficient, leading to a superficial discussion of the incentive mechanisms without exploring their potential reasons."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "All my questions are listed in the weakness part."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The studied problem is important. Developing a collaborative system that integrates ML models with human decision-making to consistently achieve better outcomes is both a valuable and challenging topic for academia and industry.\n2. A theoretical framework is proposed to describe the collaboration process and quantify the quality of solutions from both models and humans. A sufficient condition, which ensures non-decreasing utility, is provided to guarantee the achievement of a stable point.\n3. An empirical experiment was conducted with real users, offering interesting insights into practical applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a theoretical framework for describing the collaboration process between ML models and human decision-making. By defining a utility function and a collaborative characteristic function, it gives a sufficient condition for achieving a stable point in the optimal case. Additionally, an empirical experiment with real users offers interesting insights into practical applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe theoretical framework primarily aims to describe the problem. Both the theory and convergence conditions rely on acquiring the utility function, which seems to be merely achievable with the knowledge of the ground truth. However, as discussed in Introduction and Related Works, a key intuition of this paper is addressing the inaccessibility of ground truth in real-world scenarios. Consequently, the theory offers limited insights at the methodological level.\n2.\tSeveral expressions and derivations are unclear and lack rigor. E.g., it seems that $\\delta_{M_t}$ in Eq. (3) should be determined jointly by ${M_t}$ and $X$. In Definition 3 and Proof A.6, $U(H(X, Y_{M_t}))$ should instead be written as $U(X, H(X, Y_{M_t}))$. Additionally, the logic behind the proof of Proposition 1 is unclear, particularly why $E_{x \\in \\mathcal{X}}(U(Y_{M_{t+1}}))=0$ holds. And Observation 2 is also confusing. Why should the absolute difference in distance measures equate to the difference in utilities?\n3.\tThe authors devote substantial space to describing the experiment and results related to monetary incentives. However, since these are empirical observations of a single confounding factor in a specific scenario, they provide limited insight and generalizability from a broader perspective."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce a new framework for human-ml collaboration that considers distribution shifts and test it in a large user study."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Dynamic Model of Performative Human-{ML} Collaboration: Theory and Empirical Evidence},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4wmf3Ffhl2},\nnote={under review}\n}"
},
"abstract": {
"value": "Machine learning (ML) models are increasingly used in various applications, from recommendation systems in e-commerce to diagnosis prediction in healthcare. \nIn this paper, we present a novel dynamic framework for thinking about the deployment of ML models in a performative, human-ML collaborative system. In our framework, the introduction of ML recommendations changes the data-generating process of human decisions, which are only a proxy to the ground truth and which are then used to train future versions of the model. We show that this dynamic process in principle can converge to different stable points, i.e. where the ML model and the Human+ML system have the same performance. Some of these stable points are suboptimal with respect to the actual ground truth. As a proof of concept, we conduct an empirical user study with 1,408 participants. In the study, humans solve instances of the knapsack problem with the help of machine learning predictions of varying performance. This is an ideal setting because we can identify the actual ground truth, and evaluate the performance of human decisions supported by ML recommendations. We find that for many levels of ML performance, humans can improve upon the ML predictions. We also find that the improvement could be even higher if humans rationally followed the ML recommendations. Finally, we test whether monetary incentives can increase the quality of human decisions, but we fail to find any positive effect. Using our empirical data to approximate our collaborative system suggests that the learning process would dynamically reach an equilibrium performance that is around 92% of the maximum knapsack value. Our results have practical implications for the deployment of ML models in contexts where human decisions may deviate from the indisputable ground truth."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Human-AI Collaboration",
"Human-Computer Interaction",
"Dynamic Systems",
"performative prediction",
"strategic behavior",
"human-in-the-loop",
"dynamic learning",
"deployment strategies"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/30647409a3fecf97aa80f8333e8d6e3d1213ef69.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/11b96f2f5ae0305d4b1a52e9bbf37f42efbd1ca3.zip"
},
"title": {
"value": "A Dynamic Model of Performative Human-ML Collaboration: Theory and Empirical Evidence"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4wpqmhh05N | The Mutual Information Matrix in Hyperbolic Embedding and a Generalization Error Bound | main | Active | Hyperbolic embedding;Mutual information;Generalization error bounds | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;3;5 | 4;4;2;3 | 2;3;2;3 | 2;1;2;3 | 1;3;2;2 | 3.5 | 3.25 | 2.5 | 2 | 2 | -0.174078 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. 263-264: I don’t understand the reasons for setting $V_{w} = V_{c} = V$. Can you elaborate more on why this setting is used? If it is common practice, there should be some citations. \n\n2. It would be helpful to provide a clear definition of parsimony in Section 3.2"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper provides novel insights by studying the mutual information matrix in Skip-Gram Negative-sampling (SGNS) embeddings in hyperbolic space. In particular, demonstrating that distance in hyperbolic embeddings obtained by using SGNS equates to mutual information is an interesting finding that can motivate the use and further study of Poincaré embeddings in NLP. Additionally, the empirical result that hyperbolic embeddings are more unstable during training than their Euclidean counterpart and that more samples are needed to reduce training error can help guide further works in training hyperbolic embeddings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides both theoretical and empirical analysis of Skip-Gram Negative-Sampling (SGNS) embeddings in hyperbolic space. While SGNS traditionally embeds words and contexts in Euclidean space (Word2Vec), the authors extend this approach to hyperbolic space using Poincaré embeddings. Two types of errors are used to evaluate the embeddings: spatial error, which is influenced by the dimensions and structure of hyperbolic space, and generalization error, which measures the relationship between embedding error and sample size across different spaces. An empirical study of hyperbolic embeddings is conducted on WordNet and THUNews.\n\nThe authors investigate how hyperbolic distance relates to mutual information, deriving bounds on both spatial and generalization errors. Furthermore, they demonstrate that the distance, d(w,c), between w and c corresponds to the mutual information between w and c in a hyperbolic space. This finding helps to motivate the use of Poincaré embeddings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, the paper is extremely dense and difficult to follow because it provides little motivation or intuition for mathematical notation.\n\nI understand that one of the paper's main contributions is to provide a detailed mathematical analysis of the mutual information matrix in hyperbolic embeddings. Still, some detail is unnecessary in the main body of the paper and hinders the reader's ability to read the paper. For example, results such as those in section 3.1 that use straightforward algebraic computations to show that distance approximates mutual information should be moved to the appendix. \n\nWhile the paper provides some nice theoretical insights, the methods used for the evaluation of hyperbolic embeddings with Skip-Gram Negative-Sampling are not robust. Using the rank of the restored point-wise mutual information matrix as the sole metric to compare Euclidean hyperbolic embeddings is not particularly interesting. Investigating the performance of hyperbolic embeddings on word similarity tasks, e.g., WordSim-353 or SimLex999, would provide a meaningful quantitative comparison of using embeddings based in different spaces and help motivate the study of static, hyperbolic word embeddings. Further, comparing the performance of classification models that use standard Word2Vec embeddings and hyperbolic Skip-Gram Negative sampling embeddings would provide a much stronger motivation for the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "n/a"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. It directly replaces the distance/similarity measure in learning word2vec, which makes the approach easy to conceptualize.\n2. Under mild assumptions, the submission provides interesting generalization bounds."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "the paper proposed to replace the Euclidean embeddings learned in word2vec with Hyberpoblic embeddings specifically with Poincare geometry. The method is straightforward - rather than using dot-product, a Euclidean-space similarity measure, the submission measures the distance between two word vectors on a Poincare disk. However, the evaluation approach puzzles me."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "It puzzles me that there are many simple 'real-world'-ish datasets for evaluating learned word embedding, but somehow, the submission doesn't provide any of them. IMO, the submission conducts the study as if the problem is orthogonal to NLP.\n\n1. Having an understanding of the sample complexity and how the error bound of the estimation depends on the sample complexity is generally informative, however, in recent years, we have found ourselves in a wacky situation that, for a model to generalize, the training loss just needs to be small, but it doesn't need to be very small, because many plateaus in the loss landscape provide models with good generalization, thus, having a theoretical understanding of the loss function or the error bound becomes somehow outdated.\n\n2. A crucial aspect or consideration of learning on massive corpora is the complexity of the algorithm itself, which the submission doesn't mention.\n\n3. The submission didn't use common datasets for learning word embeddings, nor does it provide any evaluation on common benchmarks, e.g. SimEval or SentEval."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "__Questions__:\n- Why choose 400 Euclidean dimensionality and 2 for Poincare?\n- Table 1,2,3: I don't really understand the reported numbers (what is the distance function exactly in Table 1? What is the distance in Table 2?). I suggest you give an explicit interpretation of those numbers to make it clear to the reader\n- There is a conclusion that training with hyperbolic embeddings takes more time and iterations. However, it's unclear from your experiments if Poincare space embeddings can achieve the same loss as Euclidean ones with a higher number of samples (or iterations) (Table 6 and Table 7) or it is still behind the Euclidean embeddings\n- Lime 418: `Moreover, hyperbolic space requires more than 70,000 samples to achieve adequate training`: what is _adequate_? How do you define it? \n\n__Writing__:\n- Table 7 has the incorrect title. It's 400-dimensional Euclidean space, not Poincare space"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- I strongly believe exploring the embedding spaces beyond Euclidean space is crucial for the field\n- Theoretical and empirical results are provided\n- Reflection on the advantages (low-dimensionality) and disadvantages (training instability, large sample size etc.)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Hyperbolic embeddings were introduced in the literature as an alternative to the embeddings in Euclidean space. This paper provides an analysis of the skip-gram embedding model in hyperbolic space. The authors offer their take on many dimensions of the hyperbolic embeddings, including their connection to the mutual information matrix, generalization capabilities (with theoretical proof), and required sample size/training stability. Theoretical results are further supported by empirical results on two datasets: Wordnet and THUNews."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Although it's crucial and interesting to explore various properties of hyperbolic embeddings, they do not exist in a vacuum, so it would be useful to see the performance of the embeddings on downstream tasks\n- Provided experimental setup and results are hard to follow (see questions)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See my questions in Weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1) Connecting hyperbolic embedding with mutual information is interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper discusses the relationship between the point-wise mutual information matrix and the hyperbolic distance. Furthermore, the authors establish generalization error bounds for hyperbolic embedding. These bounds demonstrate the dimensional parsimony of hyperbolic space and its relationship between the generalization error and the sample size. Experiments on the Wordnet dataset and the THUNews dataset validate the theoretical properties."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The motivation of connecting hyperbolic embeddings and PMI is unclear. Both are distance measure, hyperbolic distance captures similarity of hierachies, while PMI quantifies the discrepancy between the probability. What do you mean by the “equivalence between the Gramian matrix in hyperbolic embedding and the dimension of the space”? \n2) What is the research questions that you want to answer in the Experiment section? The authors said that the theoretical findings are evaluated by conducted the experiments. However, it is unclear how the experimental results related to the theoretical findings. Which theorems (theorem 1 or 2? ) you want to answer? It would be much clear if the authors list the research questions. What do you really want to evaluete and compare. \n3) I could not understand what do the tables in the experiment section want to tell us? perhaps the authors want to show some correlation between dimension and mutual information matrix? then it is better to plot it with some line plots."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We analyze the relation between hyperbolic embedding and mutual information, and give a generalization error bounds for hyperbolic embedding."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024the,\ntitle={The Mutual Information Matrix in Hyperbolic Embedding and a Generalization Error Bound},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4wpqmhh05N},\nnote={under review}\n}"
},
"abstract": {
"value": "Representation learning is a crucial task of deep learning, which aims to project texts and other symbolic inputs into mathematical embedding. Traditional representation learning encodes symbolic data into an Euclidean space. However, the high dimensionality of the Euclidean space used for embedding words presents considerable computational and storage challenges. Hyperbolic space has emerged as a promising alternative for word embedding, which demonstrates strong representation and generalization capacities, particularly for latent hierarchies of language data. In this paper, we analyze the Skip-Gram Negative-sampling representation learning method in hyperbolic spaces, and explore the potential relationship between the mutual information and hyperbolic embedding. Furthermore, we establish generalization error bounds for hyperbolic embedding. These bounds demonstrate the dimensional parsimony of hyperbolic space and its relationship between the generalization error and the sample size. Finally, we conduct two experiments on the Wordnet dataset and the THUNews dataset, whose results further validate our theoretical properties."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Hyperbolic embedding",
"Mutual information",
"Generalization error bounds"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9c9f6370badd68f1aad3b3bd5314a4a60d120fe6.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "The Mutual Information Matrix in Hyperbolic Embedding and a Generalization Error Bound"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4wtcXV0kbi | S7: Selective and Simplified State Space Layers for Sequence Modeling | main | Active | state space models;neural network architectures;deep learning architectures;sequence modeling;event-based vision;event cameras;neural odes | learning on time series and dynamical systems | 3;3;3;5 | 5;5;4;4 | 2;2;2;3 | 1;2;1;2 | 2;1;2;2 | 3.5 | 4.5 | 2.25 | 1.5 | 1.75 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The S7 should combine the advantages of the S5 and S6, so it needs to be compared with both. E.g., why is it simpler than the S6? Training faster? Fewer parameters?\n2. Why did the introduction of selective matrix degrade the effectiveness so much on long sequence tasks like path-X in LRA, compared to S5 (Line 402)? More analysis is needed, otherwise the S7's improved sequential modeling capabilities using input-dependent matrix don't seem useful and convincing.\n3. Prior work showed that the performance of deep state space models are sensitive to the initialization of the state matrix. [1] Have you done experiments with different initializations to verify the robustness of the S7 module? Along as the experiments of effectiveness of Stable Reparameterization for Long-Term Dependencies.\n\n[1] Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in Neural Information Processing Systems, 34, 2021b."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. It turns out that the combination of S5 and S6 is helpful for SSM structure and contributes to the development of SSMs.\n2. Experiments are conducted on many datasets together with extensive analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a new SSM called S7 for sequence modeling. It combines the strengths of S5 (simpler structure) and S7 (input-dependent state transitions) and incorporates stable reparameterization of the state matrix. Many experiments were carried out to verify the efficiency and performance on different datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The core contribution of this paper seems to be successfully combining S5 and S6, with many experiments being carried out. However, the paper lacks a detailed introduction to the S5 and S6, so readers may not be completely clear about the advantages and disadvantages of these two models, and how S7 surpasses the two. E.g., why to say \"S6 introduces hardware-specific complexity\" (Line 88)? Some details of S5 and S6 can be shown clearly in 3.1 Background.\n2. An intuitive comparison of the S4 (S4D or DSS), S5, S6, and S7 schematic can be given, to clearly show the development and difference of SSMs. Or a similar part like 4. RELATIONSHIP BETWEEN S4 AND S5 in S5 paper [1].\n3. The effectiveness of Stable Reparameterization of S7 seems not to be verified. More introduction to the initialization of the state matrix should be given, or the writing logic of Stable Reparameterization for Long-Term Dependencies (Line 211) is too hard for readers to follow.\n\n[1] Smith, J. T., Warrington, A., & Linderman, S. W. (2022). Simplified State Space Layers for Sequence Modeling. ArXiv. https://arxiv.org/abs/2208.04933"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\tWhat does S7 mean? (is the ”7” with some particular meaning?)\nOther questions please see the above weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper proposes an input-independent mechanism for SSM, with stable reparameterization techniques, and it provides the stability of this reparameterization through comprehensive theoretical derivations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an SSM (state space model) architecture call S7, to provide an input-dependent mechanism for an existing work (S5), and showing this architecture can efficiently and accurately deal with sequence modeling tasks. The experiments show it performs better than Mamba(S6) in LRA tasks, while worse than other SSM-like models, and show it achieves good performance in neuromorphic datasets and dynamic prediction tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe novelty is not very clear to me, as mentioned in the paper, it is claimed that this paper provides efficient input-dependent state transition dynamics based on S5, but conceptually, it is the same as what Mamba (S6) did for S4, that introduced learnable matrices A, B, C\n2.\tThe paper claims this S7 is more efficient than Mamba, but I did not find any experiments data on the efficiency comparison with Mamba/Mamba2. Theoretically, without using parallel technologies like selective scan or others, how could one run the S7 in an efficient way when at each time step one needs to update the dynamics of A, B, C, D? \n3.\tThe experiments do not well support the claims: the performance of S7 in LRA is substantially worse than many other methods, and there’s no results showing it’s better than mamba in general language modeling tasks (which is an important selling point of mamba-like models). This leads to a question: what is the use case of S7?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. **Could the authors clarify the novelty of the reparameterization?** How does it differ from StableSSM’s reparameterization framework?\n\n2. **Why was the S5 model chosen as the basis for S7?** Given that S6 with the StableSSM reparameterization might provide similar benefits, what informed this design choice?\n\n3. **Could the authors specify if the neuromorphic-specific design applies solely to neuromorphic tasks or to all benchmarks?** This would improve clarity regarding the model's consistency across different tasks.\n\n4. **What is the author’s perspective on improving the model's performance on data whre time-dependance is note relevant?** Given S7’s limited success on LRA, is there a feasible modification that could address these challenges while preserving input-dependence?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "*Input-Dependent Dynamics:* S7’s adaptation of the S5 model to be input-dependent is a promising approach. This could enhance the model’s responsiveness to input variability, a significant issue in long-range sequence tasks.\n\n*Stable Reparameterization:* The model claims to maintain gradient stability over long sequences, addressing gradient explosion/vanishing issues commonly faced in deep learning. This feature has potential benefits for training efficiency and stability.\n\n*Broad Applicability:* S7’s successful application across various domains, from physical time series to neuromorphic vision, suggests it may generalize well to different task types."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents S7, a simplified state-space model (SSM) designed for long-range sequence tasks. Building on the S5 model, it introduces input-dependence to allow dynamic adjustments to state transitions based on input content. The paper claims S7 achieves stable reparameterization and efficient performance across diverse tasks, including neuromorphic event-based datasets and Long Range Arena (LRA) benchmarks, without requiring the complexity of models like S4 or S6 (Mamba). The proposed model is argued to balance computational simplicity with adaptive filtering and content-based reasoning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Limited Novelty:** The paper is introducing an input-dependent update mechanism (already introduced by S6) stabilized through the reparameterization framework and key equations (Eqs. 6, 7, and 8) borrowed directly from StableSSM [1], raising concerns about the originality of the contributions. The paper only states that it was “inspired” by stable reparameterization, yet much of the core methodology relies on prior work.\n\n\n**Inconsistent Notation:** The notation for $𝐴_k$ is unclear, with dependency on input appearing inconsistently (e.g., it appears in Eq. 5 and line 267 -- e.g. $𝐴_k(u_k, \\theta_m)$ but is omitted elsewhere -- e.g. $𝐴_k(\\theta_m)$. This lack of uniformity in notation undermines the model’s theoretical presentation.\n\n**Weak Justification for S5 Model Selection:** S5 is mentioned as the basis for S7, but no rationale is provided for not using S6 (Mamba) and the reparameterization technique from StableSSM. Moreover, no connection or description of the S5 model is given (MIMO approach etc.)\n\n**Assumptions Clarity:** Assumptions (3.1, 3.2, and 3.3) are not well justified or examined for feasibility, and the text lacks guidance on implementing or verifying these assumptions. This leaves important theoretical aspects of the model unaddressed.\n\n**Unclear Contribution of Neuromorphic Design:** The neuromorphic-specific design choices in Section 3.4 seem disconnected from the rest of the model’s development (no other mention on the first part of the paper). It’s unclear whether these additions (Eq. 11, 12) apply exclusively to neuromorphic tasks or extend to other benchmark tasks.\n\n**Lack of Benchmark Justification:** The paper does not clarify why specific datasets were chosen. 
For instance, given the input-dependent nature of S7, benchmarks used by similar models like Mamba (e.g., Selective Copy or Induction Heads or other similar benchmarks -- see Section 7/Table 4 of the thematic survey [2]) might have been more appropriate for comparison.\n\n**Poor Performance on LRA Benchmarks:** S7’s subpar performance on LRA benchmarks raises concerns about its applicability to heterogeneous data. The authors provide only a brief discussion, without substantial insight or proposed solutions for improving performance on these challenging tasks.\n\n\n[1] Wang, Shida, and Qianxiao Li. \"Stablessm: Alleviating the curse of memory in state-space models through stable reparameterization.\" arXiv preprint arXiv:2311.14495 (2023).\n\n[2] Tiezzi, Matteo, et al. \"State-Space Modeling in Long Sequence Processing: A Survey on Recurrence in the Transformer Era.\" arXiv preprint arXiv:2406.09062 (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Is the code to reproduce the results publicly available?\n- How is lambda_k computed?\n- Fig.1 delta_k is not defined in the caption\n- Line 104-109 this sentence is too long and could be split in 2-3 smaller ones.\n- What is the performance of other SSMs in Table 5, 6 and 7?\n- In which regards in the S7 simpler or superior over other SSMs? Number of flops? Number of parameters?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The authors evaluate their model on a series of benchmark tasks\n- The authors provide some theoretical analysis for the training stability"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel state-space model (SSM) called S7 that introduces input-dependent dynamics to filter and retain relevant input information over extended time horizons. An important part of S7 is a reparametrization trick that ensures training stability. S7 is claimed to reach a balance between performance and model's complexity for processing very long sequences."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Scientific novelty of the manuscript: the authors base their work on the existing S5 model and extend it with the input-gating, that other models, such as Mamba already have demonstrated. In fact, it remains even unclear when reading through the main paper how the input-gating is realized precisely. The reader misses those equations. I.e., How is Lambda_k computed?\n- The selection of the tasks and in particular the selection of the reference models is a major weakness of the model:\n 1. since the S7 model is based on the S5 model, it is of paramount importance that one always compares to S5 at least, which the authors do not do for many datasets they considered. For example, this comparison is missing in Table 5, 6 and 7 (wrongly called Figure 2 in the manuscript).\n 2. looking at the LRA results, one can see that S7 is only in 2 tasks slightly better than S5, but in the remaining tasks it is significantly worse. Moreover, in Table 5, 6 and 7 (wrongly called Figure 2 in the manuscript), the authors don’t even compare to the S5.\nthe authors claim to introduce a simpler model than Mambda, but it remains unclear in what regards it is simpler, e.g., if it uses less number of parameters, or less computations, this needs to be demonstrated in the results.\n- Some minor comments are: The authors are very much overstating their novel contributions with terms such as “S7 has demonstrated its superiority”, which is by no means true. Figure 2 on page 10 should probably be Table 7"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A new sequence model for long sequence modeling employed on various tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024s,\ntitle={S7: Selective and Simplified State Space Layers for Sequence Modeling},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4wtcXV0kbi},\nnote={under review}\n}"
},
"abstract": {
"value": "A central challenge in sequence modeling is efficiently handling tasks with extended contexts. While recent state-space models (SSMs) have made significant progress in this area, they often lack input-dependent filtering or require substantial increases in model complexity to handle input variability. We address this gap by introducing S7, a simplified yet powerful SSM that can handle input dependence while incorporating stable reparameterization and specific design choices to dynamically adjust state transitions based on input content, maintaining efficiency and performance. We prove that this reparameterization ensures stability in long-sequence modeling by keeping state transitions well-behaved over time. Additionally, it controls the gradient norm, enabling efficient training and preventing issues like exploding or vanishing gradients. S7 significantly outperforms baselines across various sequence modeling tasks, including neuromorphic event-based datasets, Long Range Arena benchmarks, and various physical and biological time series. Overall, S7 offers a more straightforward approach to sequence modeling without relying on complex, domain-specific inductive biases, achieving significant improvements across key benchmarks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"state space models",
"neural network architectures",
"deep learning architectures",
"sequence modeling",
"event-based vision",
"event cameras",
"neural odes"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/1fffab6aa0f02c9b5326306d49f2faa5435961a9.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "S7: Selective and Simplified State Space Layers for Sequence Modeling"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4wuvmJRAU4 | Interfering with Interference: Blind Shuffling and Superposition for Better Multi-Model Compression | main | Active | Task Arithmetic;Superposition;Model Merging;Multi-model Compression;Model Serving | transfer learning, meta learning, and lifelong learning | 3;5;6;6 | 5;3;4;4 | 2;2;3;3 | 3;2;3;3 | 2;2;3;2 | 5 | 4 | 2.5 | 2.75 | 2.25 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the above."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* Simplicity and Effectiveness: One of the major strengths of this paper lies in its approach’s simplicity. Layer shuffling and task vector superposition are straightforward yet powerful techniques that effectively reduce interference without needing additional training, optimization, or complex configurations. This simplicity not only enhances the practicality of the approach but also makes it easy to implement and adapt across various multi-model compression tasks, proving that even minimal adjustments can yield significant performance improvements.\n\n* Effective Interference Reduction: The combination of layer shuffling and task vector superposition is innovative in addressing interference by increasing orthogonality among task vectors. This approach allows for a more effective merging process, yielding improved model accuracy without the need for additional optimization or training steps.\n\n* Adaptability and Scalability: The proposed method’s flexibility is a clear strength. Its data and model-independent nature enables seamless additions and removals of models (hot-swapping) without re-computation, a valuable feature for dynamic applications. Moreover, the approach is efficient, doubling the memory footprint while providing significant accuracy improvements.\n\n* Comprehensive Evaluation: The experiments cover a range of benchmarks and tasks, showcasing the model’s capability across various domains, from image classification to text generation. This breadth of evaluation helps establish the generalizability of the method across tasks and model architectures."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces two methods, layer shuffling and task vector superposition, aimed at reducing interference between task vectors in multi-model compression scenarios. The proposed methods work by increasing the orthogonality of task vectors, thus minimizing their interference during merging. By leveraging randomization, these methods require no additional training and can be applied across various models and tasks. Experiments on multiple benchmarks, including CLIP, Flan-T5, and GPT-2, demonstrate that this approach can achieve comparable performance to fine-tuned models while reducing storage costs, particularly for real-world deployment scenarios where adding or removing models on the fly is necessary."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Lack of Detailed Performance Analysis Based on Shuffle/Superposition Levels: It would be useful to analyze the impact of different levels of shuffling and superposition, as these levels could influence task vector similarity and interference differently. This analysis would provide a clearer picture of optimal interference reduction strategies.\n* Clarity Issues in Method Description: Some aspects of the method, such as the merged task vector formation in equation (8), could benefit from further clarification. Specifically, does shuffling task vectors in different layers cause mixing of task vectors across layers, for instance, between k-1 or k+1? Clarifying this would enhance understanding of how the shuffle affects layer-specific task vector alignment.\n* Effectiveness Across Tasks: The effectiveness of either TA+Shuffle or STA appears to vary by task, yet the paper does not discuss why some tasks benefit more from specific strategies. A more in-depth analysis here would provide insights into optimizing methods based on task characteristics.\n* Related Work Reference (PEFT): (Maybe, long shot) this paper is related? \"Efficient Storage of Fine-Tuned Models via Low-Rank Approximation of Weight Residuals,\"\n* Minor Formatting Issues: There are some minor formatting errors in the document, such as incorrect Latex punctuation and inconsistent reference formatting. For example, equation 3 is mistakenly referenced in the context of equation 4, and parentheses are missing in certain citations. Additionally, clarifying what the values in parentheses mean in tables, such as in the average (%) and Bits (Gb) columns, would be helpful, as it currently requires reading the text to understand that they refer to relative performance to fine-tuned models."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. why is 5.03 the number of Gb attributed to fine-tuned? shouldn’t it be 8x the pre-trained model?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The method is intuitive and simple. The motivation for both components is well written and properly ablated.\n2. The method is scalable and memory efficient (modulo the duplication of model parameters) given that it only requires the storage of random seeds to retrieve the final model.\n3. The experimental results are strong across benchmarks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes two stochastic mechanisms to improve performance for multi-task model merging by reducing task interference. First, the method takes advantage of the repeating structure of modern neural networks and randomly shuffles the same-module layer across blocks by first showing that the layers are mostly similar in the within-block across tasks. Second, the paper proposes random binary matrices to multiply parameter vectors to further reduce the task vector similarity. During inference, the inverse transforms are applied. The paper performs experiments across diverse benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The method has a limitation that is not discussed a lot, apart from the title: it requires the knowledge of the task id during inference. This needs to be underlined during the comparison with methods such as ties and task arithmetic for fairness.\n2. lack of forward pass time complexity comparison. The proposed method introduces an overhead in the forward pass: layers need to be reshuffled in the correct order, signs need to be restored and the residual needs to be added to the pre-trained weights. Therefore, there should be a study of how much overhead all these operations incur.\n3. Missing baselines: Given the parameter increase and the time complexity overhead, the paper should compare with the compression algorithm of [1].\n4. The paper solely focuses on small models, base variants on ViT and Flan-T5, but the literature uses ViT-L/14 and T5-XXL regularly. It would also be interesting to check the performance of the method as tasks increase, see 14 and 20 task benchmarks from [1]. It would be interesting to also track the forward pass time metrics in the case of larger models.\n5. L268-269: fix references for benchmarks: the vision one for instance comes from Ilharco et al. and not from FusionBench\n6. Baselines and their categorization are not explained and the reader cannot understand why PSP ins included given its poor results or what WEMoE and SMILE are on their own category compared to everything else. It would be helpful for the reader to provide a brief description of each method as well as a high level overview of the categories to help the reader understand rather than deferring to the appendix where they are actually not discussed.\n7. 
Extremely limited Related work: the quality of the paper is heavily undermined by the lack of proper references and discussion over related work.\n\nMinor Comments\n\n\n- L202: ontain → obtain\n- Rephrase informal writing:\n\t- L217: “which we expect to be much lower”\n\t- L150: “but this balance has generally been tricky to achieve”\n\n[1] Wang, K., Dimitriadis, N., Ortiz-Jimenez, G., Fleuret, F. and Frossard, P., 2024. Localizing Task Information for Improved Model Merging and Compression. *arXiv preprint arXiv:2405.07813*."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Q1: Although randomization offers clear advantages like data independence, would a more systematic approach to orthogonalizing task vectors further improve the performance?\nQ2: Did you observe any (in-)consistent performance variance due to randomness in shuffling and superposition?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper makes an observation that individual task vectors are too similar and successfully uses it to reduce task vector interference, leading to better multitask performance in model compression scenarios.\n- Both proposed techniques operate without needing data, allowing flexible model addition or removal without retraining or optimization.\n- The method achieves storage reduction compared to keeping individual models.\n- The approach is shown to improve performance across diverse domains including image classification, text generation, and text classification.\n- The method enables on-the-fly model integration, allowing seamless \"hot-swapping\" of models.\n- The paper is very well written and clearly structured."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces two methods, Layer Shuffling and Task Vector Superposition, aimed at reducing interference when compressing multiple fine-tuned models into a single multitask model using task vector arithmetic. Layer Shuffling works by randomly reordering layers in each model before merging, reducing alignment between task vectors. Task Vector Superposition applies random orthogonal transformations to further decorrelate task vectors. Both techniques minimize interference and improve performance across tasks. Experiments with CLIP-ViT, Flan-T5, and GPT-2 show that this approach achieves higher accuracy than vanilla task arithmetic and other baseline methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the paper compares its method to several baseline techniques, it misses comparison with closely related recent works, particularly Guillermo Ortiz-Jimenez et al.'s work (mentioned in the paper) on task vector manipulation for model merging. Including these comparisons would strengthen the submission.\n- Although the authors claim minimal memory overhead, additional context matrices and shuffled task vectors nearly double the memory requirement, which may not always justify the marginal performance gains over baselines like SMILE.\n- LoRA results show that SMILE achieves a better tradeoff between accuracy and memory than the reported combination of the proposed methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see the above weakness part"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper presents a novel approach to multi-model compression through random mechanisms\n\n- The empirical evaluation is conducted across multiple benchmarks to demonstrate the effectiveness of the method"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Layer Shuffling and Task Vector Superposition, two random mechanisms to reduce interference in multi-model compression by increasing orthogonality between task vectors. The methods achieve near-identical accuracy to individual fine-tuned models while reducing storage costs by 4 times and enable seamless hot-swapping of models without recomputation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The current presentation and writing require significant improvements. For instance, the mathematical analysis is overly simplistic and does not warrant extensive explanation. Additionally, the proposed method lacks a rigorous proof demonstrating why Layer Shuffling specifically enhances orthogonality more effectively than other potential random transformations.\n\n- The interaction between Layer Shuffling and Task Vector Superposition isn't thoroughly analyzed as it's unclear whether they're truly complementary or if one method dominates the benefits\n\n- The experiments are not convincing because the models used for comparison are generally much smaller, leading to expected inferior performance from competitors. Meanwhile, the authors' model is significantly larger, resulting in better performance, which does not necessarily demonstrate an advantage."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present two complementary random mechanisms to significantly reduce interference when eliminating cross-model redundancy for efficient multi-model serving: Layer Shuffling and Task Vector Superposition."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024interfering,\ntitle={Interfering with Interference: Blind Shuffling and Superposition for Better Multi-Model Compression},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4wuvmJRAU4},\nnote={under review}\n}"
},
"abstract": {
"value": "We present two complementary random mechanisms to significantly reduce interference when eliminating cross-model redundancy for efficient multi-model serving: _Layer Shuffling_ and _Task Vector Superposition_. They work together to increase the orthogonality among interfering task vectors, forcing them into self-destruction without requiring any post-training learning or optimization. _Layer Shuffling_ randomly reorders layers of each individual models to reduce the alignment between interfering task vectors. While _Task Vector Superposition_ leverages random orthogonal transformations to decorrelate task vectors further. Together, these techniques drastically minimize interference, yielding improved performance across multiple tasks with effectively zero incremental memory cost when incorporating new models. Their data and model-independent nature also allows for seamless on-the-fly addition or removal of models, without requiring any re-computation, making them highly practical for real-world deployment scenarios."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Task Arithmetic",
"Superposition",
"Model Merging",
"Multi-model Compression",
"Model Serving"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a03ddaa98be0df5602b6392b114f90e81d3b40d8.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Interfering with Interference: Blind Shuffling and Superposition for Better Multi-Model Compression"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4xBew7kuYB | Studying the Effects of Training Data on Small Language Models | main | Active | small language models;pretraining | other topics in machine learning (i.e., none of the above) | 1;6;6 | 4;3;4 | 1;4;2 | 1;2;3 | 2;3;3 | 4.333333 | 3.666667 | 2.333333 | 2 | 2.666667 | -0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See Weaknesses.\n\nTypos:\n1. line 19: propeties -> properties\n2. line 86: exihibit -> exhibit\n3. line 527: thire -> their"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper tested a very meaningful assumption of whether simple language in training data can lead to better generation abilities of SLMs.\n2. The readability measurement approaches are comprehensively studied and analyzed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors investigates the impact of training data's readability to the generation abilities of very small language models (SLM). They challenge the claim that training SLMs on simple language is the reason for their ability to generate coherent text. They create synthetic corpora with varying level of readability, and found no impact to the coherence of text generated by SLMs, and also found training on simple language does not lead to earlier development of coherence during training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The quality measurement is limited to perplexity and coherence (coherent, according to the llm-as-judge prompt, is considered \"well-structured and well-organized\", \"not just a heap of related information, but should build from sentence to sentence\"). The ignorance of other dimensions of quality (for example, as authors also mentioned, clarity and fluency) makes any statements about \"generation abilities of SLMs\" an overclaim.\n2. The quality measurement doesn't use any metrics from the original TinyStories paper: grammar, creativity, consistency with the beginning of the story (Eldan & Li, 2023). That makes the results from the two papers in comparable. Because of that, there is no evidence that \"SLMs trained on data with substantially more complex language also exhibit the same abilities as those trained on simple language\" can also hold the measurement in Eldan & Li (2023).\n3. While the authors rule out some factors not contributing to coherent SLMs, it is unclear what factors are contributing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I would love to hear the authors' response to my interpretable of the tables / figures, in case there is any misunderstanding.\n\nI am open to raising my score if there is a strong argument for why correcting this misunderstanding is important for the community, as it is my main concern about the paper."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- The paper is cleanly scoped and clearly written.\n- It corrects a widespread misinterpretation of a result in the NLP literature. This result has been used to motivate LM development inspired by human language learning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the question: are small LMs capable of learning TinyStories because it is *readable* (i.e., simple vocabulary, concepts, and grammatical structures) or some other feature, notably the dataset's lack of diversity (templated sentences with uniform structure)? The authors of TinyStories, and subsequent citations, only consider the former interpretation, but there is no evidence to eliminate the latter.\n\nThis paper carefully investigates this question by generating two datasets with the same synthetic data generation process, differing only in the vocabulary and the intended audience that the model is asked to use & consider. They call these two datasets $\\\\texttt{LlamaTales-Jr}$ and $\\\\texttt{LlamaTales-GRE}$. The two datasets are equally coherent, but $\\\\texttt{LlamaTales-Jr}$ is much more readable. They find that small LMs are *equally* capable of learning both $\\\\texttt{LlamaTales-Jr}$ and $\\\\texttt{LlamaTales-GRE}$, showing that *readability* does not necessarily explain small LMs' ability to learn TinyStories. Instead, they hypothesize it is the lack of diversity in the data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The scope of the paper is relatively narrow. While it shows that the community has widely misinterpreted the results of a particular paper, it's not clear how much it matters. Moreover, I believe the main surprising finding of $\\\\texttt{TinyStories}$ still stands, which is that SLMs are capable of learning the language of 3-4 year olds (regardless of why).\n- I believe the overall paper can use some reorganization.\n - I find it odd that §3 and §4 (which are all about measuring the readability and quality of the existing dataset, $\\\\texttt{TinyStories}$) are ordered before §5 (about constructing the datasets used in this paper). Wouldn't it make more sense to first describe the data creation methodology, *then* validate that they have the expected readability and quality? Right not, we don't get to the meat of the paper until halfway through page 7.\n - The connection between figures and claims in the running text of the paper is all over the place. For instance, most of the main claims in §3 and §4 are supported by figures in the Appendix.\n- The presentation of tables and figures can be more readable.\n - Figures 2, 3, 6 are hard to interpret due to lack of textual explanation, and I think there must be a better way to present the results. My understanding is that in Figure 2, I should see that in (b), the *green* dots (SLMs trained on $\\\\texttt{LlamaTales-Jr}$) are approximately as high as the best gray dots (LLMs), and in (c), the *blue* dots (SLMs trained on $\\\\texttt{LlamaTales-GRE}$) are ALSO approximately as high as the best gray dots (LLMs). Wouldn't it be better for these to be on the same axes, so the reader can compare directly whether LlamaTales-GRE is as learnable as LlamaTales-Jr? Subplots (a) for $\\\\texttt{TinyStories}$ and (d) for $\\\\texttt{FineWeb}$ should be in the Appendix, since they aren't used to support the main claims. 
I'm not sure what Figure 3 is doing in the main paper, since it's not discussed in the running text.\n - Table 1 contains results for many metrics which are not discussed in the running text of the main paper. To prevent reader confusion, I recommend moving the results for these metrics to the Appendix, where the metrics are described. The different metrics also don't seem to tell a different story.\n - I recommend a table with examples from $\\\\texttt{LlamaTales-Jr}$ and $\\\\texttt{LlamaTales-GRE}$."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "When measuring quality, you use a set of open models. Why not simply use a state of the art model such as GPT-4o or Claude3.5 instead?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The paper provides a new dataset with some added features in relation to a previous dataset (TinyStories)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper discusses the effects on training small language models by changing some features related to the concept of readability of one particular dataset. The experiments show no effects."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The scientific contribution of this paper is limited, as it tackles a very narrow (and somewhat artificial) research question, and the experiments show no discernible effects whatsoever. The research question is somewhat artificial in the sense that the concept of readability (in humans) concerns the cognitive load of *interpreting* a text, which is not the same thing as *learning* a statistical language model from a text. In particular since readability is usually defined in terms of features related to frequency and length of individual tokens, but the paper does not discuss the influence of tokenization on the learning abilities of language models. It is therefore not at all clear (to me) why the concept of readability would have anything at all to do with how well a statistical language model performs. The experiments included in the paper confirms that it does not. The paper also contains an experiment that shows that the concept of readability and the concept of text quality (as interpreted in terms of perplexity and coherence) are unrelated, which is exactly what you would expect given the definition of these concepts. As such, it is difficult to see what novel knowledge this paper contributes with."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024studying,\ntitle={Studying the Effects of Training Data on Small Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4xBew7kuYB},\nnote={under review}\n}"
},
"abstract": {
"value": "Prior work has found that training very small language models (SLMs) on synthetic children's stories allows them to generate coherent text, comparable to much larger models. These stories are claimed to encompass the vocabulary and factual knowledge base of a 3-4 year old child, capturing the \"essence of natural language.\"\nBecause of these claims, it is tempting to attribute the findings to the simple language of children's stories, drawing a parallel to how children learn language.\nIs the human concept of readability relevant in the context of language model training, or are these findings better explained by other propeties of the data?\nIn this study, we investigate this by first validating several automatic readability measures. We then create synthetic corpora with varying levels of readability and assess the coherence of text generated by SLMs trained on these corpora.\nWe find no relationship between the readability of training data and the generation abilities of SLMs. Specifically, SLMs trained on data with substantially more complex language also exihibit the same abilities as those trained on simple language. Moreover, training on simple language does not lead to earlier development of coherence during training."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"small language models",
"pretraining"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/407060a3df878a3a13c95e416ede391a6d5cd987.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Studying the Effects of Training Data on Small Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4xEACJ2fFn | Is the sparsity of high dimensional spaces the reason why VAEs are poor generative models? | main | Active | variational autoencoder;generative model;high dimensional statistics;spin glass;latent space;hyperspherical coordinates | generative models | 3;3;3;5;5 | 3;5;3;4;3 | 2;2;2;2;2 | 2;2;2;3;2 | 3;3;3;3;3 | 3.8 | 3.6 | 2 | 2.2 | 3 | -0.102062 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Have you tried running experiments without angular constraints but only radius constraints (to test against the lower variance of the prior effect)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tThe introduction of hyperspherical coordinates to better capture samples obtain from high dimensional Gaussians is useful especially in the context of constraining the latent variables\n2.\tThe paper attempts to establish an ambitious connection between replica symmetry breaking in spin glasses and their proposed modifications of the loss function that avoids ‘high sparsity’ equatorial regions of the hypersphere."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose to convert the latent variables of a VAE to hyperspherical coordinates. This allows them to constrain samples from the prior distribution to a small region in latent space especially if the latent dimension is large. They provide experimental evidence that this improves the performance of the VAE when generating new data. They also provide some theoretical justification arguing that the sparsity of the latent space impairs the smoothness of the latent manifold."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe connection to spin glasses is made via a formal similarity of the energy function of a spin glass and their regularization term (equation 8). This connection appears weak as it does not explain: a) how the regularization helps escaping local minima that correspond to low-quality outputs of the VAE, b) the effect on Parisi’s order parameter which seems at the heart of the spin glass theory of neural networks, and c) the role of temperature i.e., the learning rate in the proposed scheme by which desired low-entropic states are reached.\n2.\tI am not sure that sparsity of the latent representation is the root cause of poor generative performance in VAEs. “Posterior collapse” seems a more likely explanation (also of the empirically observed improvements in section 4) as the proposed constraints not only compress the volume but also simply decrease the variance of the prior distribution. \n3.\tAs a minor point, in line 189-190 the authors see sparsity as an impediment to learning a data representation as a smooth manifold. I am not sure I agree unless the objective would be to explicitly construct the manifold (e.g., via simplicial complexes). But the manifold hypothesis (like the spin glass model) is only a conceptual aid (of how to think about VAE representations of data) not a fully developed theory from which algorithms and their convergence properties can be derived."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could you provide comparisons with hyperspherical VAEs or other structured latent space models?\n2. Have you explored interpolation in the latent space to support your density claims?\n3. Why were only two datasets used? Including more diverse datasets could better validate your method.\n4. Why would a latent space as tightly concentrated as shown in Figure 1 be desirable? This remains unclear."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "• Innovation and Relevance: The use of hyperspherical coordinates in latent space with principles drawn from statistical physics is a unique and potentially impactful innovation.\n• Clear Problem Statement: The paper presents the problem of sparse latent spaces in VAEs clearly, providing a well-motivated solution.\n• Practical Usability: The proposed method integrates easily with current VAE models and introduces a manageable computational overhead.\n• Clarity of Presentation: The visual support, especially in Figure 1b, effectively illustrates the approach, aiding in understanding the latent space adjustments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an enhancement to Variational Autoencoders (VAEs) by introducing a hyperspherical latent space with a novel loss function. The method aims to improve generative quality by concentrating embeddings in denser latent regions, moving them away from the equatorial band often associated with sparsity. The approach offers compatibility with existing VAE structures, requiring minimal adaptation to the standard VAE framework."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "• Limited Experiments: The method is only evaluated on two datasets (MNIST and another dataset). More examples of generated samples and interpolations would better support the claim of improved density in latent space.\n• No Comparison with Relevant Competitors: The paper lacks comparative experiments with established hyperspherical VAEs, such as Davidson et al. (2018), or with Riemannian approaches like “A geometric perspective on VAEs” by Chadebec and Allassonniere.\n• Inconclusive Results: The results in downstream tasks like classification are mixed, and the paper’s claims of denser latent space do not\nconsistently reflect in performance metrics.\n\nWhile the paper offers valuable theoretical insights, additional empirical support is needed to substantiate the generative benefits relative to state-of-the-art sampling methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the weakness section above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well written. \n- The relevant works are clearly listed. \n- The idea of drawing insights from high dimensional physical systems is interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new way to formulate the latents of VAE on hyperspherical coordinates. They use real MNIST data to show the improved generalization ability of VAE."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The experiment results section is weak.\n- The evaluation is only qualitative comparison. Is there any quantitive metric (e.g. classification/prediction error) that can be used to compare the proposed method with existing methods?\n- Lack of comparison with other related hyperspherical VAE work listed in the section 2. related works. e.g. Hyperspherical Variational Auto-Encoder’ Davidson et al. (2018), Yang et al. (2023), Bonet et al. (2022), etc. \n- The paper only shows results on one real dataset. How well does the proposed method generalize to more datasets?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- What are the main limitations of this method?\n- Could the authors provide a comparison with additional baselines and training times? It would be valuable to compare the generative performance and training times of the proposed method with other state-of-the-art approaches to evaluate whether the additional computations required for using hyperspherical coordinates are justified by the improvements in generation performance.\n- The authors state that ‘the random samples of an independent multivariate Gaussian distribution fall in the equator of a hypersphere, and thus none of them is near the singularities of the hyperspherical coordinates’. However, once the latent samples are forced away from the equator, could it be possible to fall near the singularities of the hyperspherical coordinates?\n- The authors have presented generated images only on MNIST, a relatively simple dataset that does not require a high-dimensional latent space to capture its features. As a result, introducing additional constraints in the latent space does not appear to limit its capacity to represent information. However, with more complex datasets, how can these constraints affect the expressivity of the model given that the representations tend to overlap (as stated in Figure 1)? \n- Comparing the results in Figure 2 is challenging because the configurations (dimensionality of the latent space) are positioned at different points (not aligned). Consequently, it is difficult to determine in which configurations or regimes the VAE with hyperspherical coordinates surpasses the vanilla VAE and vice versa.\n- In section 4.4, the authors generate new data sampling from a von Mises–Fisher distribution with the same mean and covariance as the ones empirically calculated from the latent embedding of the full test dataset. What is the motivation to use the empirical statistics of the test set instead of the training set for generation? 
\n- Could this method be extended/generalized to other distributions?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- To the best of my knowledge, this work introduces a novel approach to improve generative performance in VAEs by constraining latent representations, exploiting the hyperspherical coordinates formulation to reduce sparsity in the high-dimensional latent space.\n- I find the connection between VAE training and physical systems, such as spin glasses, particularly valuable, as it provides a novel perspective for understanding the model's training dynamics."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose expressing the latent variable of a standard VAE in hyperspherical coordinates, reformulating the KL term of the ELBO loss accordingly, to reduce the sparsity in high-dimensional latent spaces and improve the generative performance of the model. Their approach draws on a parallel between high-dimensional spaces in statistical physics, specifically spin glasses, and the training dynamics of a VAE."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The authors should revise the references (e.g., replace arXiv preprints with published versions where available, add access dates for blog references, and consider replacing Wikipedia links with more reliable sources). Additionally, some typos were noted in the main text, and Figures 4 and 5 appear in low resolution, making the text difficult to read.\n- I think the experimental section could be strengthened. Although the primary aim of this work is to improve the model's generative performance, the authors present generated samples only on MNIST, which is a simple dataset. Also, they do not compare the generative performance of the method with any other baseline (in the introduction they mention methods that improve generative performance by using more flexible priors but do not show if the results achieved are comparable).\n- The paper lacks a concluding discussion and does not explore potential limitations of the method or directions for future research, which I believe would add significant value."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- As mentioned in the weaknesses section, a VAE usually learns in a polarised regime when $\\beta$ is sufficiently high. In this setting, the latent representations contain two kinds of variables: active and passive. The passive variables are kept as close to the prior as possible and used to lower the KL divergence, while the active variables contain the information needed for reconstruction (or further use in downstream tasks). Active variables typically do not follow the prior as the KL is kept low by the passive variables. Instead, they have a very low $\\\\sigma$ such that during the reparametrisation trick, $z \\approx \\\\mu$. One would usually remove passive variables for downstream tasks, only keeping the small subset of variables containing some information [8]. I wonder if keeping only active variables would change the sphere projection to something more akin to what is obtained with hyperspherical coordinates?\n- What is the impact of using different values of $a_{\\mu, k}$ (keeping $a_{\\mu, k} \\neq 0$ of course)? Was there a specific reason to choose $a_{\\mu, k}=1$?\n\nReferences\n=========\n- [1] Kingma, D. P. and Welling, M. (2014). Auto-Encoding Variational Bayes. In International Conference on Learning Representations, vol. 2.\n- [2] Higgins, I., Matthey, L., Pal, A., Burgess, C., Glorot, X., Botvinick, M., Shakir, M. and Lerchner, A. (2017). $\\\\beta$-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework. In International Conference on Learning Representations, vol. 5.\n- [3] Rolinek, M., Zietlow, D. and Martius, G. (2019). Variational Autoencoders Pursue PCA Directions (by Accident). In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR).\n- [4] Dai, B., Wang, Y., Aston, J., Hua, G. and Wipf, D. (2018). Connections with Robust PCA and the Role of Emergent Sparsity in Variational Autoencoder Models. Journal of Machine Learning Research, 19(41), pp. 
1–42\n- [5] Lucas, J., Tucker, G., Grosse, R. B. and Norouzi, M. (2019a). Don’t Blame the ELBO! A linear VAE Perspective on Posterior Collapse. In Advances in Neural Information Processing Systems, vol. 32.\n- [6] Bowman, S. R., Vilnis, L., Vinyals, O., Dai, A., Jozefowicz, R. and Bengio, S. (2016). Generating Sentences from a Continuous Space. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning.\n- [7] Rezende, D. J., Mohamed, S. and Wierstra, D. (2014). Stochastic Backpropagation and Approximate Inference in Deep Generative Models. In Proceedings of the 31st International Conference on Machine Learning, Proceedings of Machine\nLearning Research, vol. 32.\n- [8] Bonheme, Lisa, and Marek Grzes. \"Be more active! understanding the differences between mean and sampled representations of variational autoencoders.\" The Journal of Machine Learning Research 24.1 (2023): 15423-15452."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Originality\n========\n- I like the creative approach of solving the issue of poor image quality generation by applying a solution to a similar problem from physics.\nThis is quite different from previous publications about hyperspherical VAEs, where the main motivation (as far as I know) was to provide a better prior than the standard multivariate Gaussian.\n- The proposed model also differs from others in the hyperspherical VAE literature. Usually, changing the prior and posterior distribution can be complex as the reparametrisation trick and the KL divergence need to be updated accordingly. Here, the proposed solution is an elegant change of coordinates done after the reparametrisation trick during the KL divergence computation, making it easy to implement and intuitive to understand.\n\nQuality/Clarity\n===========\n- The section on spin glasses is well vulgarised, intuitive, and reads very well for non-physicians.\n- The paper is generally well-written and easy to follow.\n\nSignificance\n=========\n- Given the simplicity of the implementation, practitioners looking for better image generation quality with VAEs could easily adopt the proposed model.\n- The explanation of why the generation is bad at inference time is interesting for the research community working on the learning dynamics of VAEs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the poor image generation quality of VAEs at inference time when the latent representation is high dimensional. After hypothesising, based on spin glasses theory from statistical physics, that the issue comes from the sparsity of high-dimensional spaces. Based on this, they propose to apply a mechanism akin to the quenching process used to reduce the entropy of such systems. Practically, this is done by implementing a change of coordinates from Euclidean to hyperspherical during the KL divergence computation and setting the priors of each dimension of these new coordinates such that the latent samples are pushed away from highly entropic regions. This results in an improved generation quality for MNIST while keeping latents that are interpretable enough to perform clustering."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While I really like this paper's approach, I have some concerns about its empirical soundness and found several mistakes in the given equations (see major comments below). Clarifying some aspects of the paper would also strengthen it (see major and minor comments below). If these concerns were addressed, I would happily raise my score.\n\nMajor comments\n=============\n\nExperimental soundness\n---------------------------------\n- The experiment compares the results of $\\\\beta$-VAEs with change of coordinates + annealing with the results obtained with $\\beta$-VAEs without annealing. As a result it is not possible to see if the improved generation comes from the annealing or the change of coordinates. Additional results with $\\\\beta$-VAEs using the same annealing schedule would be needed to ensure that the improvement is really due to the change of coordinates.\n- While the experiment is partially done on CIFAR 10, it is hard to assess how several results generalise. For example, a comparison of the classification accuracy, examples of images generated, and a comparison of the projection into spheres using both models would be nice to have on CIFAR 10 as well.\n\nMathematical soundness\n----------------------------------\nIn section 1.1, several equations were incorrect.\n- In Eq. (1) $KLD(q_{\\\\phi}(z) || p_{\\\\theta}(z))$ should be $KLD(q_{\\\\phi}(z|x) || p_{\\\\theta}(z))$\n- In Eq. (2) the given formula is not for $KLD(z,\\\\epsilon)$ but for $-KLD(z,\\ \\epsilon)$. Furthermore, subscripts from the summations are missing. I would suggest either removing the second summation and using a matrix form like $KLD(z,\\\\epsilon) = \\\\frac{1}{2} \\sum^{N_b} \\\\bigl(Tr(\\\\sigma) + || \\\\mu||^2_2 - \\\\log det(\\\\sigma) - n\\\\bigr)$, or rewriting it with subscripts as $KLD(z,\\\\epsilon) = \\\\frac{1}{2} \\sum^{N_b} \\sum_{k=1}^n \\\\bigl(\\\\sigma_i^2 + \\\\mu_i^2 - \\\\log(\\\\sigma_i^2) -1 \\\\bigr)$.\n- In Eq. 
(3), this is not the ELBO $\\\\mathcal{L}$ from Eq. (1) but its negative approximation $- \\\\tilde{\\\\mathcal{L}}$ which is minimised by the VAE. While the original formulation of VAEs by [1] does not contain a $\\\\beta$ term and is equivalent to setting $\\\\beta=1$, it would be interesting to briefly discuss what is the impact $\\\\beta$ when $\\\\beta > 1$ and when $\\\\beta < 1$. Indeed, the motivation for both settings is very different: the first is to provide \"disentangled representations\" [2] and to force the VAE to learn in a polarised regime (a.k.a selective posterior collapse), which is akin to a PCA-like behaviour [3,4,5], while the second aims at mitigating posterior collapse and is often used together with annealing [6]. Thus, the choice of $\\\\beta$ has a practical impact on the proposed experiment (see further discussion on the questions part below).\n\nClarity\n---------\n- To facilitate the understanding of the paper, it would be great to have the derivations from Eq. (4) to Eq. (5) and from Eq (8) to Eq (9) in appendix.\n\n\nMinor comments\n=============\n\nMathematical notation\n------------------------------\nThe current notation is sometimes confusing. For example, l. 77 $\\\\mathcal{N}(z; \\\\mu, \\\\sigma)$ reads as \"the univariate Gaussian with mean $\\\\mu$ and standard deviation $\\\\sigma$\" while it is in fact a multivariate Gaussian. A suggestion to improve this is to use a different notation for numbers, vectors, and matrices, following, for example the notation suggested in the math_commands.tex file of the ICLR template.\n\nClarity\n---------\n- l. 478, I found the sentence \"the quality of AE and VAE [...]\" confusing as AEs are not discussed anywhere else in the paper and are not used in the experiment. I would suggest removing the part about AE to make the argument clearer.\n- I struggle to see what 32% of computation time of the KLD represent in term of additional training time. 
It would be easier to see with an average run time with and without change of coordinates over n seeds and k epochs. Furthermore, if this increases with the number of dimensions, an estimate of the increase rate using big O notation would be very useful for practitioners to assess whether this implementation is suitable to their needs.\n- The seminal papers on VAEs are [1,7] which are different from the ones references. I would suggest updating this.\n\nTypos\n--------\n- l. 91 weighs -> weights\n- l. 100 teh -> \"that the\" ?\n- l. 133 there's -> there is\n- l. 133 have -> has\n- l. 157 it's -> it is\n- l. 422 gven -> given\n- KL divergence is inconsistently refered to as \"KL divergence\" and \"KLD\" in the paper.\n- l.591-592, the title of Higgins et al. is capitalised while others are not."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose to convert the latent variables of a VAE to hyperspherical coordinates. This allows to move the latent vectors to a small island of the hypersphere, reducing sparsity. We showed that this improves the generation quality of a VAE."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024is,\ntitle={Is the sparsity of high dimensional spaces the reason why {VAE}s are poor generative models?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4xEACJ2fFn},\nnote={under review}\n}"
},
"abstract": {
"value": "Variational autoencoders (VAE) encode data into lower dimension latent vectors before decoding those vectors back to data. Once trained, decoding a random latent vector usually does not produce meaningful data, at least when the latent space has more than a dozen dimensions. In this paper, we investigate this issue drawing insight from high dimensional physical systems such as spin-glasses, which exhibit a phase transition from a high entropy random configuration to a lower energy and more organised state when cooled quickly in the presence of a magnetic field. The latent of a standard VAE is by definition close to a uniform distribution on a hypersphere, and thus similar to the high entropy spin-glass state. We propose to formulate the latent variables of a VAE using hyperspherical coordinates, which allows to compress the latent vectors towards an island on the hypersphere, thereby reducing the latent sparsity, analogous to a quenched spin-glass. We show that this is feasible with modest computational increase and that it improves the generation ability of the VAE."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"variational autoencoder",
"generative model",
"high dimensional statistics",
"spin glass",
"latent space",
"hyperspherical coordinates"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5d619309dbe0bd30676f1d978912520b27918362.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Is the sparsity of high dimensional spaces the reason why VAEs are poor generative models?"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4xWQS2z77v | Exploring The Loss Landscape Of Regularized Neural Networks Via Convex Duality | main | Active | Convex duality;Machine Learning Theory;Loss Landscape;Optimal Sets | learning theory | 5;6;6;6;8 | 3;4;2;2;5 | 3;3;3;3;3 | 3;3;3;3;3 | 1;2;2;2;4 | 6.2 | 3.2 | 3 | 3 | 2.2 | 0.665133 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "No questions."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well-written, easy to follow, and its results are clear and novel. The theoretical results stand out for their depth and clarity, as do the empirical results. The support of images is quite helpful when reading through some mathematical arguments or proofs of theoretical results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a deep and novel analysis of the loss landscape and a solution in the context of regularized neural networks. They also show that the topology of the global optima undergoes a phase transition as the width of the network changes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Perhaps a deeper discussion on the topological implications of their results would be beneficial."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Is there a way to bound or estimate the critical widths $m^*$ and $M^*$ in practice, for instance on real datasets? \n2. In line 186: What does $h$ refer to in $\\text{diag}[1 (Xh \\geq 0)]$ ?\n3. It is not very clear to me what lines 225-226 mean. Could you perhaps rephrase it? (That $\\mathcal{P}^*_{\\nu^*}$ does depend on $\\nu^*$, but that the specific choice of it does not matter.)\n4. Figure 2 bottom: The axis labels are missing and it is not very clear to me what the red and blue lines are supposed to represent.\n5. In line 351 the author mention three interpolation problems of interest, but only discuss one problem on the minimum-norm interpolation problem. What are the other two interpolation problems and can you also extend your results to these problems?\n6. The paper describes a path of nonincreasing loss that connects local to global minima. Could this insight be incorporated into practical training algorithms, such as initializing weights or guiding optimizers in large-scale training?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- I found the \"staircase of connectivity\" very insightful, particularly how the connectivity properties of the optimal solutions are connected to critical widths $m^*$ and $M^*$. This finding explains how increasing the number of neurons affects the connectedness of optimal sets, and makes the observation of mode connectivity [Garipov et al. 2018] more precise. \n- The paper generalizes its findings also to vector-valued networks and deep networks with skip connection, which provides a broader framework that can be applied across different architectures."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work the authors analyze multiple aspects of the loss landscape of regularized two-layer neural networks with scalar output, including the structure of stationary points, the connectivity of optimal solutions and the non uniqueness of optimal solutions. The main proof strategy is to translate the problem into an equivalent convex problem and characterize its solution set through its dual form. \nThe authors show that the topology of the global optima goes through a phase transition as a function of the hidden layer width, which they term the staircase of connectivity. \nThis result is extended later to networks with vector-valued outputs, and parallel deep networks of depth 3."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I found the theoretical results, for instance on the staircase of connectivity, hard to interpret in practice and would benefit from more accessible explanations. While the toy example in Example 1 illustrates the concept, the absence of labels in Figure 2, as well as the notation-heavy formulation makes it difficult for readers to grasp the results intuitively. \n- Although the toy examples are helpful, the paper lacks actual empirical validation of the theoretic results. I think it would add credibility to this work, if the staircase of connectivity concept would also be tested on actual neural network architectures trained on real data. It would be interesting to see how these results scale with different data distributions and larger models. \n- Overall I found the work quite difficult to read due to the dense mathematical formalism. I also feel like the section on notations should not be in the appendix, but should - at least in a shortened version - be included in the main paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "I wonder if the authors have any comment regarding the weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors apply the new technique of convex duality to problems of connectivity and minimal-norm interpolation, which have been studied previously using other methods. This approach yields both generalizations of existing results and new insights into these problems. Overall, I believe this paper is a strong demonstration of how convex duality can be leveraged in the theoretical study of machine learning. The abstract concepts are clarified through figures and examples."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors apply convex duality for two-layer ReLU networks to study mode connectivity and unique minimal-norm interpolator problems, while also working to generalize this framework. Specifically, the authors have:\n\n* identified the staircase of connectivity that describes how connectivity evolves with width;\n* constructed none-unique minimal-norm interpolator by breaking the uniqueness conditions;\n* generalized the optimal polytope to the general cone-constrained group LASSO problem and applied it to more complicated architectures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have some concerns with the presentation of this work. Specifically:\n\n* If I understand correctly, the convex duality only applies to ReLU networks. This is not emphasized.\n* I found Section 2 difficult to follow without prior knowledge of Pilanci & Ergen (2020). The relations between (1), (2), and (3) are mentioned but not explained (When do they have the same loss value? How do the solutions relate to each other?) Dimensions of $X$ and $y$ are not mentioned. $D_i$ is not explained.\n* In Figure 1, is each red point truly a unique solution, or does it represent solutions equivalent under permutation (p-unique)? If they are p-unique solutions, readers may get the wrong impression. \n* The lower half of Figure 2 is not explained."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. A general question is about the scope of the techniques in this paper. It seems that the techniques only apply to two-layer ReLU networks, since the problem can be equivalently written as a convex problem with regularization. It is not applicable to other activation functions and seems hard to generalize to multi-layer cases. Thus, could you elaborate more on the universality of the techniques?\n\n2. The results in this paper require the number of neurons $m \\geq m_*$. As far as I understand, $m_*$ is the minimal number of neurons needed to achieve the optimal model. I'm wondering what would happen if $m<m_*$? Also, in general, what is the scaling of $m_*$ depending on $n,d$? \n\n3. The results in Theorem 2 consider the connectivity of the optimal solution set, which is equivalent to the connectivity of a path with $0$ perturbation. What about the case that allow $\\epsilon$-pertubation along the path? Is the techniques still applicable?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper develops a general framework to characterize the global optimum of the regularized ReLU network via convex duality. From my understanding, the key contribution of this convex duality framework in Theorem 1 is that it allows one to characterize the \"direction\" of the weights separately in the regularized case, which is then useful for characterizing the global optimum. I believe this contribution is novel and solid. \n\n2. I think the framework of characterizing the global optimal is quite general even though it is restricted to ReLU network. In particular, it do not require large over-parameterization, special scaling, or special data distributions. Thus, I believe the results can be applied to other more specific settings and is potentially useful for characterizing other properties besides the connectivity of the solutions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the loss landscape of ReLU networks with $L_2$-regularization. The authors first study the canonical case of a two-layer network with scalar output, and characterize the connectivity of the solution for different number of neurons. Then, the authors extend the results to a more general class of problems, including: minimal norm interpolation, vector-valued ReLU network, and parallel deep neural network."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.Although I believe this paper has a solid contribution, I found there's a few part I don't understand the significance: \n- I think I understand the contributions in section 3.1 and 3.2, however, I'm not sure about In section 3.3, the authors showed that for a class of data set with dimension $=1$ that satisfies certain conditions, if the network do not have skip connection, then there are infinitely many minimal norm interpolators (which is a connected set ). I'm not sure the significance of these results, since (1) it is for a special construction of dataset. (2) it might be that those infinitely many minimal norm interpolators behave qualitatively almost the same, for example, the radius of the solution set is small. Could you discuss more on the significance of the results?\n\n- In section 4, I understand the contribution of generalizing it to a vector-valued function. However, I'm not sure the significance of the results in Theorem 4. Since anyway you fixed all the other layers but only keep two consecutive layers, and technically I didn't see any difference from a two-layer network. Could you discuss more on the significance of the results?\n\n2. One main issue of the paper is the writing, especially the main part of the paper. I check the appendices, and it is much more readable. So I suggest the authors consider rearranging the content. To name a few issues\\typos that confuse me when reading the main part: \n\n - Line 215: and 216, what is the definition of $\\mathcal{S}_i$?\n - Line 223: what the definition of \"optimal model fit\", what is $u_i^*, v_i^*$, and why it is unique?\n - Line 232: the triangle inequality is reversed. Also could you be more specific about the discussion between Liine 229-232?\n - The statement of Proposition 1: First, you use $v_{i,1}$ to denote the first entry of a vector, could you specify this? 
Also, you define $s_k = \\sum_{i=1}^k v_{n-i+1},$ but also require $||s_k|| =1, s_n = [0,1]^\\top$, could you discuss the existence of such construction? \n - In equation (7), could you specify the dimension of the variables?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Can the authors proofread again the manuscript to eliminate typos, unclear sentences and missing notation?\nCan they make the presentation of the result more readable, including relevant results from the Appendix?\nCan they integrate the relevant existing literature in the Related Work section?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper has original content, and bridges together several concepts proposed in the literature on the topic. The method of analysis is rigorous and it gives a solid contribution to the field."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This manuscript proposes a characterization of the topology of the global optima in the loss landscape, focusing on regularized two-layers neural networks with free skip-connections with a partial extension to deep neural networks. The authors provide a characterization of the optimal set in terms of the width of the hidden layer, which determines a so-called \"staircase of connectivity\" when such a width occurs in critical values and phase transitions. The authors study the uniqueness of the minimum-norm interpolator, highlighting necessary guarantees (such as the free ski connections, bias in the training problem and unidmiensional data). An experimental study integrates the theoretical findings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The manuscript presents some unclear sentences (e.g. line 198-199). There is a clear math error at line 232 (the triangle inequality holds with the reverse inequality; to be candid, I am quite sure it is a typo) and some symbols are not defined at all, not even in the Appendix (e.g. the symbol P, that occurs very often through the entire manuscript). \nThere are many references to results listed in the Appendix; if relevant, I think it might be better to put them in the main manuscript.\nAn important reference to the characterization of loss landscapes over neural networks with regularization terms and/or skip connections is missing, also because it gives a theoretical hint on the low importance of skip connections [1].\n\n[1] Bucarelli, M. S., D’Inverno, G. A., Bianchini, M., Scarselli, F., & Silvestri, F. (2024). A topological description of loss surfaces based on Betti Numbers. Neural Networks, 106465."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We investigate the loss landscape and topology of the optimal set of neural networks using convex duality."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024exploring,\ntitle={Exploring The Loss Landscape Of Regularized Neural Networks Via Convex Duality},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4xWQS2z77v},\nnote={under review}\n}"
},
"abstract": {
"value": "We discuss several aspects of the loss landscape of regularized neural networks: the structure of stationary points, connectivity of optimal solutions, path with non-increasing loss to arbitrary global optimum, and the nonuniqueness of optimal solutions, by casting the problem into an equivalent convex problem and considering its dual. Starting from two-layer neural networks with scalar output, we first characterize the solution set of the convex problem using its dual and further characterize all stationary points. With the characterization, we show that the topology of the global optima goes through a phase transition as the width of the network changes, and construct counterexamples where the problem may have a continuum of optimal solutions. Finally, we show that the solution set characterization and connectivity results can be extended to different architectures, including two layer vector-valued neural networks and parallel three-layer neural networks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Convex duality",
"Machine Learning Theory",
"Loss Landscape",
"Optimal Sets"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e2aa7c3ffd9df4b407f85c7290421cfd69aa88b7.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/f41e0eed403477ede6e6aad5ef92572362054908.zip"
},
"title": {
"value": "Exploring The Loss Landscape Of Regularized Neural Networks Via Convex Duality"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4xbwWerxvZ | Consistency Model is an Effective Posterior Sample Approximation for Diffusion Inverse Solvers | main | Active | Diffusion model;Inverse problem | generative models | 5;5;5;6 | 4;3;3;5 | 3;3;2;3 | 3;3;2;2 | 3;3;2;2 | 5.25 | 3.75 | 2.75 | 2.5 | 2.5 | 0.870388 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see above"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper considers an important problem: how diffusion models can be used to solve complex inverse problems, such as giving a segmented image to reproduce the underlying image. The paper acknowledges a known limitation on one assumption that prior work makes on the distribution of the data (which allows to use the Tweedie's formula), and proposes to address it by consistency models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes using consistency models (CMs) to solve inverse problems with diffusion models. In diffusion inverse problems, common approaches need to go from xt to x0 at every iteration to be able to compute a measurement-guided gradient with respect to the measurement y from x0. The majority uses the expectation from Tweedie's formula to compute x0 from tx, which may not result in a good example for complex multi-modal distribution. The paper proposes to replace this step with a CM. They show improvement upon prior work."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I find the contribution of the contribution incremental, and the presentation can be improved. The main weaknesses are as follows: the mathematical formulation is confusing and the approach to training the CM is not fully fair compared to the baselines.\n\n1. The paper trains a CM to be used for diffusion-based inverse solvers. It's not clear why such mathematical details (some are not fully proper) are included, which I do not think is the main contribution of the paper. It's unclear why in (1) the authors formulate the Markov chain following a VE formulation. Could the author explain this choice? The conventional Markov chain for diffusion models is the popular one based on VP, which has a different mean and variance such that the structure is destroyed, but the energy of the process remains the same.\n\n2. Solving inverse problems with diffusion mostly occurs in the regime where the diffusion model is trained unconditionally without the knowledge of the measurement operator f(.) (see the DPS used for vision problems such as deblurring). However, the paper in Section 3.4 discusses that the CM is overfitting to f(.) and proposes an approach to make the framework robust. So this brings the following: the CM going from xt to x0 seems to be trained with the knowledge of the measurement. If f(.) is involved during the training, then the trained framework is not general anymore (it's problem specific). Hence, the comparison of the proposed framework to models such as DPS, where the model is not trained based on the measurement operator, is not fair. Please provide more information and clarity if this is not the case. 
The fair comparison would be a scenario where both methods are trained a similar condition (e.g., not having the knowledge of the forward operator).\n\nHere are some questions concerning this:\n\n- Is CM trained with knowledge of f(.), or if this is a misunderstanding?\n- If the CM is trained with f(.), please explain and justify comparing it to methods like DPS that don't use this information?\n\nMore comments are below:\n\n\nLack of thorough literature\n\n- The limitations of DIS are not fully explained in the intro. Indeed, the mean-based approximation is one challenge. A few others are related to whether methods such as DPS are doing posterior sampling or using the measurement to guide the process onto likely solutions (see [1]).\n\n\nThe paper needs improvement in presentation. Here are a few examples\n\n- While the notations such as Xt and X0 are known to the reader with knowledge of diffusion models, these are used in the abstract and intro without introducing them. Hence, I suggest re-writing the abstract without these notations and introducing the diffusion in the introduction before using x0, xt, etc.\n\n- Consistency models are not defined and introduced in the intro, but the authors explain that they are used to improve performance. I suggest the authors to provide a brief definition or explanation of consistency models in the introduction.\n\n- How the results are generated for Table 1; this appears abruptly without proper explanation. I suggest to include a brief explanation of the methodology used to generate the results in Table 1.\n\nSome terms within the manuscript are not precise and clear. Please provide clarifications.\n\n- Section 1: with \"neural network operators\"? Does this refer to the measurement operator or the score function of the diffusion? I suggest to say \"measurement operators\" instead of \"operators\". Please clarify \"neural network operators\"?\n\n\n[1] Wu, Z., Sun, Y., Chen, Y., Zhang, B., Yue, Y., & Bouman, K. L. (2024). 
Principled Probabilistic Imaging using Diffusion Models as Plug-and-Play Priors. arXiv preprint arXiv:2405.18782."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. More DIS baselines are desired such as CoSIGN [2], DDNM [3]\n\nI am open to change my rating if authors could address my concerns (how clearly a CM model could benefits with inverse problem solving comparing to strong baselines like DDNM)"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Easy to follow\n2. The idea of using CM to approximate the PF-ODE solution is interesting"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an interesting approach for posterior sampling using $p(x_0|x_t)$ being approximated via consistency model. Results show improvement over baselines such as DPS"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Using CM over the existing inverse problem solving adds a lot of computational burden, while the benefit is not clearly visible. Even though the $x_0|x_t$ may be off the manifold of ground truth image distribution, this does not imply a sacrifice in reconstruction quality as demonstrated in this work [1]. The quantitative results do not show a significant improvement over DPS.\n2. CM itself can be used as a good prior for solving inverse problems: see this work [2]. This paper needs to compare with more recent works in inverse problem solving\n3. More DIS baselines are desired such as DDNM [3]\n\n\n\n\n[1]. Wang, Hengkang, et al. \"DMPlug: A Plug-in Method for Solving Inverse Problems with Diffusion Models.\" arXiv preprint arXiv:2405.16749 (2024). NeurIPS 2024\n\n[2]. Zhao, Jiankun, Bowen Song, and Liyue Shen. \"CoSIGN: Few-Step Guidance of ConSIstency Model to Solve General INverse Problems.\" ECCV 2024\n\n[3]. Wang, Yinhuai, Jiwen Yu, and Jian Zhang. \"Zero-Shot Image Restoration Using Denoising Diffusion Null-Space Model.\" The Eleventh International Conference on Learning Representations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The presentation of Proposition 3.3 is a bit misleading. The authors should state in the assumption the condition on $\\sigma$ that they use in the proof in order to get a lowerbound independent of the dimension, or remove this claim after the proposition."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea is quite original wrt to the literature. The paper is quite illustrated and clear. The experiments are interesting and extensive; the authors compare to many existing methods, on both pixel space and latent space diffusion."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is about solving inverse problems using a pre-trained denoising diffusion prior. Posterior sampling with diffusion models requires an estimate of the score $\\nabla _{x_t} \\log p_t(x_t) + \\nabla _{x_t} \\log \\int p(y | x_0) p _{0|t}(x_0 | x_t) d x_0$. While the first term is estimated using the pre-trained score, the second term is usually very difficult to estimate accurately. A common approximation used in the literature involves using $\\nabla _{x_t} \\log p(y | E[X_0 | X_t = x_t])$ where $E[X_0 | X_t = x_t]$ can also be estimated using the pre-trained score via Tweedie's formula. This approximation results in many efficiencies well documented in the literature and this paper tries to circumvent them using by using a sample from the PF-ODE as a replacement. Specifically, the authors use consistency models to speed up the process of sampling from the PF-ODE and ensuring that the differentiation is not costly."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The most obvious weakness of the method is the use of consistency models since these are quite difficult to train and pre-trained CMs are not widely available. \n\n- In my opinion the justification for the method is rather weak. The paper argues that using a sample from the PF-ODE is valid because the sample has zero density and that furthermore, for the Gaussian mixture example considered, the PF-ODE sample has non-zero density under the posterior $p(x_0 | x_t)$ with high probability. At the same time it is also easy to find examples of a likelihood function $p(y|x_0)$ such $\\int p(y|x_0) p(x_0 | x_t) d x_0 > 0$ for all $x_t$ but $p(y|\\Phi(t, x_t)) = 0$ for $x_t$ in a set of positive Lebesgue measure. As a result, I'm not totally convinced that the argument is strong."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The presented manuscript often states likelihood terms as $p_\\theta(y|x)$. Please elaborate on why it contains theta, as the likelihood in general is not a learned function with the same parameters as the learned data distribution $p_\\theta(x)$.\n\nFurthermore, I would be interested as to what extent the PF-ODE solution differs from the MAP solution?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea of using CMs in the process of inverse problems/posterior approximation seems novel and interesting.\n\nThe proposed method outperforms baseline approaches, particularly when compared to straightforward extensions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a new approach using CMs to generate realistic image samples in Diffusion Inverse Solvers, improving the application of complex, non-linear neural network operators like those in semantic segmentation, room layout estimation, image captioning, and image classification. Unlike traditional methods that produce low probability images, the incorporation of CMs is expected to maintain sample realism, resulting in more accurate posterior approximations, particularly when neural network-based operators are involved."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although interesting, the novelty of the presented work is marginally above the acceptance threshold since the only contribution seems to be the use of CMs in order to compute $x_0$ from $x_t$.\n\nThe manuscript could benefit from improved clarity and organization, as certain sections are challenging to follow. See further remarks.\n\nFurther remarks: \n\nIn the presented algorithm, the updates are stated as $\\zeta_t \\Delta\\left(f(x_0 \\mid t), y\\right)$, where $\\Delta$ is defined as some distance. The update of $x_t$ however should be the gradient of that distance.\n\nIn order to enhance the readability of the work, the authors should think about introducing a clear distinction between the posterior $p(x_0|x_t)$ and the posterior $p(x|y)$, given y is the observation.\n\nThe abbreviation DPS (Diffusion posterior sampling) is used but never introduced."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose to use consistency model instead of posterior mean to approximate posterior samples during diffusion posterior sampling."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024consistency,\ntitle={Consistency Model is an Effective Posterior Sample Approximation for Diffusion Inverse Solvers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4xbwWerxvZ},\nnote={under review}\n}"
},
"abstract": {
"value": "Diffusion Inverse Solvers (DIS) are designed to sample from the conditional distribution $p_{\\theta}(X_0|y)$, with a pre-trained diffusion model $p_{\\theta}(X_0)$, an operator $f(.)$, and a measurement $y=f(x'\\_0)$ derived from an unknown image $x'\\_0$. Existing DIS estimate the conditional score function by evaluating $f(.)$ with an approximated posterior sample drawn from $p\\_{\\theta}(X_0|X_t)$. However, most prior approximations rely on the posterior means, which may not lie in the support of the image distribution and diverge from the appearance of genuine images. Such out-of-support samples may significantly degrade the performance of the operator $f(.)$, particularly when it is a neural network. In this paper, we introduces a novel approach for posterior approximation that guarantees to generate valid samples within the support of the image distribution, and also enhances the compatibility with neural network-based operators $f(.)$. We first demonstrate that the solution of the Probability Flow Ordinary Differential Equation (PF-ODE) with an initial value $x_t$ yields an effective posterior sample of $p_{\\theta}(X_0|X_t=x_t)$ with high probability. Based on this observation, we adopt the Consistency Model (CM), which is distilled from PF-ODE, for posterior sampling. Through extensive experiments, we show that our proposed method for posterior sample approximation substantially enhance the effectiveness of DIS for neural network operators $f(.)$ (e.g., in semantic segmentation). The source code is provided in the supplementary material."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion model",
"Inverse problem"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/736044c787692a45ffaa0a19841cd9e3977f2f8b.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/3cf1363286ff310a24a3391fe90f153f7212aea6.zip"
},
"title": {
"value": "Consistency Model is an Effective Posterior Sample Approximation for Diffusion Inverse Solvers"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4y4t7yOvJO | POMONAG: Pareto-Optimal Many-Objective Neural Architecture Generator | main | Active | Neural Architecture Search;Many-Objective;Pareto-Optimal;Meta-Dataset;Transferable Neural Architecture Search | transfer learning, meta learning, and lifelong learning | 1;3;5;5;6 | 5;5;5;3;3 | 2;2;3;3;3 | 2;2;2;2;3 | 1;2;2;3;4 | 4 | 4.2 | 2.6 | 2.2 | 2.4 | -0.684653 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "We are most grateful for the reviewer's thorough analysis and recognition of our work's quality.\n\nRegarding the Pareto Front filtering (lines 299-304), this occurs after architecture generation. For each architecture, we compute the parameters, MACs and inference latency, whilst using the predictor to estimate accuracy. From these Pareto fronts, we identify three configurations per secondary metric: the most efficient architecture (lowest metric), the most balanced (optimal accuracy/metric trade-off), and the most accurate (highest predicted accuracy). This approach provides practitioners with clear options suited to different deployment scenarios.\n\nThe foundational works - MetaD2A (Lee et al., ICLR 2021), TNAS (Shala et al., ICLR 2023) and DiffusionNAG (An et al., ICLR 2024) - were all published at ICLR and establish MobileNetV3 and NASBench201 as standard benchmarks. Whilst POMONAG builds upon this established research trajectory, we have substantially expanded the validation across a broader range of datasets to demonstrate wider applicability.\n\nOur contribution extends well beyond enhancing DiffusionNAG. A primary innovation is the formulation of Many-Objective Reverse Diffusion Guidance, which elegantly balances four distinct gradients during generation. The optimisation of these gradients presented unique challenges: the scaling factors operate across vastly different scales, whilst maintaining convergence and architectural quality. We addressed this through a novel two-phase approach (lines 324-333) optimised via Hyperband pruning.\n\nThe Performance Predictors underwent significant redesign, yielding marked improvements in Spearman correlation (from 0.687 to 0.855). The expanded Meta-Dataset properly supports multi-objective optimisation, whilst our Pareto-optimal filtering identifies three practical configurations (Acc/Bal/Eff) suited to different deployment contexts.\n\nThe empirical results validate these contributions conclusively: POMONAG surpasses DiffusionNAG in both accuracy (+4.06% on NASBench201) and efficiency metrics, with remarkable reductions in parameters (90%) and MACs (93%).\n\nWe might also note that POMONAG achieves these improvements whilst requiring only a single architecture to be trained per dataset, significantly reducing computational overhead compared to prior approaches.\n\nWe trust these clarifications address the points raised and demonstrate the substantial nature of our contributions. We are grateful for the reviewer's careful consideration and hope these explanations enable a fuller appreciation of the work's merit."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Moreover, I have the following questions:\n\n- Can the authors provide more theoretical or empirical justification for the scaling factors in the Pareto Front Stretching process? How sensitive is the model to these values?\n\n- Can the authors provide more detail on the architecture sampling process, dataset splits, and hyperparameter tuning methods used in the experiments? This is particularly important for the performance predictors."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The motivation to extend DiffusionNAG to a many-objective setting is valid and POMONAG does so by incorporating both accuracy and efficiency metrics like latency and MACs, which are critical for resource-constrained environments. The paper provides extensive experimental comparisons with DiffusionNAG, including evaluations across multiple datasets and search spaces, which helps demonstrate the general applicability of POMONAG.\nBalancing the different objectives being optimized is also very important in my opinion. The authors do so by proposing a pareto front filtering and stretching subroutine."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces POMONAG, an extension to DiffusionNAG that applies a many-objective diffusion model to optimize neural architecture generation for many-objective optimization. By incorporating additional performance predictors for hardware efficiency metrics such as number of parameters, multiply-accumulate operations (MACs), and inference latency, POMONAG aims to provide a more balanced approach to architecture optimization across accuracy and computational efficiency. Experiments validate POMONAG’s efficacy on two major CNN search spaces (NASBench201 and MobileNetV3)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have the following main concerns related to this submission, which I believe were crucial in the final decision:\n\n- **Incremental Contributions**: Although POMONAG claims to extend DiffusionNAG’s capabilities by addressing more objectives, the modifications appear incremental and lack substantial theoretical advancement. More specifically, I see the adaptation of diffusion models to accommodate multiple objectives, as described in section 3.1, more as a technical modification rather than a novel conceptual framework. I would recommend the authors to reiterate over their methodology and pinpoint the main contributions of their approach.\n\n- **Experimental Evaluation**: The benchmarks on which POMONAG was evaluated contain only CNN spaces. It would be beneficial for the paper if the authors would demonstrate the efficacy of POMONAG in Transformer search spaces, such as the one from HW-GPT-Bench [1]. Most importantly, in the multi-(many-)objective experiments, the proposed method is not compared to any baseline. I would recommend the authors to add baselines in their experimental evaluation and report hypervolume indicator together with the individual objective values, as well as the search time. Ultimately, I would also be interested in visualizing the pareto front plots in the main paper. As for baselines, you can find a non-exhaustive list of simple ones in SyneTune (https://syne-tune.readthedocs.io/en/latest/getting_started.html#supported-multi-objective-optimization-methods). Finally, the experiments lack a thorough ablation study that demonstrates the impact of POMONAG’s unique contributions independently of DiffusionNAG’s foundational structure. \n\n- **Clarity and Presentation**: The paper seems to have a somewhat fragmented structure, making it challenging for readers to follow the main contributions and crucial take-away points. Equations are not thoroughly explained, and there is a heavy reliance on citations from DiffusionNAG rather than a detailed elaboration of POMONAG itself, making the paper not self-contained. One major point here, which I have also pointed out to the AC, is that the authors have used a smaller font size starting from page 4. The guidelines clearly state that the maximum page limit is 10 and that means 10 pages with the default font size, not a smaller one. I suggest the authors that in future submissions they adhere to the submission guidelines.\n\n\n-- References --\n\n[1] Sukthanker et al. HW-GPT-Bench: Hardware-Aware Architecture Benchmark for Language Models. In NeurIPS 2024 DBT"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See above as the questions are mostly addressing the weakness of this paper."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The motivation of this study, introducing multi-objective evaluation in NAS, is commendable as a task in reality is often not just about accuracy. Other metrics should be considered simultaneously as well.\n\nThe writing is easy to follow. \n\nIt is nice to see equations with highlights of different colours."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study improved DiffusionNAG by introducing a multi-objective approach which modifies DiffusionNAG's reverse diffusion process as a reverse diffusion guidance process. Other than accuracy, #params, MACs and inference latency are also considered in the multi-objective metrics. The proposed method POMONAG has been tested on NASBench201 and MobileNetV3 with 15 image classification tasks, showing better performance than DiffusionNAG and a series of other methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**First of all**, the work claims to be on Pareto multiobjective search for architectures. However, that point is not obvious from the paper. \n* What are the benefits of using the proposed POMONAG? \n* How can a Pareto front be generated and utilized? Need to explicitly demonstrate how POMONAG generates and utilizes Pareto fronts.\n* How can users select architectures from the Pareto front according to their needs or under different circumstances? Show examples of such selection based on different priorities, for example prioritizing small-size architectures for portable devices or focusing on latency reduction etc. \n* It seems non-dominated sorting is absent. Explain how non-dominated sorting is incorporated or can be incorporated in POMONAG.\n* In its current form, the paper reads like a combination or integration of single-objective evaluations rather than a multi-objective evaluation. The equation of POMONAG at Line 209/210 is a linear combination of four objectives. Please clarify if the linear combination of objectives is intended as a scalarization approach. If so, discuss its limitations.\n--- \n**Secondly**, the performance of POMONAG appears better than DiffusionNAG and other methods shown in the paper. However, many SOTA methods, especially zero-proxy methods, are missing. Their reported performance is similar or even better, for example, SWAP-NAS by Peng et al, ICLR'24, ZiCo by Li et al, ICLR'23, MeCo by Jiang et al, NeurIPS'23.\n* Include a comparison with these SOTA methods. If a direct comparison is not possible, explain why and discuss the limitations of the current evaluation.\n* Discuss how POMONAG's approach differs from or improves upon zero-proxy methods. \n\n--- \n**Thirdly**, the computational cost aspect of POMONAG is weak. The section \"Generation and Training Time\" should be better presented. The method requires a diffusion generation phase which takes extra time. That itself is a disadvantage. Also timewise, POMONAG cannot claim superiority as recent methods mentioned earlier are faster.\n* Present a detailed table comparing computational costs (including generation and training time) of POMONAG with other methods, including these zero-proxy methods mentioned above. Seemingly these methods are faster. If POMONAG is indeed slower, discuss potential optimization strategies.\n* Discuss the trade-offs between the additional diffusion generation phase and the method's performance gains. Justify why the additional computational cost might be worthwhile.\n--- \n**Other points:**\n \nThe link at Line 091 is showing. Also, including the code and dataset would be helpful for the assessment.\n\n--- \n\nFig 1 is not quite readable. The figure further makes POMONAG look like three single-objective tasks combined rather than a four-objective task.\n* Improve readability, especially on the right-hand side.\n* Better illustrate the integration of all four objectives in a unified multi-objective framework if these objectives are not just simply added together (*see the first part of my comments*).\n * Provide a clearer visual representation of how POMONAG handles the trade-offs between objectives (*see the first part of my comments*).\n--- \nLine 186, the term noisy architecture is not explained. \n* Provide a brief explanation of what \"noisy architecture\" means in this context and how it relates to the diffusion process in DiffusionNAG.\n--- \nEquations and their connection to the processes/algorithms are not numbered and not clearly explained. \n* Number all equations for easy reference\n* Clearly label the equation at Line 183. Is this equation for the Reverse Diffusion Process? Clarify that connection.\n* Provide a brief explanation of the symbols used in this equation and other key equations.\n* Explain the purpose of transformation s_θ(A_t,t).\n* Explain the exact differences between the Reverse Diffusion Process and the Reverse Diffusion Guidance Process.\n--- \nLine 280, \"Four are dedicated to the respective estimation of accuracy, parameters, MACs, and inference latency of noisy architectures during the diffusion phase. \" \n* Explain why not use these four metrics for denoised architectures as well.\n* Justify the point that the denoised architecture uses accuracy as its only metric.\n--- \n\nExplain the reason why POMONAG utilises Vision Transformer ViT-B-16 instead of other models (Line 286).\n\n--- \nIt is good to see the Spearman correlation experiment. That is very important in NAS studies. However, for a thorough comparison of correlation, it should be done on a set of tasks like NAS-Bench-Suite-Zero (Krishnakumar et al. NeurIPS'22).\n* Perform a similar thorough comparison comparing correlations on different tasks using different search spaces.\n--- \nIn lines 400-402, the same latex problem appeared several times, ` not ' for the left quotation marks, Accuracy, Params, MACS ... \\\n* Fix these formatting issues.\n--- \nValidity, uniqueness and novelty are nice metrics for a population of solutions but not so critical for tasks that focus on accuracy and speed. What is the point of being excellent on these points but without good accuracy and speed?\n* Explain the significance of these three additional metrics: validity, uniqueness and novelty.\n* Show examples of how these measures can help improve the quality of generated architectures in POMONAG."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I have several questions about this work.\n\n1, How to decide the scaling factors? Since the intervals are [1000,5000], [100,500], [100,500], [100,500], and the values seem to be integers, then the whole factor space equals 4000 * 400 * 400 * 400, which is quite huge. And the authors present one setting for NASBench201 and other experiments, respectively, so I am wondering whether there is some method or strategy to choose such factors? \n\n2, This work extends the basic motivation of DiffusionNAG, which is rather good and natural. Such an extension includes three more factors, including number of parameters, number of MACs, and the inference latency. But I am curious that, how about the performance of POMONAG if just considering adding one factor? \n\n3, From one factor, say, accuracy, to three more factors seems to strengthen the proposed POMONAG, but my question is, are the working mechanisms of DiffusionNAG and POMONAG the same or different? Although the two diffusion processes consider different factors, which is the obvious difference, but the analysis or discussion is important to interpret this issue."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper introduces the ParetoOptimal Many-Objective Neural Architecture Generator (POMONAG), extending DiffusionNAG through a many-objective diffusion process. POMONAG simultaneously considers accuracy, the number of parameters, multiply-accumulate operations (MACs), and inference latency. The experiments validate the performance of the proposed model."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is a direct extension based on DiffusionNAG, which can deal with multi-objective optimization in NAS. These objectives include accuracy, the number of parameters, multiply-accumulate operations (MACs), and inference latency. This motivation is good and natural, and the authors expressed their work clearly, from the motivation to the experiments results. Some details need to be clarified."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1, The multi-objective optimization problem formulation in this work can be given first, which then can be solved by the proposed weighted factors in the reverse diffusion process. But maybe the authors can consider other ways to solve this. For example, using four single reverse diffusion processes, each targeting one factor, as DiffusionNAG did, then using multi-objective optimization for further trade-off may also work well.\n\n2, The theoretical analysis should be strengthened. One objective to many objectives is a breakthrough, but such a process needs more analysis or discussion. Current work lacks such in-depth thinking. \n\n3, Several predictors are needed in this work, but the detailed information about these predictors is missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses. If the concerns raised are well addressed, I am glad to increase my rating."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1) The idea and the overall framework of the proposed POMONAG method are simple and easy to understand. \n2) The details of the method and experiments are clearly stated. \n3) Generating neural architectures in a multi-objective manner is an important research topic."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents the POMONAG method to generate neural architectures in a multi-objective manner. Specifically, the overall framework of POMONAG is designed based on that of DiffusionNAG, in order to achieve better performance in terms of number of parameters, MACs, and inference latency beyond the accuracy. There are four key parts designed to achieve this goal, i.e., the many-objective reverse diffusion guidance, the meta-dataset, the score network and performance predictors, and the pareto front filtering and stretching. The experimental results in NAS-Bench-201 and MobileNetV3 search spaces demonstrate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) My major concern is about the motivation of this work. Specifically, there are four objectives considered, i.e., the accuracy, the number of parameters, MACs, and the inference latency. However, the last three objectives do not demonstrate a conflicting relationship. For instance, the smaller number of parameters seems certain to lead to lower inference latency. In this case, the necessity for adopting multi-objective optimization is limited. \n2) The novelty of the proposed method needs further discussion. Specifically, the proposed method seems to build on DiffusionNAG with the cooperation of the multi-objective optimization. It seems that the POMONAG is just a simple combination of these methods. More discussions in terms of the seminal contribution of POMONAG are needed. \n3) How are the hyperparameters $k_{\\phi}$, $k_{\\pi}$, $k_{\\mu}$, and $k_{\\lambda}$ determined? It is suggested to provide more details in terms of the hyper-parameter study for these hyperparameters. \n4) The search cost of POMONAG is not well presented. In the pipeline of POMONAG, I think the pre-training process, the training of the score network, and the training for the performance predictors will introduce much additional search cost beyond the architecture generation. However, I cannot find any details about the overall search cost and the search cost for the above components. \n5) I am curious about why only one trained architecture is enough for POMONAG? Maybe more discussions or analysis are helpful to give more insights for this point. \n6) Lack of experimental results on more challenging tasks (i.e., the classification accuracy on ImageNet-1K). More results on such datasets are helpful to enhance the experiments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "How did you choose the search spaces to apply POMONAG? \nThe algorithmic contribution seems like a limited extension of DiffusionNAG. What complication arose from integrating multi-objective NAS into DIffusionNAG?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Strong writing, ideas are explained well and thorough\nThe experiments are presented well and results are thorough\nNovelty is presented in 2 algorithmic improvements and the contribution of a multi-objective meta dataset"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work presents an extension to DiffusionNAG and incorporates multi-objective search. Model complexity, computational efficiency, and inference latency are key measures captured through number of parameters, MACs, and latency estimation. These measures are recorded in a meta dataset for NASBench201 and MobileNetV3 with 10k and 20k architectures respectively. During search, pareto front filtering segments three regions corresponding to high accuracy, high efficiency, and best balance of the two using the auxiliary metrics from earlier. The experimental results are promising across a sufficiently diverse set of benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "For transferable NAS, the choice of benchmarks are interesting, TransNASBench provides a NAS dataset specifically for transferability in NAS. Exploring performance on this dataset would have been nice\nMobileNetV3 and NB201 are also fairly dated search spaces, performance in more recent search space or architecture styles (vit) should be explored\nThe specific details of the algorithmic contribution are a bit vague. How is pareto front filtering done? \nImageNet results are sparse and comparison to modern NAS methods on this benchmark are sparse"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "POMONAG is a dataset-aware Transferable Neural Architecture Search technique for Pareto-Optimal Many-Ojective generation of state-of-the-art efficient neural architectures."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024pomonag,\ntitle={{POMONAG}: Pareto-Optimal Many-Objective Neural Architecture Generator},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4y4t7yOvJO},\nnote={under review}\n}"
},
"abstract": {
"value": "Neural Architecture Search (NAS) automates the design of neural network architectures, minimising dependence on human expertise and iterative experimentation. While NAS methods are often computationally intensive and dataset-specific, employing auxiliary predictors to estimate architecture properties has proven extremely beneficial. These predictors substantially reduce the number of models requiring training, thereby decreasing overall search time. This strategy is frequently utilised to generate architectures satisfying multiple computational constraints.\nRecently, Transferable Neural Architecture Search (Transferable NAS) has emerged, generalising the search process from being dataset-dependent to task-dependent. In this domain, DiffusionNAG stands as a state-of-the-art method. This diffusion-based method streamlines computation, generating architectures optimised for accuracy on unseen datasets without the need for further adaptation. However, by concentrating exclusively on accuracy, DiffusionNAG neglects other crucial objectives like model complexity, computational efficiency, and inference latency -- factors essential for deploying models in resource-constrained, real-world environments.\nThis paper introduces the Pareto-Optimal Many-Objective Neural Architecture Generator (POMONAG), extending DiffusionNAG through a many-objective diffusion process. POMONAG simultaneously considers accuracy, the number of parameters, multiply-accumulate operations (MACs), and inference latency. It integrates Performance Predictor models to estimate these secondary metrics and guide the diffusion gradients. POMONAG's optimisation is enhanced by expanding its training Meta-Dataset, applying Pareto Front Filtering to generated architectures, and refining embeddings for conditional generation. 
These enhancements enable POMONAG to generate Pareto-optimal architectures that outperform the previous state-of-the-art in both performance and efficiency.\nResults were validated on two distinct search spaces -- NASBench201 and MobileNetV3 -- and evaluated across 15 image classification datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neural Architecture Search",
"Many-Objective",
"Pareto-Optimal",
"Meta-Dataset",
"Transferable Neural Architecture Search"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8ce7489c940467a310a9d0b8e1f97a8a1a529b97.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "POMONAG: Pareto-Optimal Many-Objective Neural Architecture Generator"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4y6Q98hJzr | Towards Efficient and No Forgetting Domain Continual Pretraining by Mitigating the Stability Gap | main | Active | Continual pretraining | foundation or frontier models, including LLMs | 3;3;5;5 | 4;3;4;5 | 1;3;2;3 | 2;2;2;2 | 2;3;1;3 | 4 | 4 | 2.25 | 2 | 2.25 | 0.707107 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Line 288: `... of each sample in the entire medical corpus.' what does each sample indicate (documents, QA pairs or anything else?)? Are the samples drawn from the only the dataset being evaluated or all of them combined?\n\n- How was the 50B domain text obtained from wiki-medical-terms? The website seems to indicate that the corpus has 6k medical terms and their descriptions. Does the whole terms + description have 50B tokens? Any other relevant statistics?\n- The paper's main contribution seems to arise due to creating the High Quality (HQ) partition using KenLM- Could the authors add more information about how this was performed? For e.g., what were size of _n_ if an n-gram-based approach was used?\n- Creating HQ partition could have been done in other ways- entropy, ranking or using MLE for importance. Can the authors comment why KenLM was chosen. Can they compare this selection with others? Do they work in similar ways/show similar performance?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is reasonably well organized and written. \n- The findings are well explained and justified with empirical analyses where required.\n- The authors conduct extensive experimentation to cover different possible research questions\n\n- The concept of stability gap is not new and has been extensively studied in Computer vision but relatively less in NLP. The paper draws it's research question from this and possible solutions from CV. The paper compares its proposed strategies with existing work, e.g. Ibrahim et al (Rewarm and decay), Replay (Chen et al) etc."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The manuscript focuses on the problem of stability gap- i.e., LLMs dropping their performance drastically when continually pretrained on a new domain and then recovering performance gradually. The manuscript demonstrates stability gap using the medical domain using relatively smaller language model and proposes (three) strategies to overcome and stabilize pre-training loss- (1) continual pre-training with a random partition of the domain across multiple epochs (2) continual pre-training using a notion of high-quality tokens selected using KenLM (3) Utilizing existing pre-training data-mixture ratios to selectively replace the current corpora with target domain corpora. The manuscript then applies the strategy to Llama-3-8B- in continually pretrain and fine-tuning settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper uses DEITA and KenLM for assessing the quality of samples in the target domain.\n\n Need a baseline with only Continually pretrained with all data (all data vs only 50B) vs proposed strategy\n- Table -1: The\tperformance vs 10B replay is pretty close. The performance difference seems to solely arise due to MedMCQA;\n may need statistical significance tests to see if the differences are due to proposed strategies or due to randomness.\n- Table 2: >20% performance jump again on MedMCQA for Physician vs LLaMa-3-8B Fine-tuned seems odd. Are there any possible explanations, especially the difference in performance for other datasets <5%. (Please add statistical significance tests- see last bullet)\n-\tPerformance could be possibly validated using statistical significance tests- either using permutation or signed rank tests. see- https://aclanthology.org/D12-1091/"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Why is the performance substantially better for your strategy on MedMCQA? The tasks, performance gains seem more mixed and not necessarily as beneficial. What about MedMCQA benchmark makes it benefit the most from the continual pre-training?\n* Does this technique work for other datasets? In looking at the legal dataset results in Appendix F, there are similar findings suggested for the zero-shot but the experimental comparisons."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The paper derives insights from stability gaps introduced in the context of visual models for continual learning to explain the behavior of performance drops with LLM continual pre-training for the specialized domain.\n* Evaluation results with various biomedical-domain fine-tuned LLMs and QA datasets demonstrate the potential of the strategy.\n* For some tasks and datasets, there is a noticeable improvement using less number of training tokens, especially on the MedMCQA task."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper suggests that performance instability when training LLMs for specialized domains arises from distribution shifts. As such, they propose a new continual pre-training strategy that incorporates data quality and corpus distribution to identify \"better\" samples. In addition, the idea is to use these better subsets of samples and train for more epochs to ensure the LLM is in the performance recovery phase. The authors illustrate their performance on 4 benchmark QA datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The base architecture used is only the OpenLlama3B model with a single parameter size. The natural question is whether such a strategy is applicable across various LLM families and sizes (for example, GPT-NeoX was used by Ibrahim et al. with a 10B parameter model which might be comparable to the 8B rather than the 70B). Can you provide a comparison against GPT-NeoX 10B to provide a meaningful evaluation of your strategies?\n* The motivation for the learning strategy is under a fixed computational budget, which seems to be only related to the number of training tokens and not the number of epochs. Can you explicitly define computational budget and then evaluate a scenario where token count and epoch count are kept constant to better understand the tradeoffs when considering a computational budget? This is a more elaborate setting than Section 3 which only assessed 5 epochs. \n* The methodology, efficient learning strategies, and evaluation sections, all seemingly blend together without necessarily a coherent story or separation of sections. For example, in section 3, the differences between the two subsections seem to blend together whereas it would have been better to introduce the stability gap and demonstrate that the instability that is often observed seemingly is explained in the context of this, and should be done for one common set of experiments (note that there is swapping between medical domain, common sense task performance but with very little context for these experiments until Section 5). Section 4 seems to be more of an ablation study rolled in with their own method. As such, while it seems like the authors have done a lot of reasonable experiments, untangling what they are introducing and evaluating is very hard to understand without multiple reads. 
My suggestion would be to reorganize so that both section 3 and 4 are one contiguous section, where the first subsection focuses on motivating the stability gap in the context and then providing the strategy to mitigate this by choosing higher-quality samples. Section 5 can then focus on experiments where they are concisely targeting specific aspects of the strategy.\n* There are a lot of results, but limited discussion about them, especially comparison of performance. Please provide a more detailed discussion of your results. Moreover, it would be helpful to clarify which fine-tuned models may not be tuned on the same task so the performance might be hindered by this, whereas others might be fine-tuned on the task so it might be reasonable to expect them to do well. To accommodate this expansion, space can be made by shrinking some of the figures.\n * Some of the graphs do not provide sufficiently more information. For example Figure 2 (b) reports the beginning of only 1 model for the millions of tokens, but the trend doesn't seem to be that much more informative than Figure 2a. Similarly, much of the motivation was for specialized domain but there is only a focus on medical domain whereas it would have been more compelling with Appendix B results embedded here."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. I am not clear that how the high quality is obtained from original medical corpus.Can you further explain the quality evaluation metric for the data selection? \n2. The figure 4 (a) is not clear to me. What's the x-axis represent? Can you further explain this Figure 4(a) and your finding? \n3. The mixture strategy confused me. Can you further explain the mixture strategies? Specifically, \n\"we follow the Llama mixture rate (Touvron et al., 2023a) to collect 5 billion tokens initially. We then replace the CC and C4 data (82% of the 5 billion tokens) with medical tokens sampled from the highest quality 5 billion medical tokens (HQ-5b). \" \nWhat's the initial 5 billion tokens ? How you further replace the token. \n4. Is stability gap existing on larger models? like 13B or larger models? Could you further conduct experiments on larger model to show the importance of the proposed issue?\n5. Strategy 1 trains more epochs on smaller dataset may have higher chance to overfit. Can you further compare the continual training's performance on other OOD benchmark to show the overfitting issue (e.g. DROP, GSM8K, HumanEval etc).\n6. Does the ' stability gap' changed by using different learning rate and warm-up strategies?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The paper's motivation is clear, with well-structured on problems in continue pertaining, proposed strategies, and results. \n2. Authors conducts experiments on different benchmarks across medical and law, to show the effectiveness of proposed methods in the continue-pretraining."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the LLM behaviors when adapting to specific domains via continual pre-training. Authors point out unexpected \"stability gap\", which is an initial drop in performance before subsequent recovery. Authors provides three training strategies on this unexpected trend and conduct experiments on medical and law benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed strategies seems similar with existing works conclusions. For examples, high quality data is important for model training [1,2] , using similar data mixture rate to the pre-training data to alleviate data distribution shift [3,4].\n2. The experiments only conduct on relatively small models. The gap may be due to the the small model is not robust enough on the new dataset. It is. unsure that if the larger models ( for example, 13B, 70B ) meet the same issue on continue pertaining.\n3. The IFT model comparison is unfair to me due to some IFT models do not tuned on specific training dataset and they have different base models. \n4. It is unsure that if the proposed IFT models is overfitting into the evaluation dataset by building IFT dataset based on original training data.\n\n\n[1] Chen at al (2023). AlpaGasus: Training a Better Alpaca with Fewer Data\n[2] Zhou et al (2023). LIMA: Less Is More for Alignment\n[3] Parmar et al (2024). Reuse, Don't Retrain: A Recipe for Continued Pretraining of Language Models.\n[4] Ibrahim et al (2024). Simple and Scalable Strategies to Continually Pre-train Large Language Models"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1) The stability gap concept proposed by previous studies is about the inability to maintain performance on **prior** tasks and the one mentioned in this paper is about the performance in the **new** target task. How are they two related in your experiments?\n2) The initial drop in the averaged accuracy of the LLM on the medical tasks looks very insignificant.\n\t- Have you done a statistical test to verify this? \n\t- Is the small drop (<1%) in line with the findings of previous stability gap studies?\n3) Data Mixture Results (Figure 4b and 4c):\n\t- The authors may need to compare the proposed strategies with the baseline (full data with multiple epochs).\n\t- The average medical and commonsense performance seems to drop in the 5th epoch. Why is that the case? What would happen if you continue the pretraining to 6th, 7th, ... epoch?\n4) How similar is the \"high-quality\" medical reference corpus to the downstream tasks?\n\t- If you run the KenLM model on the downstream datasets, what is the perplexity? Would the perplexity be very low too?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The proposed strategies are easy to implement.\n- The LLM fine-tuned with the proposed strategies achieved the highest averaged accuracy score on a suite of medical question answering tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper uses the concept of stability gap to explain the initial drop in LLM performance during continual pretraining in a new domain. The authors propose three training strategies to address the initial instability: 1) continually pretrain the LLM on a properly sized corpus subset for multiple epochs; 2) Continually pretrain LLM on a high-quality corpus subset; 3) Using data mixture rate that is similar to the pretraining data. The proposed strategies improve the accuracy of the LLM in the new domain when compared to the existing continual pretraining techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "## Major\n- It is important to justify the methods of Muennighoff et al. (2024) and Lin et al. (2024) used in this paper. (I assume the four subsequent sentences explain the method (L158-161)). Here are some missing details:\n - Why was KenLM chosen?\n - What is a \"high-quality medical reference corpus\"? how do you define it? This is a fairly critical point because the \"highest-quality\" medical corpus can also be defined as those that resemble the downstream tasks the most, which makes the findings more expected (the closer the continual pretraining data is to the downstream tasks, the better the model will perform in the downstream tasks).\n- The authors claim that the average accuracy of the LLM on the medical tasks initially drops and rises during the continual pretraining.\n\t- However, the drop itself does not look significant (less than 1% averaged accuracy). This makes the observation less strong. (See Question 2)\n- This paper contains a flawed assumption due to the lack of access to the pretraining corpus. If a stability gap was proposed to explain the ability of the model to maintain performance on previous tasks, such an analysis cannot be achieved if we do not have access to the pretraining corpus.\n\t- The authors claimed (L233-235) that language modelling loss also preserves general knowledge and text modelling capabilities, which is a big assumption that is not backed by any evidence.\n\t- Note that text modelling capabilities may still be preserved via language modelling loss during the domain adaptation (continual) pretraining, however, we cannot guarantee that the general knowledge is still being preserved.\n\t- Additionally, there is no guarantee that the continual pretraining corpus was not included in the pretraining corpus. 
To examine this, the authors may have to conduct a pretraining from scratch.\n- There exists a logical gap between the concepts of relative weight update, stability gradient, and instruction-following ability.\n\t- The authors concluded that the relative weight update indicates the stability gradient and, in turn, instruction-following ability (L241-253). However, there is no guarantee that relative weight update relates to stability gradient, let alone instruction-following capability.\n - Additional experiments using pretraining from scratch may help understand this phenomenon better.\n- There are several mentions of a \"properly sized\" subset. However, they are not properly defined.\n- The performance improvement (Figure 4) when compared to the baseline seems to be <1%. This does not look very significant.\n\n## Minor\n- Note that the submission and paper titles are different\n- Abstract is generally filled with jargon which makes it harder to follow.\n- L50-51: The last sentence of paragraph 1 in the Introduction can benefit from some citations.\n- L56: Missing citation for \"Previous research\"\nThe introduction section still contains a lot of undefined jargon (i.e., \"proper size\", \"highest-quality tokens\", \"data mixture\")\n- L194: Concluding that the \"LLM has acquired medical domain knowledge\" based on the perplexity score is a bit of an overclaim. Consider rephrasing it.\n- Table 2: This misses the performance of the Llama-3-8B models without fine-tuning.\n- The authors claim that the proposed strategies are computationally more efficient. By how much exactly? What metrics should you evaluate this on?\n\n## Very minor (e.g., typos, etc)\n- Use consistent verb tense (many inconsistent uses of present and past tenses)\n- Typo L15: \"phrase\" -> \"phase\"\n- L68: Instead of \"harness\" perhaps \"mitigate\" it? 
since you would like to mitigate the stability gap as opposed to harnessing it.\n- Typo L125: lowercase \"Language models\"\n- Typo L125: \"RoBERTa\"\n- Page 4: Perhaps observations 1 and 2 can be swapped because in practice we may not know the downstream tasks during the (continual) pretraining phase.\n- Figure 6b: The caption does not seem to be correct. The figure seems to show accuracy during law continual pretraining, while the caption is about relative parameter updates during the medical continual pretraining process."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Efficient and No Forgetting Domain Continual Pretraining by Mitigating the Stability Gap},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4y6Q98hJzr},\nnote={under review}\n}"
},
"abstract": {
"value": "Adapting Large Language Models (LLMs) to specialized domains like medicine and law through domain continual pre-training has become the cutting-edge method. However, contrary to our expectations of immediate gains, we’ve uncovered a surprising phenomenon: a temporary performance drop at the start of the process, followed by a performance recovery phrase. This drop is not only unexpected but remarkably consistent across different model sizes and domains, such as medical and law. To gain a deeper understanding of this issue, we introduce the concept of stability gap—borrowed from visual models dealing with new class classifications—to explain this initial drop in LLM performance. Based on this concept, we hypothesize that the initial performance drop arises from instability in the model’s general abilities, which we further validated through our experiments.\nWe further reveal that this initial instability is intricately tied to training settings that involve distribution shifts.\nTo address this initial instability and enhance LLM performance within a fixed compute budget, we propose one training strategy that reduces the instability by increasing the epoch number, along with two data sampling strategies focused on data quality and corpus distribution.\nWe conduct various experiments on Llama-family models to validate the effectiveness of our strategies in both medical and legal continual pre-training and instruction tuning. For example, our strategies improve the average medical task performance of the OpenLlama-3B model from 36.2\\% to 40.7\\% with only 40\\% of the original training budget and enhance the average general task performance without causing forgetting. \nFurthermore, we apply our strategies to continually pre-train and instruction-tune the Llama-3-8B model. 
The resulting model, Llama-3-Physician, achieves the best medical performance among current open-source models and performs comparably to or even better than GPT-4 on several medical benchmarks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Continual pretraining"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ac9215997c666de4c1a5b41a9b1c0a334f57d141.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Efficient and No Forgetting Domain Continual Pretraining by Mitigating the Stability Gap"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4ymHtDAlBv | Fast Salient Factor Concentration (FSFC) Recurrent Neural Network for Text Classification | main | Withdraw | Text Classification;Semantic Information Clustering;Recurrent Neural Network | learning on time series and dynamical systems | Weihao Xia;Huachuan Wang;Qiu Chen;Junlong Ma;James Ting-Ho Lo | ~Weihao_Xia2;~Huachuan_Wang3;~Qiu_Chen1;~Junlong_Ma3;~James_Ting-Ho_Lo1 | 1;3;3 | 3;4;4 | 1;2;1 | 2;1;1 | 1;2;2 | 2.333333 | 3.666667 | 1.333333 | 1.333333 | 1.666667 | 1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Thank you to all the reviewers for their time and valuable feedback. The original intent of our method was to replace the basic LSTM or GRU modules in various complex models to accelerate training and inference speed. There are aspects of the paper that need improvement, and after further discussion, we have decided to withdraw the manuscript for further refinement. Once again, we sincerely appreciate the reviewers' efforts."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- Easy to follow and clearly written;\n- Lightweight solution for text classification compared to transformer models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an RNN-based model called Fast Salient Factor Concentration (FSFC), designed to improve training efficiency in text classification by using short-term memory and semantic clustering. While FSFC's concept is intuitive, it has several critical limitations. The approach offers limited innovation, appearing incremental relative to existing RNN and attention-based techniques. Additionally, the research feels outdated, as NLP has largely moved toward transformer-based models. Performance evaluations yield mixed results: FSFC reduces training time but sacrifices accuracy on some datasets. Moreover, baseline comparisons are insufficient, with FSFC only benchmarked against LSTM and GRU models, excluding state-of-the-art transformers like RoBERTa and recent LLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Limited novelty: The concept of text compression and segmentation is well-explored, with similar techniques in attention mechanisms and memory simplification.\n- Outdated approach: It’s advisable for the authors to employ BERT-based models or LLMs, as transformers are now the NLP standard. Although addressing transformer complexity with RNNs is a good direction, applying it to basic tasks like text classification feels outdated."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Does your model reduce the inference time?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "This paper proposes a structure that is 5 times faster to train than LSTM."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a method to remove the gating mechanisms of LSTM and condense memory. The proposed method achieves better training speed while retaining accuracy."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It is tough to understand Figure 1. I can not find any RNN or recurrent in this figure. What is your whole network? Moreover, this figure doesn't have detailed captions.\n\n2. Why does long-term memory not matter in classification? Please provide justifications with citations or experiments.\n\n3. This paper does not compare with other improvements on LSTM. Do other improvements over LSTM reduce the training time and achieve better accuracy?\n\n4. This paper does not provide details about the experiment setup. Although we do not know what the network structure proposed in this paper looks like, nor do we know how the configuration compares to GRU and LSTM, or the length of the experimental data.\n\n5. E-score is not more effective than just presenting accuracy and time."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What is the motivation to propose an alternative to RNN for text classification? What problems are you trying to solve? Does \"the long-term memory mechanisms of traditional RNNs do not fully align with human cognitive learning processes\" really matter? How does it impact the performance in text classification tasks? Could you give us some concrete examples of how the misalignment between RNNs and human cognitive processes impacts performance in text classification tasks?\n\n2. What is the contribution of FSFC? The author should discuss how FSFC compares to or complements more recent approaches in NLP. RNN-based methods for text classification are quite outdated now. While I am not against RNNs, achieving minor improvements and reducing training time seems to bring little new knowledge to the current NLP community.\n\n3. How about employing other methods like BERT and LLMs? What is the value of an alternative to RNN compared with pretrained language models? Why do we still need RNN-based methods for text classification?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well-organized, and the writing is fine.\n\n2. The paper evaluates the performance of FSFC on four datasets. The experimental results show the effectiveness and efficiency of FSFC."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes Fast Salient Factor Concentration (FSFC) RNN, a new architecture for classification tasks, to enhance the processing of crucial information by dynamically clustering and compressing semantic information. The performance on YelpReviewFull proves that FSFC has a higher accuracy with less training time."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation of this paper is not very clear. I'm not sure what problems this paper is trying to solve (Text classification? An alternative to RNN? An alternative to RNN on Text classification?). It would be beneficial to specify what problems the authors aim to address—are they focusing on text classification, proposing an alternative to RNNs, or something else? Anyway, the contribution seems limited.\n\n2. The idea presented in this paper does not appear particularly interesting. It proposes an alternative to RNNs for text classification, but I believe it lacks significant contributions to the current NLP community. Nowadays, many practitioners favor pre-trained models over RNNs for text classification tasks. Additionally, large language models (LLMs) tend to focus on multi-tasking rather than on single NLP tasks.\n\n3. If the goal is to propose a new RNN, conducting experiments only on text classification is insufficient to verify the method's generalization.\n\n4. Even for text classification tasks, the models compared in the paper are not comprehensive (e.g., ELMO, BERT, LLMs, and so on). The paper lacks comparisons with these strong methods for text classification. As I mentioned in Weakness 2, these models are precisely the kinds of models that are commonly utilized in the field of text classification today. If the paper focuses on text classification, not comparing with these mainstream models would be unfair."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nxia2024fast,\ntitle={Fast Salient Factor Concentration ({FSFC}) Recurrent Neural Network for Text Classification},\nauthor={Weihao Xia and Huachuan Wang and Qiu Chen and Junlong Ma and James Ting-Ho Lo},\nyear={2024},\nurl={https://openreview.net/forum?id=4ymHtDAlBv}\n}"
},
"abstract": {
"value": "Models based on Recurrent Neural Networks (RNNs) have been widely employed for text classification tasks. Traditional RNNs primarily emphasize long-term memory capabilities. However, this approach does not fully align with human cognitive learning processes, particularly in the context of classification tasks. The human brain typically extracts essential information relevant to the classification categories, disregards irrelevant details, and compresses the input to accelerate decision-making. Inspired by this, we propose a novel architecture, the Fast Salient Factor Concentration (FSFC) RNN, specifically designed for classification tasks. FSFC dynamically clusters and compresses semantic information by leveraging the short-term memory capabilities of recurrent neural networks. Experimental results demonstrate that FSFC achieves performance comparable to existing RNNs, while significantly improving training efficiency in classification tasks. Based on the YelpReviewFull dataset, FSFC improves accuracy by 1.37% over Long Short-Term Memory (LSTM), while reducing training time by 86%. Additionally, we propose a new evaluation metric, E-score, which integrates both accuracy and time efficiency to comprehensively assess the overall performance of each network."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Weihao_Xia2",
"~Huachuan_Wang3",
"~Qiu_Chen1",
"~Junlong_Ma3",
"~James_Ting-Ho_Lo1"
]
},
"authors": {
"value": [
"Weihao Xia",
"Huachuan Wang",
"Qiu Chen",
"Junlong Ma",
"James Ting-Ho Lo"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Text Classification",
"Semantic Information Clustering",
"Recurrent Neural Network"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "xia|fast_salient_factor_concentration_fsfc_recurrent_neural_network_for_text_classification"
},
"pdf": {
"value": "/pdf/6c754b36fb662482c6e84cee434cb837a190bbed.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Fast Salient Factor Concentration (FSFC) Recurrent Neural Network for Text Classification"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
4ytHislqDS | IFORMER: INTEGRATING CONVNET AND TRANSFORMER FOR MOBILE APPLICATION | main | Active | Lightweight Networks;Efficient Networks;Vision Transformers;Classification | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 5;6;6;6;8 | 5;2;4;4;3 | 3;2;3;3;4 | 3;2;3;3;3 | 3;2;3;3;3 | 6.2 | 3.6 | 3 | 2.8 | 2.8 | -0.520416 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the motivation and justification for the necessity of the design that replaces half of the third stage and full last stage conv blocks with transformer blocks? Please refer to Weaknesses 1. \n\n2. I wonder what is the performance and latency of MHA as the missing step between 'kernel sz.' and 'SHA' in Figure 2."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper is well-organized and easy to follow. Detailed design specifications and comprehensive experiments enhanced the integrity of the article and demonstrated its contributions.\n\nThe main contribution, SHMA, provides a new approach to designing efficient attention and Transformer blocks. The resulting iFormer series outperforms sota baseline mobile networks with stronger performance and lower latency."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper designs iFormer, a new family of efficient mobile vision networks combining ConvNet and Transformers. The iFormer evolves from ConvNeXt with a series of efficiency designs. \n\nSingle-Head Modulated Attention(SHMA) is proposed as substitutional Transformer blocks to replace part of the Conv blocks in later stages of the enhanced ConvNeXt. SHMA replaces multi-head attention with single-head attention to improve efficiency and introduces a modulation mechanism to boost performance. \n\nThe resulting iFormer series achieves the best performance compared with state-of-the-art mobile-level models on different downstream tasks with lower latency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1: \n\nThe motivation and necessity of substituting half of the conv blocks at the third stage and all blocks at the last stage into Transformer blocks in ConvNeXt are still not very clear. From Figure 2, changing the conv blocks into SHA blocks gains a 0.4% improvement in performance but is also 0.12 ms (about 10%) slower. I'd like to know further explanation for this design and ablation studies on the choice of stages or different ratios of Conv versus Transformer blocks if possible. \n\n\nW2:\n\nAccording to the citation of SHViT in this paper, I suppose the SHA refers to the Single-head self-Attention in SHViT design. But in Figure 4, full channels of input (CxHxW) are projected to Q/K/V (CxL) which does not align with the design of SHA in SHViT but looks like the traditional definition of Single-head attention that performs a self-attention on all channels of input using a single head. \n\nConsidering there are limited words about the details of SHA in this paper, I would expect further specification of which SHA is used in iFormer and comply with the pipeline figure accordingly. \n\nW3:\n\nIn this paper, the additional reshaping operations in MHA are considered as the reason for the slower inference speed compared with SHA. But multiple factors have an impact on the runtime speed difference and there's no evidence to support the extra runtime only or mainly comes from extra reshapings. \n\nFirst, depending on the code implementation, replacing MHA with Single-head self-Attention may remove the reshaping operation in self-attention, but also introduce additional split and concat operations. And generally, split and concat operations cost more memory and are slower than reshape. \n\nSecondly, SHA applies self-attention on fewer channels, which largely reduces the computational cost and speeds up runtime. 
\n\nTherefore I suggest the authors conduct an ablation study or provide empirical evidence to isolate the impact of reshaping operations versus other factors like split/concat operation and reduced self-attention channels on the inference speed. This would help clarify the main factors contributing to SHA's efficiency and provide a more comprehensive understanding of the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "please refer to weakness"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The study of model architecture could inspire further exploration in designing more efficient architectures.\n2. The paper is well-organized and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new family of mobile hybrid vision networks. By integrating the rapid local representation capability of convolution with the efficient global modeling ability of self-attention, the proposed architecture, iFormer, achieves significant performance in classification and several downstream tasks, while maintaining low latency on mobile devices for high-resolution inputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In Table 1, iFormer-S achieves the same latency as RepViT-M1.0 with slightly fewer parameters, yet in larger variants, iFormer achieves lower latency with substantially more parameters compared to RepViT. What is the reason for this difference?\n2. Some studies are not included in the comparison or the related wotk section, such as [1, 2].\n\n[1] Cmt: Convolutional neural networks meet vision transformers.\n\n[2] Learning efficient vision transformers via fine-grained manifold distillation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The largest model shown by iFormer, iFormer-L, is only about 15M, which isn’t considered large, even for edge devices, especially since recent edge LLMs can reach 1B parameters. I wonder how well a larger iFormer (around 100M) would perform."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "I think the logic of exploration in this article, starting with ConvNeXt, first “lightening” the ConvNeXt to create a streamlined\nlightweight network, then exploring the attention module, is reasonable. \n\nI think the analysis about “cosine similarity between multiples” proves that using a single attention is good and worth supporting. \n\nI think the experiment reported in this paper is comprehensive (imagenet, coco, ade-20k). The paper also reports some knowledge distillation results, which is suitable in mobile network papers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a mobile hybrid vision network, iFormer. The paper goes from ConvNeXt to a lightweight mobile network. iFormer removes memory-intensive operations in MHA and employs an efficient modulation mechanism. The author conduct standard benchmark experiments on ImageNet, COCO and ADE20K."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1: \nSingle head self-attention has been conducted in \"Shvit: Single-head vision transformer with memory efficient macro design\" .\n\nAlternative to standard self-attention has been conducted in GhostNetV2.\n\nModulation in the token mixer module has been conducted in Conv2Former. \n\nThis paper references many related methods, and while that is one approach, I don't think it stands out. Although such research is a decent format, I believe it impacts the novelty of this paper. \n\n2: \nThe process of evolving from the ConvNeXt baseline to the lightweight iFormer may not apply to slightly larger models, and some steps show very minimal improvements, making them hard to justify."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is easy to follow, with clear writing and presentation.\n2. Evaluation results are comprehensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a new family of mobile hybrid vision networks, called iFormer, by integrating the fast local representation capacity of convolution with the efficient global modeling ability of self-attention."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. How does this method compare with neural architecture search (NAS) methods?\n\n2. How does the designed model perform on other mobile devices, such as NVIDIA Jetson Nano or Raspberry Pi?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. fast local representation capacity of convolution and the efficient global modeling proficiency of the proposed SHMA\n2. A series of novel techniques such as stack of overlapping convolution instead of aggressive non-overlapping patch in the early layers\n3. The model is structured in four stages. The early stages use fast convolution to capture local features efficiently, using a modified and lightweight version of ConvNeXt optimized for mobile latency.\n4. In the lower-resolution stages, self-attention is used to model long-range dependencies. To address the challenges of traditional multi-head self-attention (MHA), the authors propose SHMA, which uses a single-head attention mechanism to minimize memory costs while retaining high performance. SHMA reduces latency by optimizing reshaping operations and leveraging spatial context interactions. SHMA is combined with a parallel feature extraction branch to enhance feature representation. The outputs from both branches are fused to enable dynamic information exchange, mitigating any performance drop caused by simplifying MHA"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Summary\n\nThis paper proposes a mobile friendly vision network that improves the latency and accuracy by combining the strengths of both CNNs and ViTs. The novel aspect of this work is the single head modulation self-attention (SHMA). This SHMA learns spatial context through optimized self-attention. It takes ConvNext as the base model and improves it further with various techniques. The authors streamline the ConvNeXt architecture, making it suitable for real-time use on mobile devices, such as the iPhone 13, focusing on reducing latency rather than FLOPs or parameter count. The combined techniques lead to more than 80% top-1 accuracy with 1.1ms latency on iphone 13. Overall a great contribution to the research community."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. What is the runtime complexity of iFormer network?\n2. When running on iPhone (mobile device), what is the peak memory consumption? \n3. How long the iPhone charge will last if an iFormer based app is run on certain fps?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024iformer,\ntitle={{IFORMER}: {INTEGRATING} {CONVNET} {AND} {TRANSFORMER} {FOR} {MOBILE} {APPLICATION}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ytHislqDS},\nnote={under review}\n}"
},
"abstract": {
"value": "We present a new family of mobile hybrid vision networks, called iFormer, with a\nfocus on optimizing latency and accuracy on mobile applications. iFormer effectively\nintegrates the fast local representation capacity of convolution with the efficient\nglobal modeling ability of self-attention. The local interactions are derived\nfrom transforming a standard convolutional network, i.e., ConvNeXt, to design a\nmore lightweight mobile network. Our newly introduced mobile modulation attention\nremoves memory-intensive operations in MHA and employs an efficient\nmodulation mechanism to boost dynamic global representational capacity. We\nconduct comprehensive experiments demonstrating that iFormer outperforms existing\nlightweight networks across various tasks. Notably, iFormer achieves an\nimpressive Top-1 accuracy of 80.4% on ImageNet-1k with a latency of only 1.10\nms on an iPhone 13, surpassing the recently proposed MobileNetV4 under similar\nlatency constraints. Additionally, our method shows significant improvements in\ndownstream tasks, including COCO object detection, instance segmentation, and\nADE20k semantic segmentation, while still maintaining low latency on mobile\ndevices for high-resolution inputs in these scenarios. The source code and trained\nmodels will be available soon."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Lightweight Networks",
"Efficient Networks",
"Vision Transformers",
"Classification"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a96c4be844147c134d9a3705bc06802889378e13.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/8e73896496d32bd6c4a9d12e6e9a71691d14677c.pdf"
},
"title": {
"value": "IFORMER: INTEGRATING CONVNET AND TRANSFORMER FOR MOBILE APPLICATION"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4ytRL3HJrq | Nova: Generative Language Models for Assembly Code with Hierarchical Attention and Contrastive Learning | main | Active | large language model;hierarchical attention;contrastive learning;assembly code | foundation or frontier models, including LLMs | 3;5;5;6;8 | 4;3;4;3;3 | 3;3;3;2;3 | 2;2;3;3;3 | 3;3;2;2;4 | 5.4 | 3.4 | 2.8 | 2.6 | 2.8 | -0.703526 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why do the evaluation for code similarity detection use cosine similarity (l. 321) when the objective (l. 212) uses the l2 distance?\n2. What is the underlying metric for the Pass@k in the decompilation evaluation? Exact match, or some more lenient equivalent? It seems wrong to use exact match when, for instance, variable names would be arbitrary.\n3. In Table 4, the second row is exactly the \"Nova-1B\" row of Table 2, but I was under the impression that \"Nova-1B\" was more than just \"DeepSeekCoder + Nova's attention\", in particular the additional training data, and CL objective. Are the numbers off, or the caption, or did I miss something?\n4. When creating the assembly datasets (Appendix A.1), why go all the way to compiling executables, then using `objdump` for disassembling, with the associated possibilities of failure, rather than dump the assembly in the first place with `gcc -S`?\n5. Do you have preliminary results, citations, or intuition behind the \"normalizing\" step of the assembly language performed in Fig. 6, in particular the addition of spaces? Is that necessary?\n\nMinor points:\n1. In the numerator on l. 265, is $f_j^q$ supposed to be $f^q$? or $f_j^p$ for which the substitution wouldn't apply?\n2. l. 300, how many samples do the GPT models perform, then, to be able to compute the Pass@10 in Table 2?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Originality\n--------------\n1. While hierarchical attention mechanisms are not new, the design of this one is innovative in that: it takes into account the specific format and constraints of assembly instructions, and it accommodates for using regular tokens in the same sequence (e.g., natural text instructions).\n2. The contrastive objective losses, as well, encode a priori knowledge of the underlying data: compilation stages preserve semantics, and optimization stages are sequential.\n\nQuality\n----------\nThe different contributions are overall sensible, and contribute to the performance of the model. Experiments are adequately designed, and support the conclusions of the paper. The additional experiments help understand the role of the different contributions, in particular their effect on how embeddings get clustered and the effect it can have on the final model's performance.\n\nClarity\n---------\n1. The paper includes most of the relevant information, either in the main text or appendix. Relevant literature is mentioned and cited.\n2. Figures and examples make it much easier to understand the logic, especially Fig. 3.\n\nSignificance\n-----------------\n1. This work shows a significant improvement on benchmarks, sustained across model sizes, and adaptable to other models. This is an advancement on an important, developing field of machine learning applications.\n2. Given that these improvements do not require any in-depth change (e.g., to the vocabulary) and are compatible with already pre-trained model make it easier to experiment with in different settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a way of training an LLM to improve its performance on tasks that require understanding of assembly code, in particular code decompilation, and assembly code similarity detection.\n\nThis is achieved by several contributions:\n1. A multi-way, parallel corpus of programs written in C, as well as the corresponding assembly produced by `gcc` with different levels of optimization (0 to 3), used for further training of pre-trained LLMs.\n2. A hierarchical attention mechanism, structured to summarize the content of each instruction into the representation of a single token. This mechanism is compatible with existing models.\n3. Two auxiliary contrastive loss objectives: a \"functionality\" one that minimizes the distance between representations of the same original code, while maximizing the distance between representations of different code pieces, and an \"optimization\" one encoding the fact that further levels of optimization should increase the distance between program representations.\n\nTwo variants (with 1B and 6B parameters respectively) of a model trained with these changes, and further fine-tuned for the task of interest, show a large improvement over state-of-the-art."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Quality\n----------\n1. One of the 3 motivating cases in the introduction, malware detection, is not evaluated or considered at all in the rest of the paper. I understand the scope of the paper needs to end somewhere, but it would have strengthened the paper to include experiments on such a dataset.\n2. Details are missing in how the authors are certain that test data sets (both for decompilation and for similarity detection) do not overlap with any of the training data, including the pre-training data of DeepSeek-Coder, even inadvertently.\n3. An additional ablation on $\\textrm{Nova}_{-CL}$ would have helped see if there are any non-linear interactions between HA and CL.\n\nClarity\n---------\nThe overall organization of the paper could be improved. Many times, a concept, method, or setting is used in context before being formally explained. For instance:\n1. If the \"Related Work\" section is positioned earlier, it would help introduce the baseline models (DeepSeekCoder, LLM4Decompile) that are used in the previous \"Results\" section, as well as attention mechanisms, including LongCoder's, also used earlier.\n2. When describing the new datasets, it should be clear much earlier that \"source code\" really means \"C code\" (in the caption of Table 1, for instance), \"assembly\" is X86 assembly (or maybe X86-64? that's not so clear), that only `gcc` is considered as a compiler, and whether each \"program\" actually means a full executable program, or if it includes functions as well.\n3. Similarly, the contrastive losses mention \"the embedding\" of a function, which is quite ambiguous in transformers, especially if the model family (encoder-decoder?) is not mentioned.\n4. There is also a lot of ambiguity in notation, or the semantics of different objects. For instance:\n * Do Table 1, and Appendix A.2, refer to the original \"AnghaBench\" and \"The-Stack\" datasets, or the new datasets constructed by the authors in Section 2.1? 
Maybe it would be better to name the new ones.\n * In Functionality CL, l. 208 says it \"optimizes Nova with the constraint\", but a constraint is not a loss or objective. l. 215, \"constraints can be trained\" do not really make sense. It's also not obvious how the loss defined at l. 220 actually implements (a relaxation of) these constraints. It's also not explained if the sum over $f_i \\in F$ is actually done over all the million embeddings in the corpus, or how it's implemented in practice.\n * K is introduced in Section 2.5, and then in 3.3., but we don't know what kinds of values will be used in practice. Also, Table 2 uses \"Pass@K\", but that's not the same K.\n * In captions of Fig. 4 (b) and (d), the tables are more \"designs\" than \"implementations\"\n * In Fig. 4 (b), the 1-4 indices are unfortunate as, for instance, $O0_3$ reads a lot like `-oO3`\n * The equations at l. 220 and l. 266 have a really similar form, but the use of indices $i$ and $j$ is swapped between the two, making it a bit harder\n\nSignificance\n-----------------\nThe results are somewhat limited by the use of a single assembly language, and a single compiler, but this is acknowledged and does not seem like a fundamental limitation.\n\nMinor points\n-----------------\nl. 461: \"cauclated\" -> \"calculated\"?\n\nIn the bibliography:\n- Vaswani et al. is actually from 2017, not 2023 (though the arXiv version has had an inconsequential update in 2023), and a venue should be indicated (I'd suggest NeurIPS rather than arXiv)\n- Other articles are missing a venue or source\n- Several articles have incorrect capitalization in the title due to the lack of curly braces, e.g., use `{CodeT5}` to avoid it being rendered as \"Codet5\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The paper argues that preceding-instruction attention helps avoid reuse of the same register (e.g., \"eax\") immediately after it is used in the previous instruction. However, this motivation is questionable because it does not explain how further subsequent instructions are prevented from reusing the same register. A more straightforward solution could be achieved with inter-instruction attention, as it can attend to all previous instructions, which raises the concern of functional overlap between preceding-instruction attention and inter-instruction attention, thus potentially making the preceding-instruction attention redundant.\n2. While Nova-1B and Nova-6B are much larger than CodeART, their performance gains in BCSD are limited. For example, in the k=100 case, CodeART sees a 17% improvement over JTrans with attention regularization, but Nova's improvement is only marginal, from 0.76 to 0.78 (as shown in Table 12). This suggests that adding hierarchical attention and other inductive biases provides limited benefits when scaling the model, and Tables 11-14 show that removing hierarchical attention does not lead to significant performance drops, questioning its overall necessity. And also in Table 5, the improvement brought by contrastive learning is much higher than the Hierarchical Attention.\n3. In the paper's analysis of attention distribution (Figure 10), the standard attention frequently converges on the first token, a phenomenon known as attention sink [1]. This behavior is also evident in the analysis of hierarchical attention (Figure 10(c, d)), where each token strongly attends to the first token within its attention mask, specifically the [INST-(x-1)] token, which represents the summary of the previous instruction. But it is not common when human try to interpret the functionality of each individual instruction. 
Furthermore, the justification for the Hierarchical Attention Mechanism —which selectively uses specific attention heads to represent the best attention maps—is somewhat ad hoc and lacks a clearer rationale. \n4. The Hierarchical Attention Mechanism introduced in this paper represents a strong inductive bias; however, the underlying insights behind this inductive bias are not clearly explained. Additionally, the mechanism bears a striking resemblance to the Attention Regularization used in CodeART, with the primary difference being the absence of Preceding-Instruction Attention in CodeART. The effectiveness of this additional attention component has also been called into question earlier in the reivew, casting some doubt on its true contribution to the overall performance.\n5. While the use of contrastive learning aligns well with the BCSD task—improving performance by ensuring that functionally similar binaries, even across different optimizations, are represented similarly—it's less clear how this objective enhances the model's ability in decompliation. The training goal focuses on increasing the similarity of tokens from the same function but compiled with different optimization settings. However, this doesn't seem directly aligned with the ultimate goal of recovering executable source code, which requires more precise structural and semantic understanding beyond just token similarity across optimization levels. It would be greatly appreciated if the authors could provide some intuition as to why this approach can lead to improvements in decompliation.\n6. The authors introduced a novel optimization contrastive learning approach for the BCSD task, which had not been previously applied in the previous works, which commonly use the InfoNCE loss (line 220) or the triplet loss. 
As it is not discussed with deeper detail in the paper, it raises the question of whether these gains are substantial enough to justify the added complexity and whether this approach could be effectively generalized to improve other models in BCSD tasks.\n\n[1]: Efficient Streaming Language Models with Attention Sinks"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper is well-structured and easy to follow. Concepts such as hierarchical attention and contrastive learning are clearly explained.\n2. The paper proposes a new method for encoding assembly code by using a Hierarchical Attention Mechanism to effectively capture the semantics of assembly instructions, while employing Contrastive Learning to ensure that functionally equivalent assembly code, even at different optimization levels, is represented similarly. This novel combination allows the model to robustly understand and learn from diverse assembly code structures.\n3. The paper conducts a broad range of experiments across multiple tasks and datasets, providing comprehensive evidence of the model’s effectiveness.\n4. Despite its specialized focus on assembly code, Nova's hierarchical attention is compatible with standard self-attention mechanisms, allowing it to seamlessly integrate and benefit from advancements in base models and code generation models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents Nova, a generative language model specifically crafted for understanding and generating assembly code. Nova integrates hierarchical attention mechanisms with contrastive learning to effectively capture the semantics of code. The hierarchical attention mechanism focuses on intra-instruction, preceding-instruction, and inter-instruction relations, while contrastive learning ensures that functionally equivalent code, even with different optimizations, is similarly represented. The model is evaluated on two key tasks: decompilation (recovering high-level source code from assembly) and binary code similarity detection (BCSD) (measuring the similarity between binary code functions). Nova shows superior performance in both tasks, excelling in decompilation by accurately generating source code from optimized assembly, and achieving high recall in BCSD by effectively identifying similar code across different optimization levels."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Unclear motivation for introducing several inductive bias by Hierarchical Attention Mechanism. While the added attention mask inductive bias shows promising results in the BCD task, its impact in the BCSD task is minimal. This discrepancy raises questions about why the inductive bias performs well in one task but fails to offer significant improvements in the other.\n2. Lack of Design Discussion. The paper lacks sufficient discussion on key design components like Preceding-Instruction Attention and Optimization Contrastive Learning (CL). Without Preceding-Instruction Attention, the attention design is quite similar to CodeART, raising questions about the novelty and contribution of the approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "For binary similarity detection, compilers may inline functions or eliminate them altogether. How does your approach handle such scenarios?\n\nIf additional information (e.g., execution traces) were provided to GPT, or if iterative interaction with GPT were allowed, could the proposed approach still outperform a GPT-based model?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Clear Writing and Novel Application: The paper is well-written and easy to follow. The idea of applying hierarchical attention to assembly code is interesting and novel. While hierarchical attention is commonly used in NLP tasks, applying this mechanism to assembly code is, to the best of my knowledge, unprecedented.\n\n2. Promising Results: The evaluation results are promising. Nova demonstrates substantial improvements in both decompilation accuracy and similarity detection compared to existing models, validating its approach with strong experimental evidence."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents Nova, a generative language model specifically designed for assembly code, addressing unique challenges posed by the low information density and diversity in assembly syntax due to compiler optimizations. Nova introduces a hierarchical attention mechanism and employs contrastive learning to improve the model's understanding of assembly semantics across diverse optimization levels. Trained on a large assembly corpus, Nova outperforms existing techniques in tasks like binary code decompilation and binary code similarity detection, showing improvements in Pass@1 and Recall@1 rates over state-of-the-art models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Generalizability: The model is trained exclusively on x86 assembly code, which may limit its generalizability to other assembly languages, such as ARM or MIPS.\n\nRealism of Evaluation Settings:\n\n(1) The decompilation prompt requires optimization level information, but it is unclear if this information is accessible in stripped binaries.\n\n(2) For baseline models like GPT, fine-tuning with additional data isn’t necessary, raising questions about the fairness of the comparison. If GPT were given a few-shot learning setup or fine-tuned using OpenAI’s API, could it still be outperformed by the proposed approach?\n\n\nRelated Work: The paper omits discussion of several relevant works, which could provide a broader context for its contributions.\n\n[1] Debin: Predicting Debug Information in Stripped Binaries. CCS 2018\n\n[2] {DIRE}: A Neural Approach to Decompiled Identifier Renaming. ASE 2019\n\n[3] Learning to Reverse DNNs from AI Programs Automatically. IJCAI 2022\n\n[4] Asm2Vec: Boosting Static Representation Robustness for Binary Clone Search against Code Obfuscation and Compiler Optimization. S&P 2019\n\n[5] Neural Network-based Graph Embedding for Cross-Platform Binary Code Similarity Detection. CCS 2017.\n\n[6] ecompiling x86 Deep Neural Network Executables. Security 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please check my concerns in the weakness section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Strengths:\n+ The topic is interesting and important, addressing large language model (LLM) comprehension of assembly code.\n+ The paper is well-structured and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a generative model, Nova, tailored for assembly code tasks. Nova employs a hierarchical attention mechanism and is trained using contrastive learning objectives. This paper evaluates its effectiveness on two assembly code tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses:\n- Comparison may be unfair due to different fine-tuning practices.\n- Evaluation of individual components is insufficient.\n- Generalization assessment is lacking.\n \n(1) Unfair Comparison: Nova is evaluated on two tasks, with fine-tuning applied specifically for each. However, the baseline models (such as Table 2) do not undergo the same fine-tuning for the tasks, leading to a potentially unfair comparison.\n \n(2) Component Evaluation: Nova’s hierarchical self-attention mechanism consists of three components, yet the paper lacks detailed performance assessments for each part. Despite a reasonable design, their individual impact remains unexamined.\n \n(3) Contrastive Learning Objectives: The contrastive learning objectives contain two distinct components. Further evidence is necessary to substantiate the utility of each objective. Additionally, the contrastive learning approach depends on the available optimization levels. Handling unseen optimization levels at inference should be discussed.\n \n(4) Normalization Process: In the data collection section, a normalization step is applied, but its relevance or benefit to Nova’s training is unclear.\n \n(5) Results across different optimization levels should be explored—e.g., training on O0, O1, O2 and testing on O3.\n \n(6) Random Sampling in BCSD Task: The BCSD task employs random sampling, yet statistical results are missing. Reporting such results would reduce the impact of randomness on performance claims."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. See weakness 1,2 \n2. Could you provide more details about how to construct $F$ in practice used in Functional CL?\n3. The authors state that hierarchical attention is only applied to half of the attention head. Since different attention heads can learn different features, I wonder if this setup is robust to the selection of the attention heads?\n4. Would the pre-trained models (Nova-1B, 6B) be public available?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. LLMs for binary code is an important topic to study\n2. This work proposes new methods to train Nova based on the properties of assembly code, which is clearly motivated.\n3. The proposed models show a clear improvement in binary code decompilation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents Nova, a generative LLM for assembly code. To effectively learn from assembly with low information density, it uses a novel hierarchical attention mechanism which combines intra-instruction attention, preceding-instruction attention and inter-instruction attention. It further utilizes contrastive learning to better learn the semantics of assembly code from different optimization levels. The authors demonstrate the superiority of Nova over the baselines on binary code decompilation and code similarity detection tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The comparison on code similarity detection may not be fair. For example, CodeArt uses 12 transformer blocks with 768 hidden dimensions, whose size is smaller than Nova-1B. The authors should compare Nova with the baseline under a similar size with the same pre-training data to demonstrate the superiority of Nova on code similarity detection. For the current result, we can find that compared with CodeArt, Nova actually does not show a significant improvement (e.g. both are 0.64 for Nova-1B under K=500). So it is in question whether Nova is indeed better for code similarity detection.\n2. The experiments for Comparison with Techniques Handling Long Input are confusing. Specifically, it has the following problems:\n\na) What is \"Nova’s Fine-Tuning\" in Table 3? It seems Nova does not have something special in terms of fine-tuning. Does it just mean fine-tuning with hierarchical attention or also with Nova's pretraining as suggested in Line 360? \n\nb) What is the average token length for downstream tasks before truncation? The authors want to claim Nova is better at solving long input challenges. But I see from the Appendix that Nova uses the input length as 1024 tokens during pre-training and 2048 for fine-tuning. It may be hard to claim this length to be \"long-context\". Considering that assembly code should be much longer than source code and Granite-3B-Code-128K can handle 128K input tokens at most, have you tested in the benchmarks where the input context is longer, e.g. 8k/32k/128k?\n\n3. The presentation of the paper can be improved. Specifically, a) Line 281 is unclear. The authors should clearly state that their pre-training contains two stages and the loss in Line 240 is used in the second stage. b) The ablation study should be separated into new subsections instead of mixing with Section 4.1 c) The equations are not numbered."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024nova,\ntitle={Nova: Generative Language Models for Assembly Code with Hierarchical Attention and Contrastive Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ytRL3HJrq},\nnote={under review}\n}"
},
"abstract": {
"value": "Binary code analysis is the foundation of crucial tasks in the security domain; thus building effective binary analysis techniques is more important than ever. Large language models (LLMs) although have brought impressive improvement to source code tasks, do not directly generalize to assembly code due to the unique challenges of assembly: (1) the low information density of assembly and (2) the diverse optimizations in assembly code. To overcome these challenges, this work proposes a hierarchical attention mechanism that builds attention summaries to capture the semantics more effectively and designs contrastive learning objectives to train LLMs to learn assembly optimization. Equipped with these techniques, this work develops Nova, a generative LLM for assembly code. Nova outperforms existing techniques on binary code decompilation by up to 14.84 -- 21.58% higher Pass@1 and Pass@10, and outperforms the latest binary code similarity detection techniques by up to 6.17% Recall@1, showing promising abilities on both assembly generation and understanding tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large language model",
"hierarchical attention",
"contrastive learning",
"assembly code"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/05395eaeba223c6be1b073a356ee472843dad3f0.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Nova: Generative Language Models for Assembly Code with Hierarchical Attention and Contrastive Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4z3IguA4Zg | MLLM can see? Dynamic Correction Decoding for Hallucination Mitigation | main | Active | Hallucination Mitigation;Multimodal Large Language Models;Decoding Strategy | foundation or frontier models, including LLMs | 5;5;6;6 | 4;4;3;5 | 3;2;3;3 | 3;2;3;3 | 3;3;3;4 | 5.5 | 4 | 2.75 | 2.75 | 3.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Given that DeCo's effectiveness depends on selecting an optimal layer range (e.g., 20-28), does the layer range need tuning for different MLLMs?\n- Could you provide more details on the selection process for the 500 images used in experiments? Additionally, which split(s) were used to determine and evaluate the hyperparameters, and were any specific criteria applied for these selections?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors demonstrate through probing experiments that MLLMs can recognize objects in earlier layers but tend to “forget” this information due to language model priors in deeper layers, leading to hallucinations. This insight offers a novel layer-wise perspective on the hallucination mechanism in MLLMs.\n- The figures illustrating token probabilities across transformer layers effectively highlight the trends for hallucinated versus non-hallucinated tokens, making the analysis accessible and informative.\n- Compared to existing methods like VCD and OPERA, DeCo achieves similar or better hallucination suppression with lower latency overhead, enhancing its practicality for real-world applications.\n- Evaluation across diverse benchmarks (CHAIR, POPE, and MME) and several models (InstructBLIP, MiniGPT-4, LLaVA-1.5, and Qwen-VL) provides a well-rounded assessment of DeCo’s effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces DeCo (Dynamic Correction Decoding), a decoding technique to mitigate hallucinations in Multimodal Large Language Models (MLLMs). The authors identify that MLLMs are capable of recognizing objects in earlier layers, but this recognition is suppressed in deeper layers by strong language model priors, which leads to hallucinations. DeCo dynamically integrates the output from preceding layers, which contain higher probabilities for ground-truth tokens, into the final layer logits to enhance visual grounding and suppress hallucinations. Experimental results on datasets such as CHAIR, POPE, MME and GPT-4o assisted evaluation demonstrate DeCo’s significant improvements over baselines in hallucination suppression across multiple MLLMs, with manageable latency increases, highlighting its practical applicability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- In Figure 9, the response includes awkward repetition, with \"The horse statue is positioned on top of the chair\" stated multiple times. This raises questions about the effectiveness of the chosen α\\alphaα value in avoiding repetitive language, as the authors indicated that high \\alpha values could increase repetition.\n- In Figure 10, DeCo reduces a significant hallucination (misidentifying a lift as a \"chair\"), but the output still contains a hallucination about \"several other people visible in the background.\" This discrepancy between benchmark performance and qualitative examples suggests that DeCo’s effectiveness might not fully translate into consistently accurate real-world responses.\n- For each time step tt, language tokens that are not related to the visual input but are essential for sentence generation may be influenced as they pass through the proposed method. There appears to be a lack of investigation into the nature of this influence."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "My primary concern lies in the potentially low quality of text generated from the preceding layers. I will be happy to raise my score if my current questions and concerns can be addressed."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The motivation seems interesting.\n2. The paper is well written and easy to follow. The diagrams are essential to understanding this paper.\n3. This paper achieves good results on existing datasets.\n4. The main technical pipeline is clear."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper demonstrates that while MLLMs may produce incorrect target outputs in the final layer, they effectively recognize visual objects in the preceding layers. The authors propose a dynamic correction decoding method for MLLMs (DeCo), which adaptively selects relevant preceding layers and proportionally integrates their knowledge into the final layer to adjust the output logits. The proposed method outperforms existing approaches on public datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although the experiments indicate improved performance in preceding layers, I am concerned about the coherence and richness of the text generated at these stages. Could you provide further evaluation metrics for text quality, such as BLEU or other relevant scores?\n2. In Figure 1(b), the interval [10, 20] appears optimal, yet in Figure 7(b), [17, 28] shows better performance. Could you clarify this discrepancy?\n3. Could you provide more evidence to demonstrate how dynamic soft modulation prevents abrupt changes in logits? Additional ablation studies might further substantiate this claim.\n4. Could you share detailed MME results to highlight the method's performance across different subtasks?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- It's unclear to me how is the affine layer $\\phi$, in L104, initialized and trained, if at all? If it needs training, then it seems each layer needs such a layer. If it doesn't, then how do we make sure that resentation across layers can share the same mapping to present token probability?\n- POPE evaluates answers in Yes/No. How could decoding strategy have impact on the performance for this benchmark?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This work makes an interesting observation of how visual information exists in intermediate layers, and then overridden by knowledge prior closer to the output\n- The proposed mitigation method is lightweight and efficient.\n- The experimental results are in general better than baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper makes the observation that MLLMs' internal prior suppresses the visual information, thus leading to hallucination. Besides, they empirically observe that intermediate layers may have less such suppression. Motivated by this observation, this work proposes to combine intermediate logits with final layer projection, and demonstrate improvement in reducing hallucination via empirical study."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the presentation has a focus about image-conditioned generative language model, the methodology for finding 1 and 2, as well as the proposed layer selection and probability correction, are modality agnostic. The findings are mostly empirical, and it's unclear whether this is a general phenomenum for other models in the same size, nor for models in other sizes. \n\nThere has been quite a few literature in studying LLM's internal presentation and hallucination, only selectively listing a few as [1-5] . What the multi-modal setting brings is the strong conditional dependency, while for text-only use cases there might or might not be informative conditions. An analytical comparison on how an MLLM focuses or ignores input conditions can be more informative and persuasive in supporting the methodology. \n\nIn L191-L201 the paper compares the token output with and without the image condition. However this has been studied thouroughly in [6], which also proposes hallucination detection and mitigation method.\n\nThe method design also seems ad-hoc, there are thresholds in Eq2 and Eq3, layer interval a, b in Eq4 and the weight $\\alpha$ in Eq7. Together they contribute to amplifying the concern in the generalizability of the proposed method.\n\nI suggest to connect the empirical evidences in this paper to 1/ evidences from other papers with the same spirit, and 2/ the unique property and behavior of conditional multi-modal modeling. \n\n**Reference**\n\n[1] Liu, Junteng, et al. \"On the Universal Truthfulness Hyperplane Inside LLMs.\" arXiv preprint arXiv:2407.08582 (2024).\n\n[2] Li, Kenneth, et al. \"Inference-time intervention: Eliciting truthful answers from a language model.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[3] Zhang, Tianhang, et al. \"Enhancing uncertainty-based hallucination detection with stronger focus.\" arXiv preprint arXiv:2311.13230 (2023).\n\n[4] Azaria, Amos, and Tom Mitchell. 
\"The internal state of an LLM knows when it's lying.\" arXiv preprint arXiv:2304.13734 (2023).\n\n[5] Duan, Hanyu, Yi Yang, and Kar Yan Tam. \"Do LLMs Know about Hallucination? An Empirical Investigation of LLM's Hidden States.\" arXiv preprint arXiv:2402.09733 (2024).\n\n[6] Favero, Alessandro, et al. \"Multi-modal hallucination control by visual information grounding.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Could you clarify whether Finding 1 in Section 2.1 in the section is related to the methodology of the paper? It seems that embedding-level knowledge wasn't used to assist the model.\n- Could you follow LLAVA’s setting and conduct more extensive evaluations on comprehensive benchmarks like MMbench and MMVet, given their importance for assessing the model's overall performance?\n- Could the authors clarify the specific settings followed in the experiments presented in Table 3? How do these settings differ from those used in LLaVA?\n- Is this decoding method useful in more advanced VLLMs, such as Qwen-VL, VILA, etc.?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper provides a detailed examination of why MLLMs generate non-existent objects, offering valuable insights into the hallucination issue in image captioning tasks.\n\n- The introduction of DeCo is innovative, using preceding-layer knowledge to reduce hallucinations during inference, effectively improving output accuracy.\n\n- The method of probing across transformer layers reveals how hallucinations emerge in later layers, helping to understand MLLM behavior better."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates why MLLMs generate hallucinations, particularly in image captioning tasks, and introduces DeCo, an innovative method that leverages knowledge from earlier layers to reduce hallucinations during inference. However, I still have some concerns about this article, specifically in regard to the weaknesses.\nIf these concerns are addressed, I will consider raising my score."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I am confused by the experimental results about POPE in Table 3, as they do not seem to fully align with the result from LLAVA 1.5. \n- The authors did not perform more extensive evaluations on comprehensive benchmark such as MMbench and MMVet, which are crucial for assessing the model's overall performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A dynamic correction decoding method for MLLMs (Deco), which adaptively selects the appropriate preceding layers and proportionally integrates knowledge into the final layer to adjust the output logits for hallucination mitigation."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024mllm,\ntitle={{MLLM} can see? Dynamic Correction Decoding for Hallucination Mitigation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4z3IguA4Zg},\nnote={under review}\n}"
},
"abstract": {
"value": "Multimodal Large Language Models (MLLMs) frequently exhibit hallucination phenomena, but the underlying reasons remain poorly understood. In this paper, we present an empirical analysis and find that, although MLLMs incorrectly generate the targets in the final output, they are actually able to recognize visual objects in the preceding layers. We speculate that this may be due to the strong knowledge priors of the language model suppressing the visual information, leading to hallucinations. Motivated by this, we propose a novel dynamic correction decoding method for MLLMs (Deco), which adaptively selects the appropriate preceding layers and proportionally integrates knowledge into the final layer to adjust the output logits. Note that Deco is model agnostic and can be seamlessly incorporated with various classic decoding strategies and applied to different MLLMs. We evaluate Deco on widely-used benchmarks, demonstrating that it can reduce hallucination rates by a large margin compared to baselines, highlighting its potential to mitigate hallucinations."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Hallucination Mitigation",
"Multimodal Large Language Models",
"Decoding Strategy"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5243a318d6418f4b3733970695d24fa42b894334.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/82f20fe4945f9a0c88e0318d790a9d9f7053bee8.zip"
},
"title": {
"value": "MLLM can see? Dynamic Correction Decoding for Hallucination Mitigation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
4zQ5eIPtMp | Self-Exploring Language Models: Active Preference Elicitation for Online Alignment | main | Withdraw | Online Alignment;Large Language Model | alignment, fairness, safety, privacy, and societal considerations | Shenao Zhang;Donghan Yu;Hiteshi Sharma;Han Zhong;Zhihan Liu;Ziyi Yang;Shuohang Wang;Hany Hassan Awadalla;Zhaoran Wang | ~Shenao_Zhang1;~Donghan_Yu2;~Hiteshi_Sharma1;~Han_Zhong1;~Zhihan_Liu1;~Ziyi_Yang1;~Shuohang_Wang1;~Hany_Hassan_Awadalla1;~Zhaoran_Wang1 | 0 | 0 | 0 | 0 | 0 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nzhang2024selfexploring,\ntitle={Self-Exploring Language Models: Active Preference Elicitation for Online Alignment},\nauthor={Shenao Zhang and Donghan Yu and Hiteshi Sharma and Han Zhong and Zhihan Liu and Ziyi Yang and Shuohang Wang and Hany Hassan Awadalla and Zhaoran Wang},\nyear={2024},\nurl={https://openreview.net/forum?id=4zQ5eIPtMp}\n}"
},
"abstract": {
"value": "Preference optimization, particularly through Reinforcement Learning from Human Feedback (RLHF), has achieved significant success in aligning Large Language Models (LLMs) to adhere to human intentions. Unlike offline alignment with a fixed dataset, online feedback collection from humans or AI on model generations typically leads to more capable reward models and better-aligned LLMs through an iterative process. However, achieving a globally accurate reward model requires systematic exploration to generate diverse responses that span the vast space of natural language. Random sampling from standard reward-maximizing LLMs alone is insufficient to fulfill this requirement. To address this issue, we propose a bilevel objective optimistically biased towards potentially high-reward responses to actively explore out-of-distribution regions. By solving the inner-level problem with the reparameterized reward function, the resulting algorithm, named Self-Exploring Language Models (SELM), eliminates the need for a separate RM and iteratively updates the LLM with a straightforward objective. Compared to Direct Preference Optimization (DPO), the SELM objective reduces indiscriminate favor of unseen extrapolations and enhances exploration efficiency. Our experimental results demonstrate that when fine-tuned on Zephyr-7B-SFT and Llama-3-8B-Instruct models, SELM significantly boosts the performance on instruction-following benchmarks such as MT-Bench and AlpacaEval 2.0, as well as various standard academic benchmarks in different settings."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Shenao_Zhang1",
"~Donghan_Yu2",
"~Hiteshi_Sharma1",
"~Han_Zhong1",
"~Zhihan_Liu1",
"~Ziyi_Yang1",
"~Shuohang_Wang1",
"~Hany_Hassan_Awadalla1",
"~Zhaoran_Wang1"
]
},
"authors": {
"value": [
"Shenao Zhang",
"Donghan Yu",
"Hiteshi Sharma",
"Han Zhong",
"Zhihan Liu",
"Ziyi Yang",
"Shuohang Wang",
"Hany Hassan Awadalla",
"Zhaoran Wang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Online Alignment",
"Large Language Model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "zhang|selfexploring_language_models_active_preference_elicitation_for_online_alignment"
},
"pdf": {
"value": "/pdf/068bd2cbc229b13a233275af3792af892675429a.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/d7aaa3fdf6770dc66cb3895afd2fc7369615f10c.zip"
},
"title": {
"value": "Self-Exploring Language Models: Active Preference Elicitation for Online Alignment"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
||||||||||
4zygH3k8Zr | Replacement Learning: Training Vision Tasks with Fewer Learnable Parameters | main | Active | Machine Learning;Deep Learning;Foundation Models | foundation or frontier models, including LLMs | 1;3;5;5;8 | 4;4;4;5;3 | 1;2;2;2;3 | 1;2;3;3;3 | 1;3;2;3;3 | 4.4 | 4 | 2 | 2.4 | 2.4 | -0.406745 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I like the paper overall and don't have any major questions to ask."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. A novel training strategy that replaces frozen layer parameters with a fusion of parameters from neighboring layers, controlled by two learnable parameters, reducing the training load without compromising performance.\n2. Replacement Learning shows substantial savings in memory usage and training time and achieves better accuracy than end-to-end training.\n3. The method performs well across diverse architectures (CNNs and ViTs) and datasets, suggesting broad applicability.\n4. Extensive experiments on benchmark datasets confirm the effectiveness of Replacement Learning in surpassing the performance of standard training approaches."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Replacement Learning, a novel training technique aimed at reducing the number of learnable parameters in deep learning models while maintaining or even enhancing model performance. The approach specifically targets the limitations of traditional end-to-end training, such as high computational demand, memory usage, and parameter redundancy, which are common in deeper architectures. Rather than updating all parameters during backpropagation, Replacement Learning freezes certain layers and uses parameters from adjacent layers, controlled by two learnable parameters, to inform the frozen layers through a parameter integration mechanism. This design enables the frozen layers to leverage both local and global feature representations, balancing historical context with new inputs while reducing memory and computational costs.\n\nThe authors conduct experiments across multiple image classification datasets (CIFAR-10, STL-10, SVHN, and ImageNet) using various architectures, including CNNs and Vision Transformers (ViTs). Results demonstrate that Replacement Learning reduces GPU memory usage, training time, and the number of parameters, while achieving higher accuracy than traditional end-to-end training. Furthermore, the method shows versatility, adapting effectively across different architectures and datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I like the paper overall. However, would like to point some weaknesses which the authors have also mentioned in their limitations section:\n1. While effective on image-based tasks, the approach has not yet been tested on other domains such as NLP or multimodal tasks, which limits its generalizability.\n2. The paper could benefit from a more in-depth discussion of any limitations associated with freezing certain layers and its impact on long-term learning dependencies, especially in deeper networks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does the value of $k$ impact the performance of the models? The author should perform an ablation study on this value.\n\n2. How does the proposed method compare with other parameter-efficient training methods?\n\n3. How does the proposed method compare with other alternative backpropagation methods and methods utilizing surrounding layers during training?\n\n4. Can this method be applied to fine-tuning pre-trained networks?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is clearly written and is generally easy to follow.\n\n2. The problem being studied in the paper is becoming increasingly important recently.\n\n3. The idea is simple yet seems to be something that people haven’t tried before."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an efficient training method for deep neural networks, named Replacement Learning, which aims to reduce the number of trainable parameters, training time, and memory consumption. Replacement Learning achieves this by selectively freezing the parameters of certain layers, which then utilize parameters from adjacent layers updated through a parameter integration mechanism controlled by just two learnable parameters. This method leverages information from surrounding structures to enhance overall model performance while reducing computation and conserving GPU memory."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed method only marginally reduces the GPU memory consumption and training time compared to the baseline training method. \n\n2. The paper did not compare the proposed method with any other parameter-efficient training methods, such as [3].\n\n3. Although the paper discusses related works on alternative backpropagation methods and training utilizing surrounding layers, none of the related works are compared with the proposed methods in the experiments.\n\n4. Parameter-efficient training methods [1, 2] are widely applied in fine-tuning pre-trained networks by selectively updating a small subset of model parameters, streamlining the adaptation process of pre-trained models, and facilitating rapid deployment across various domains. However, this paper only studies the setting for training-from-scratch.\n\n[1] Zhang, Taolin, et al. \"Parameter-efficient and memory-efficient tuning for vision transformer: a disentangled approach.\" ECCV 2024.\n\n[2] He, Xuehai, et al. \"Parameter-efficient model adaptation for vision transformers.\" AAAI 2023.\n\n[3] Mostafa, Hesham, and Xin Wang. \"Parameter efficient training of deep convolutional neural networks by dynamic sparse reparameterization.\" ICML 2019."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "I must express my disappointment after spending several hours reviewing such a submission.\n\nI suggest that the authors reconsider the motivation behind the proposed method and conduct all experiments with greater seriousness and care, as many results currently seem unreasonable."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "I’d say the idea is somewhat interesting, and the paper follows the ICLR submission style."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, the authors aim to improve network efficiency and reduce parameter redundancy by introducing Replacement Learning, a straightforward approach that fixes the parameters in a layer by interpolating between two adjacent layers. The experimental results indicate that this method is effective—although I find these results somewhat difficult to believe."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Figure 1 doesn’t make any sense—using very large models on very small datasets. It would be more meaningful to test on a larger dataset like ImageNet, with different models of different scales for comparison.\n\n2. what is the motivation of such a method?\n\n3. There are several unsupported and arbitrary statements in the paper. For example, in Line 83, \"Considering that parameters from adjacent layers, if solely derived from either shallow or deep layers, often fail to simultaneously enable frozen layers to excel in learning both local and global features\" lacks justification. The relationship to local or global features is unclear here, as adjacent layers don’t inherently correspond to shallow or deep feature characteristics.\n\n4. In Eq. 9, the parameters of the i-th layer are a linear combination of those from the previous and next layers. What’s the motivation behind? It’s also unclear why this setup would yield better performance, as shown in Table 1 and Table 2.\n\n5. What is the motivation for providing detailed information about backpropagation? I don’t see any differences or novel contributions in this section.\n\n6. In the experiments, I don’t understand how reducing the number of parameters in a model can consistently improve performance across different datasets and models. This seems completely unreasonable and doesn’t make sense at all, especially I didn't see anything can help with this.\n\n7. BTW, the results on CIFAR-10, SVHN, and STL-10 aren’t reliable, as these datasets are too small to provide meaningful insights.\n\n8. Why is k=4 chosen? There should be an ablation study to justify this choice. Additionally, why is there a frozen layer every k layers throughout the networks? I assume we can select layers.\n\n9. What happens if the frozen layer is the last layer in a ResNet stage? How would parameter interpolation be handled in this case?\n\n10. The experimental settings MUST have issues. 
All the results for the vanilla models are SIGNIFICANTLY lower than original papers. This calls into question the reliability of the results presented in the paper, potentially not only ImageNet.\n\n11. Typo: feature maps can be found in Figure 4.3.1. \n\n12. I cannot tell any essiential differences among the four images in Fig. 3."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The writing of the article is quite good and the article is logical.\n2. The experiments on the classification task seem to be adequate.\n3. The charts and graphs are more beautiful and properly presented.\n4. Supplementary materials are substantial."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method of replacement Learning, which reduces computational overhead and resource consumption in deep learning. It enhances model performance while surpassing end-to-end training in efficiency and memory usage. The method has been validated on various datasets and architectures, showing its versatility and potential for broader application."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Title: The authors use \"Visual Tasks\" in the title. \"Visual Tasks\" include multiple tasks such as classification, detection, segmentation, etc., but it seems that the paper is only validated on the classification task. I suggest adding other tasks to the paper, as has been done in several recent PEFT works [1-3].\n[1] 1% vs 100%: Parameter-efficient low rank adapter for dense predictions.\n[2] Pro-tuning: Unified prompt tuning for vision tasks.\n[3] Adapter is all you need for tuning visual tasks.\n[4] Parameter-efficient is not sufficient: Exploring parameter, memory, and time efficient adapter tuning for dense predictions.\n\n2. Related work: the authors perhaps left out some of the most recent work of parameter-efficient fine tuning (PEFT). \n3. Experiments: (1) Experiments are performed only on classification tasks; (2) More parameter-efficient fine-tuning methods are available for comparison.\n\nI would consider increasing the score if the authors could provide more convincing comparative experiments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I would improve my rating if experiments are performed for points 1 and 4 in weakness and clearing my questions in points, 3 and 5.\n\nI also have a minor question:\n While it is clear both $\\theta_{i-1}$ and $\\theta_{i+1}$ are useful for approximating $\\theta_i$ why not expand it to $\\theta_{i-n}$ and $\\theta_{i+n}$, this would increase the global context of the computations with minimal increase in parameters?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Replacement Learning introduces a novel approach for training more efficient models with lesser number of parameters. The parameter integration has good future potential, especially when applied to structured pruning and upon supporting transfer learning.\n\n2. The experiments show good performance for image classification tasks for both convolutional and transformer networks for accuracy, memory and time for 1 epoch.\n3. The paper presents ablations to determine the robustness of the method and also presents complexity analysis of the method.\n4. The paper addresses the most experimental parameters in the appendix, exhibiting good reproducibility."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduced a neural network learning mechanism - Replacement Learning that replaces the parameters of selected layers with just 2 parameters a,b. The output of that layer is computer by a linear model a* activations of previous layer + b* activations of next layer. Given that similar layers in neural network produce correlated outputs, this linear combination approximates the replaced layer's outputs. The method reduces the parameter count, throughput and increases accuracy for image classification datasets(CIFAR-10,STL-10,SVHN and Imagenet-1k)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The primary concern with the paper is that the experiments are conducted by training networks from scratch. Transfer learning boosts the performance of classification tasks, for instance in the official vit paper[1], VIT B/16 upon transfer learning on CIFAR10 reached an accuracy of 98.13% while the paper achieves only 72.86%, therefore it is important to perform an experiment for examining the effect of replacement learning when transfer learnt. One possible reason why the paper omits trying transfer learning can be initialization of replaced parameters. Parameter Integration $a * \\theta_{i-1}+ b* \\theta_{i+1}$ is a linear model, this allows approximating a,b values for pre-trained layers in few steps. \n\n2. The paper only shows results on classification, effect of replacement learning on downstream tasks such as object detection and segmentation would strengthen the approach.\n\n3. The flow of gradients is not in a singular direction during replacment learning as suggested by Eq. 15 where $\\delta_{i}$ is used for gradient computation of $\\delta_{i+1}$. This could cause issues such as vanishing or exploding gradients and must be given a look at. \n\n4. While the experiments show better time per epoch, the effect of replacement learning on convergence must be studied. This is important as time for 1 epoch can be misleading when the number of epochs to converge is significantly higher than backpropagation.\n\n5. While the paper addresses that only MSA layers were chosen for replacement, where $\\theta_{i-1}$ and $\\theta_{i+1}$ also MSA layers? This is important as [2] does not guarantee correlation between MSA and MLP layers. \n\nNitpick: some figure references are wrong, Figures in the ablation display figure numbers 4.3.1, 4.3.2 and 4.3.3 which need to be corrected to figure 3,4,5. Usually it is the placement of caption and label in \\begin{figure} that causes this issue.\n\nReferences:\n[1] Dosovitskiy, Alexey. 
\"An image is worth 16x16 words: Transformers for image recognition at scale.\" arXiv preprint arXiv:2010.11929 (2020).\n[2] Venkataramanan, Shashanka, et al. \"Skip-attention: Improving vision transformers by paying less attention.\" arXiv preprint arXiv:2301.02240 (2023)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024replacement,\ntitle={Replacement Learning: Training Vision Tasks with Fewer Learnable Parameters},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4zygH3k8Zr},\nnote={under review}\n}"
},
"abstract": {
"value": "Traditional end-to-end deep learning models often enhance feature representation and overall performance by increasing the depth and complexity of the network during training. However, this approach inevitably introduces issues of parameter redundancy and resource inefficiency, especially in deeper networks. While existing works attempt to skip certain redundant layers to alleviate these problems, challenges related to poor performance, computational complexity, and inefficient memory usage remain. To address these issues, we propose an innovative training approach called Replacement Learning, which mitigates these limitations by completely replacing all the parameters of the frozen layers with only two learnable parameters. Specifically, Replacement Learning selectively freezes the parameters of certain layers, and the frozen layers utilize parameters from adjacent layers, updating them through a parameter integration mechanism controlled by two learnable parameters. This method leverages information from surrounding structures, reduces computation, conserves GPU memory, and maintains a balance between historical context and new inputs, ultimately enhancing overall model performance. We conducted experiments across four benchmark datasets, including CIFAR-10, STL-10, SVHN, and ImageNet, utilizing various architectures such as CNNs and ViTs to validate the effectiveness of Replacement Learning. Experimental results demonstrate that our approach reduces the number of parameters, training time, and memory consumption while completely surpassing the performance of end-to-end training."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Machine Learning",
"Deep Learning",
"Foundation Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c973a7f54d76355ee653aae74c5fea389c0277f1.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/56ed34603fcdcc2c5d321d8d99c5577b28189532.pdf"
},
"title": {
"value": "Replacement Learning: Training Vision Tasks with Fewer Learnable Parameters"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
506BjJ1ziZ | COME: Test-time Adaption by Conservatively Minimizing Entropy | main | Active | Test-time adaption;Out-of-distribution generalization | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;5;5;8 | 3;3;4;4 | 2;3;3;3 | 2;2;2;3 | 2;3;3;4 | 5.25 | 3.5 | 2.75 | 2.25 | 3 | 0.70014 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. The tightness for the upper and lower bounds in Lemma 1 is determined by the choice of p. By considering the simple model where f(x) outputs the same logit for all classes, the ratio between the upper and lower bound is minimized by $p=\\infty$. Is is a better choice to consider $\\| f(x) \\|_\\infty = \\max_k | f_k(x) |$?\n\n2. The usage of exp transformation of logits in the softmax function appears pivotal to the proof of Theorem 1. And if we take b(x) as a general non-negative function of f(x), the upper bound may reduce to 1. And minimizing the entropy of opinion in equation 6 can also lead to the Dirac distribution, which is over-confident. Is there a characterization for the class of functions that could be used to form a reasonable belief b(x)?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper accurately spots the paradox of EM's learning objective: minimization of entropy leads to over-confidence. And the paper proposes a simple yet effective solution to minimize entropy with respect to a probability distribution that faithfully estimates the uncertainty without over-confidence. It is a very reasonable idea to differentiate between the statistics used for prediction and for uncertainty estimation, which has long been considered the same in the TTA literature. Therefore, the algorithm enjoys the feature that the entropy minimization can be tailored to samples with different uncertainty, which is also supported by the monotonicity result in Theorem 1. The introduction of SL for uncertainty estimation is natural and perfectly compatible with softmax functions used for training models in most cases. As a result, the implementation is light-weight, model-agnostic, and extremely easy to embed into any TTA algorithms based on the EM objective. The experiments are convincing by covering both standard TTA tasks and more challenging settings of open-world TTA. A surprisingly significant 34.5% improvement on accuracy is reported on the model of SAR. And the algorithm has further addressed uncertainty estimation under continual distribution shift as a side product, which itself is also an important problem."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the model collapse of the popular Entropy Minimization algorithm for Test-Time Adaptation. Motivated by the observation that the amplification of model over-confidence causes model collapse, this paper proposes to minimize entropy with respect to an augmented output distribution that includes the probability to reject a sample, which is an uncertainty estimation technique known as subjective logic. Moreover, a logit normalization is designed in order to avoid degenerated solutions. Theoretical analysis reveals that the resulting approach upper bounds model confidence during adaptation based on the sample-specific confidence of the initial model. The resulting algorithm, COME, can be easily embedded into general EM-based TTA methods with a few lines of code revision. Experiments across TTA, open-world and life long TTA settings demonstrate a significant and consistent improvement upon current EM baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "These are not necessarily weaknesses but rather some questions that I would like to confirm with the author.\n1. How does the algorithm ensure that $b_k$ is non-negative for the computation of entropy, since $b_k$ is implemented as \n$(e^{f_k(x)}-1)/ \\sum_{k'} e^{f_k'(x)}$ which could be negative?\n\n2. Why does the algorithm keep $u$ close to $u_0$? Does it imply that the uncertainty estimation for the pretrained model is trusted? What if the pretrained model is over-confident? What about the alternative constraint $u \\geq u_0$ which seems to be more conservative as is the objective of COME?\n\n3. Average false positive rate is used in experiments to assess uncertainty estimation. However, FPR measures the correctness of uncertainty estimation with such a binary perspective: for the samples we predict 1, what is the actual proportion of 0. Uncertainty estimation considers a more sophisticated question: for the samples we predict with a probability 0.7, is the actual proportion of 1 exactly 0.7? Expected calibration error (ECE) is a better metric in this sense.\n\n4. Are there standard errors of the reported results?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- This paper is well-motivated, and the story makes sense. \n- Extensive experiments have been done to support the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose Conservatively Minimizing Entropy, a method for test-time adaptation (TTA) that improves test-time adaption by managing prediction uncertainty. Unlike traditional entropy minimization, which can lead to overconfidence, COME uses a Dirichlet distribution to model uncertainty, allowing the model to avoid forced classification on unreliable data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My major concerns include:\n- For the proposed method: Why Dirichlet distribution is used? How is the Dirichlet distribution related to the final algorithm in Algorithm 1. In addition, what is the role of delta in Algorithm 1? It seems that the authors tell a long story about their algorithm, but the algorithm itself is rather simple.\n- For the theoretical analysis: Could the authors provide a more detailed (theoretical) comparison between the proposed method and traditional EM? What is the benefit?\n- For baselines: Some baselines are missing, for example, [1] and [2].\n- For the datasets: I'm curious why the authors follow literatures on outliers detection.\n- For Theory 1: What is the exact benefit of the upper bound of model confidence? I think it will also hurt the performance on some \"confident\" samples. \n\n[1] Nado, Z., Padhy, S., Sculley, D., D'Amour, A., Lakshminarayanan, B., & Snoek, J. (2020). Evaluating prediction-time batch normalization for robustness under covariate shift. arXiv preprint arXiv:2006.10963.\n[2]Zhou, A., & Levine, S. (2021). Bayesian adaptation for covariate shift. Advances in neural information processing systems, 34, 914-927."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed algorithm introduces a rejection mechanism for unreliable samples in the TTA process, preventing the model from learning from potentially noisy labeled data. It is simple to integrate into existing TTA frameworks, and the experimental results indicate satisfactory performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the issue of model collapse in entropy minimization algorithms for test-time adaptation. The authors propose a novel entropy minimization approach that models prediction uncertainty by defining a Dirichlet prior distribution over model predictions. This method regularizes the model to favor conservative confidence for unreliable samples. Experiments on benchmark datasets demonstrate the effectiveness of the proposed algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The proposed method and its theoretical analysis rely heavily on existing techniques, which limits its technical novelty.\n\nThe core concept shares some similarity with research on learning with rejection. It is recommended to discuss how the proposed loss function compares with the loss functions used in learning with rejection, as outlined in [1].\n\nThere is a lack of experiments involving real-world applications with distribution shifts, as exemplified in [2]. Testing the proposed algorithm on real-world data streams in dynamic environments is suggested to validate its robustness.\n\nReferences:\n\n[1] Cortes, Corinna, Giulia DeSalvo, and Mehryar Mohri. \"Learning with rejection.\" Algorithmic Learning Theory (2016).\n\n[2] Yao, Huaxiu, et al. \"Wild-time: A benchmark of in-the-wild distribution shift over time.\" Advances in Neural Information Processing Systems 35 (2022): 10309-10324."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- (Eq. (6)) What if a hyperparameter $\\lambda \\in \\mathbb{R}$ is introduced as $-\\sum_{k=1}^K b_k \\log b_k - \\lambda u \\log u$? This seems to be a straightforward generalization of Eq. (6).\n\n- Is there any quantitative correspondence between $\\tau$ and $\\delta$ (Eq. (9) & (7))?\n\n- How can we set $p$ and $\\tau$ (or $\\delta$) in practice?\n\n- To my understanding, the regularizer (Eq. (9)) effectively prevents spurious training that would drive the second term in Eq. (6) to zero by enforcing $e \\rightarrow 0$. Is this correct?\n\n- To me, the proposed method seems to be a simple combination of known techniques (to clarify, I do *not* claim that simplicity alone is grounds for rejection at all). Could you clarify the differences of the proposed technique from other Bayesian approaches, confidence calibration algorithms, semi-supervised learning, and entropy regularization techniques? A quantitative and objective discussion would be preferable, which would significantly enhance the paper's contribution and clarity."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The motivation of the proposed method is clear. \n\n- The idea of transforming the optimization problem with a constraint (Eq. (7)) into a simpler form (Eq. (9)) is interesting and effective, which significantly simplifies the problem.\n\n- Several theoretical results, as well as empirical results, are provided. Theorem 1 helps quantitative understanding of the proposed method.\n\n- Code is available."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a Bayesian inference technique to address the overconfidence problem in test-time domain adaptation. Experiments demonstrate its effectiveness, achieving a SOTA performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Experiments on the dependence of the results on $p$ and $\\tau$ are lacking. I would like to see the results and discussions when $p \\neq 2$ and $\\tau \\neq 1$.\n\n- (Major) The paper applies a Bayesian inference (Eq. (5)) with a regularization to the overconfidence problem inherent in TTA. The \"EM\" (entropy minimization) mentioned in the paper is, in a more general context, the unsupervised learning using soft pseudo-label, which has a wide variety of applications beyond TTA.\nWhile the proposed method is technically sound, it would benefit from discussion in a broader context—such as unsupervised learning with a pretrained model, unsupervised domain adaptation, source-free domain adaptation, and semi-supervised learning—to emphasize its wide applicability as a key contribution.\n\n- (Major) Error bars are missing. Could you provide error bars because several performance gains are marginal?\n\n- The performance metric \"Avg.\" in the tables are nonsense, while it is actually a bad convention in the field. Obviously, a \"1% gain\" is quite different in iNaturalist and SSB-Hard, for example.\n\n- (Minor) A large part of the paper is dedicated to reviewing previously known works, such as the overconfidence problem, which hinders readability.\n\n- (Minor) Much of the paper, particularly the description of the proposed method, is redundant. A more direct tone is generally preferable in academic writing.\n\n- Overall, while the paper is well-written and the proposed method is interesting and effective, the paper would benefit from a more refined articulation of its contributions and focus. Additionally, the reproducibility issue should be addressed."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose an alternative to entropy minimization as a better learning principle for TTA tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024come,\ntitle={{COME}: Test-time Adaption by Conservatively Minimizing Entropy},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=506BjJ1ziZ},\nnote={under review}\n}"
},
"abstract": {
"value": "Machine learning models must continuously self-adjust themselves for novel data distribution in the open world. As the predominant principle, entropy minimization (EM) has been proven to be a simple yet effective cornerstone in existing test-time adaption (TTA) methods. While unfortunately its fatal limitation (i.e., overconfidence) tends to result in model collapse. For this issue, we propose to conservatively minimize the entropy (COME), which is a simple drop-in replacement of traditional EM to elegantly address the limitation. In essence, COME explicitly models the uncertainty by characterizing a Dirichlet prior distribution over model predictions during TTA. By doing so, COME naturally regularizes the model to favor conservative confidence on unreliable samples. Theoretically, we provide a preliminary analysis to reveal the ability of COME in enhancing the optimization stability by introducing a data-adaptive lower bound on the entropy. Empirically, our method achieves state-of-the-art performance on commonly used benchmarks, showing significant improvements in terms of classification accuracy and uncertainty estimation under various settings including standard, life-long and open-world TTA. Our code is available at: \\href{https://anonymous.4open.science/r/anonymous-9F46}{https://anonymous.4open.science/r/anonymous-9F46}."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Test-time adaption",
"Out-of-distribution generalization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c5c5247d760fe1da2b23102c3b3a9956b72078bb.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/f250a93f733dc54853d2348b9dcd81fb248abd50.zip"
},
"title": {
"value": "COME: Test-time Adaption by Conservatively Minimizing Entropy"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
50RNY6uM2Q | MG-LLaVA: Towards Multi-Granularity Visual Instruction Tuning | main | Active | Multi-Modality;Large Language Models | applications to computer vision, audio, language, and other modalities | 3;5;5;5;5 | 5;5;3;5;5 | 2;2;3;3;3 | 2;2;2;2;2 | 3;2;4;3;3 | 4.6 | 4.6 | 2.6 | 2 | 3 | -0.25 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to my comments above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-organized. The proposed model is evaluated on multiple tasks including general visual understanding benchmarks, VQA, and video datasets. Ablation study and runtime evaluation are also provided."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an MLLM architecture to improve the multi-granularity visual understanding abilities of multimodal models. The method follows the idea proposed in Mini-Gemini to fuse high and low-resolution visual encoders and adds object recognition from other foundation models to enhance object-level understanding ability. A series of models ranging from 3.8B to 34B are proposed and the models are evaluated on multiple popular benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The technical contribution of the paper is not very significant. The paper claims the main contribution is combining low, high-resolution, and object-level features. But the design of combining low and high-resolution features mainly comes from mini-Gemini and some modifications on the fusion module are proposed in the paper. The introduction of object-level features requires extra models and makes the base architecture more complex. \n\n- I am not convinced about the necessity of introducing the extra object-level information. Recent state-of-the-art MLLMs like Qwen2 VL [r1], LLaVA-Onevision [r2] and Pixtral [r3] all adopt a single encoder solution. I think the MLLMs themselves should have the ability to capture object-level information from the images with sufficient data and a proper training strategy. The method proposed in the paper may reduce the requirement for training data but the more complex architecture also makes the solution less general. Besides, the method also didn't show large improvements on SEED (69.4 vs. 68.9) and MMStar (35.1 vs. 37.6) compared to Mini-Gemini with the same base LLM. \n\n- Many recent MLLMs like [r2, r4] are not compared. Compared to these methods, the proposed solution is not strong enough and imore complex. \n\n[r1] Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution\n\n[r2] LLaVA-OneVision: Easy Visual Task Transfer\n\n[r3] Pixtral 12B\n\n[r4] Cambrian-1: A Fully Open, Vision-Centric Exploration of Multimodal LLMs"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The concerning questions are stated in the weakness section. Based on the weaknesses listed above, I lean to reject this manuscript at its current version."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The key claim for the paper that multi-granularity features with low-res, high-res, and object features can improve detailed understanding and object recognition skills is reasonable. The authors design the conv-gated fusing module and demonstrate its effectiveness through complete ablation studies.\n2. The series of models and benchmarks are clear and complete. The authors train the variants for MG-LLaVA based on Phi, Vicuna, LLaMA3, and Yi1.5 and conduct experiments on various multi-modal benchmarks. \n3. The overall architecture of the paper is well-structured and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes MG-LLaVA to improve the capability for recognizing multi-granularity features for current MLLMs and flatten the restriction of the resolution in visual inputs. MG-LLaVA includes the low-resolution, high-resolution, and object-level features altogether and fuses them with a conv-gate fusion module for the general visual features. The object ROIs are extracted by a pre-trained detection model for better object-level understanding skills. The paper proposes a series of MLLMs ranging from 3.8B to 34B based on various LLMs and shows strong performance across image and video multimodal benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The idea of fusing multi-granularity features is not novel, as integrating low-resolution and high-resolution images has been demonstrated effect by a range of works, including LLaVA-NeXt, LLaVA-HR, Mini-Gemini, LLaVA-UHD, etc. The difference in MG-LLaVA lies in the usage of detected objects. However, the detection operation introduces extra computational costs and external models with extra information, which is not an optimal solution. \n2. The performance comparisons against existing MLLMs are relatively weak. For example, the results of MG-LLaVA equipped with Vicuna-7B do not surpass baselines with similar efforts on some benchmarks, including SQA, TextVQA, MMStar, etc. The overall number of training data is significantly heavier than the baselines (2.6M), which makes the comparisons more unfair. \n3. Some key ablations seem lacking. The authors are encouraged to clearly show the contributions for performance with every part of the visual feature to show the difference between previous works."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The performance improvement on similarly sized LLMs in Table 2 and Table 3 appears modest.\n2. The ablation study would benefit from visual comparisons to illustrate the impact of each component, such as case studies or visualizations of feature-level effects.\n3. Some failure cases should be shown to provide insights into the method’s limitations.\n4. It is unclear if the method can handle larger images, such as 1024p or 2k resolutions."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The integration and fusion of multi-granularity features with object-centric features is novel for MLLMs.\n2. Experimental results demonstrate the effectiveness of the proposed pipeline.\n3. The paper is well-written and clearly presented."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a novel Multimodal Large Language Model (MLLM) that improves visual processing capabilities by incorporating a multi-granularity vision flow, which includes low-resolution, high-resolution, and object-centric features. This approach enhances the performance of current Large Language Models (LLMs), as demonstrated in the experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The performance improvement on similarly sized LLMs in Table 2 and Table 3 appears modest.\n2. The ablation study would benefit from visual comparisons to illustrate the impact of each component, such as case studies or visualizations of feature-level effects.\n3. Some failure cases should be shown to provide insights into the method’s limitations.\n4. It is unclear if the method can handle larger images, such as 1024p or 2k resolutions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. The SEEDBench mentioned in the article uses SEEDBench-Image, but I checked the scores for leaderboard and the other methods mentioned in the paper, and they seem to correspond to SEEDBench-Avg (which contains both video and image), so it's not clear to me whether the comparison here includes scores from the video task.\n2. If an open vocabulary detector is used, why is a tagger used to determine the bounding box instead of generating ROI directly based on text embedding?\n3. The article suggests that this approach is intuitively better for small target comprehension or counting tasks, are there any datasets in this area that show that this approach has more significant performance gains on specific tasks?\n4. I found that the Monkey model uses a similar idea to enhance the performance of the model and also proposes to augment the data with traditional CV methods for refinement, is there a comparison to this approach in the paper? For example, changing the base LLM to Qwen-7b to compare with Monkey (Li et al. 2023) and more models on this field.\n\n[1] Li, Zhang, et al. \"Monkey: Image resolution and text label are important things for large multi-modal models.\" *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*. 2024."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The goal of this paper is to release the power of MLMs on fine-grained tasks. A high resolution visual encoder is introduced to make up for the complement of previous work. And some fusion and compression strategies are introduced to ease the computational pressure. In addition to this, the article demonstrates that this new framework achieves significantly higher scores on MLMs at several scales, which fully demonstrates the effectiveness of the method. Moreover, this is the first approach to introduce object-level features in the field of MLMs, and experimentally, the article demonstrates the ability of their method to achieve higher scores than private models under MMBench and SEEDBench."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to improve the visual capabilities of MLMs (multimodal large models) by proposing a new model MG-LLaVA. limited to resources, most of MLMs nowadays just have low resolution inputs, which are challenging on fine-grained tasks. Therefore, this papet proposes a novel framework that introduces object-level feature in addition to high resolution visual encoder. Based these, the article also uses a gating-based fusion strategy as well as explicit integration on object-level feature. These approaches reduce the computational pressure introduced by high resolution images and simultaneously improve performance on fine-grained tasks. On MMBench and SEEDBench, the model outperforms even the private models GPT-4V and GeminiPro-V. The article also conducts extensive experiments to show that their framework achieves competitive scores on multiple datasets of images or videos."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. As mentioned in the article itself, the introduction of multi-granularity and multi-scale to enhance model performance is a common approach to convolutional networks, and merely migrating this approach to the field of MLMs is hardly an innovative contribution. Some of the algorithms used in the article from object detection only do some information enhancement on the input side, while many MLMs can already accomplish the object detection task by themselves nowadays.\n2. The scores achieved on both the MMBench as well as SEEDBench datasets, while respectable, are not compared to some of the more competitive models. I identified MMB as version 1 and SEEDBench as Avg based on the scores of Qwen-VL and MiniCPM-V2, and there are a number of scores on both leaderboards that are higher than the scores of MG-LLaVA work, eg. Honeybee (Cha et al., 2024), AllSeeing-v2 (Wang et al. 2024) based on Vicuna-13b at MMB-test. and then you can also find a lot of similar models with higher scores on the same substrate.\n3. In addition to Perception Benchmarks. this problem can also be found in Visual QA and Video QA. such as on the MSRVTT-QA dataset. there are already many models with very high scores in 2024. Some of them also use some methods to improve the model's ability on fine-grained tasks. eg. Flash-VStream (Zhang et al. 2024) Monkey (Li et al. 2023). The article does not seem to compare these new 2024 models.\n\nTo summarize, I think the approach proposed in the article is valid, but MG-LLaVA does not do the job of making a difference, either from an innovation perspective or from a performance perspective.\n\n[1] Cha, Junbum, et al. \"Honeybee: Locality-enhanced projector for multimodal llm.\" *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*. 2024.\n\n[2] Wang, Weiyun, et al. 
\"The all-seeing project v2: Towards general relation comprehension of the open world.\" *arXiv preprint arXiv:2402.19474* (2024).\n\n[3] Zhang, Haoji, et al. \"Flash-VStream: Memory-Based Real-Time Understanding for Long Video Streams.\" *arXiv preprint arXiv:2406.08085* (2024).\n\n[4] Li, Zhang, et al. \"Monkey: Image resolution and text label are important things for large multi-modal models.\" *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Although the method performs well on the general VQA, it lacks a comprehensive assessment of fine-grained perception capabilities. It would be more fair and convincing to compare it with region-level methods like Next-Chat and Osprey on the RefCOCO dataset. This could be accomplished by using the bounding box of the corresponding target as input.\n\n2. It is evident that using object-level features can enhance the perception ability of MLLMs. However, incorporating additional detectors introduces extra computational costs and biases. An equitable efficiency comparison is necessary. if these added costs surpass the benefits from data expansion, parameter extension, or data filtering, it results in negative optimization, as I believe is the case with MG-LLaVA. From the performance comparison, when using Vicuna 7B, MMStar exhibits lower performance than other models, indicating data leakage risk and validating the risk of bias introduced by reliance on detectors.\n\n3. Although MG-LLaVA shows improvements in general capabilities, these enhancements are marginal. The added expense of using additional detection models and object-level features should yield a greater performance boost. Moreover, during inference, reliance on detection results from other models is cumbersome. Transforming external dependencies into self-mining processes could significantly enhance the practical utility of the model."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The structure of paper is simple and easy to read, and the model implementation is very easy to follow.\n2. The idea is very straightforward, and the experiments are solid. It is reasonable to introduce multi-granularity object-level features to enhance the perceptual capabilities of Multimodal Large Language Models (MLLMs)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Summary:\n\nMG-LLaVA is a multi-modal large language model (MLLM) designed to improve visual processing capabilities by using a multi-granularity vision flow. This includes low-resolution, high-resolution, and object-centric features to enhance perception tasks requiring detailed visual information. Extensive experiments have validated its effectiveness. \n\nContributions:\n\n1. Leveraging an additional open vocabulary detection model introduces multi-granularity object-level features to enhance the perceptual capabilities of MLLMs.\n2. Extensive experiments demonstrate the effectiveness of the method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The idea appears incremental, as it simply integrates high-resolution image interpretation with region-level image understanding, resembling a trick \n2. Experimental evaluations and fair comparisons are notably lacking. Given that multi-granularity features are utilized to augment the model's perceptual abilities, evaluations should be conducted on fine-grained perception datasets. General VQA is inadequate for assessing the fine-grained perceptual capabilities of MLLM. \n3. Excessive reliance on additional detector inputs may result in suboptimal, non-end-to-end outcomes."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024mgllava,\ntitle={{MG}-{LL}a{VA}: Towards Multi-Granularity Visual Instruction Tuning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=50RNY6uM2Q},\nnote={under review}\n}"
},
"abstract": {
"value": "Multi-modal large language models (MLLMs) have made significant strides in various image comprehension tasks. However, the majority of these models are constrained to processing low-resolution images, which limits their effectiveness in perception tasks that necessitate detailed visual information. In our study, we present MG-LLaVA, an innovative MLLM that enhances the model's visual processing capabilities by incorporating a multi-granularity vision flow, which includes low-resolution, high-resolution, and object-centric features. We propose the integration of an additional high-resolution visual encoder to capture fine-grained details, which are then fused with base visual features through a Conv-Gate fusion network. To further refine the model's object recognition abilities, we incorporate object-level features derived from bounding boxes identified by offline detectors. Being trained solely on publicly available multimodal data through instruction tuning, MG-LLaVA demonstrates exceptional perception skills. We instantiate MG-LLaVA with a wide variety of language encoders, ranging from 3.8B to 34B, to evaluate the model's performance comprehensively. Extensive evaluations across multiple benchmarks demonstrate that MG-LLaVA outperforms existing MLLMs of comparable parameter sizes, showcasing its remarkable efficacy."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multi-Modality",
"Large Language Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f96b774be6db03a91f2bee5864a092daa9c52f55.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/05c109cae2229d273c5122d918632087800118f3.zip"
},
"title": {
"value": "MG-LLaVA: Towards Multi-Granularity Visual Instruction Tuning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
50UzaXh0gC | One Wave to Explain Them All: A Unifying Perspective on Post-hoc Explainability | main | Active | interpretability;feature attribution;wavelet;images;audio;3D shapes | interpretability and explainable AI | 3;3;3;5 | 3;3;5;3 | 1;2;2;3 | 2;1;1;2 | 2;3;3;3 | 3.5 | 3.5 | 2 | 1.5 | 2.75 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Developing a multi-domain explanation method is intriguing, and the discussion of key challenges is reasonable.\n2. The manuscript is well written. It is easy to follow this work.\n3. The experiments conducted with the proposed methods are adequate."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper present the Wavelet Attribution Method (WAM), which improves gradient-based feature attribution by utilizing the wavelet domain as a comprehensive framework for explaining classifiers across various domains. The findings indicate that WAM meets or surpasses SOTA metrics for faithfulness, effectively identifying not only the locations of significant features but also highlighting their structural patterns."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I believe there is a lack of sufficient baselines. It would be helpful to include more options such as LIME, SHAP, and concept-based explanations for image and audio data. Since there is no quantitative evaluation in 3D settings, adding 3D LIME, SHAP, sensitivity analysis, and Layer-wise Relevance Propagation (LRP) for 3D baselines would be a solid starting point.\n\n References:\n\n [1] \"Why Should I Trust You?\": Explaining the Predictions of Any Classifier \n [2] A Unified Approach to Interpreting Model Predictions \n [3] Towards Automatic Concept-based Explanations \n2. The experiments were conducted on only one dataset; therefore, it would be essential to include results from several datasets.\n3. In the audio results (Figure 1 and Figure 4), it is quite challenging to identify the areas being explained. Making the less important areas grayscale while highlighting the significant areas in red would improve interpretability.\n4. It would have been better to conduct a human study for the qualitative evaluation. For example, utilizing Amazon Mechanical Turk (MTurk) to ask annotators to evaluate WAM while providing explanations for other baselines would be beneficial."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could you provide a more rigorous definition of 'structural components' and clarify how they differ from standard features in the context of explainability? Specifically, can we establish any meaningful relationships between these components based on their implicit structure?\n\n2. Since inter-scale dependencies are central to your claims, what specific dependencies does the wavelet domain preserve, and how does this preservation impact attribution in practice? For example, in the case of image explanations, presenting different scales of explanations does not seem to provide substantial additional insight.\n\n3. To what extent have you included newer, state-of-the-art models in your evaluation, and how might WAM perform with models developed after 2017, considering the rapid advancements in explainability techniques? Have you considered expanding the method to a self-explainable framework by introducing a novel loss term directly in the wavelet domain?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "WAM brings an intriguing approach by utilizing wavelet decomposition to improve gradient-based feature attribution methods. The method leverages the mathematical properties of the wavelet domain, potentially addressing limitations of saliency maps that flatten hierarchical and spatial relationships. This could provide meaningful explanations by capturing features across multiple scales. In theory, WAM’s emphasis on inter-scale dependencies could enhance explainability across images, audio, and 3D shapes, offering an innovative view on XAI. Additionally, by unifying SmoothGrad and Integrated Gradients, WAM capitalizes on established approaches while potentially broadening their applicability across multiple modalities. This multimodal capability, though perhaps overstated, should be a promising generalization that is not commonly found in the comparison methods, which are often restricted to single data domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents the Wavelet Attribution Method (WAM), a novel explainability approach for deep learning models in the wavelet domain. Unlike traditional pixel-based attribution methods (saliency maps), WAM leverages the structural benefits of the wavelet domain, offering a more generalizable approach that applies to various data modalities, including images, audio, and 3D shapes. WAM decomposes the model’s decisions by analyzing how features in the wavelet-transformed space affect predictions. The method integrates gradient-based attribution techniques with wavelet decomposition to capture both where (location) and what (content) aspects of the data structure. Through empirical evaluation, WAM demonstrates superior performance on faithfulness and fidelity metrics for image and audio data, achieving enhanced clarity in the model’s decision-making process."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Despite its ambitious goals, WAM introduces several ambiguities and potential oversights. Key among them is the unclear use of 'structural components', a term the paper uses to describe feature-level insights that the method claims to provide. This concept, critical to WAM’s claims of 'what' explainability, lacks a clear definition or grounding in quantifiable relationships among components, making it difficult to ascertain whether these are indeed structural features rather than just relevant input attributes. Furthermore, while wavelet decomposition is introduced as a novel approach to attribution, the practical interpretability of multi-scale heatmaps remains underexplored in the paper. it is unclear how users can derive specific insights from these maps without a more explicit explanation. WAM’s assertion of state-of-the-art (SOTA) performance is another potential weakness, given that its comparisons rely largely on 2017 models like SmoothGrad, Grad-CAM, and Integrated Gradients, raising questions about whether the method is genuinely competitive in the context of more recent advancements in the XAI field. Additionally, the effectiveness of the faithfulness metrics used to benchmark WAM’s performance could benefit from further clarification, especially given the method’s claims of surpassing existing techniques across domains."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "Potential plagiarism: this paper's approach is almost identical to the paper \"Assessment of the Reliability of a Model's Decision by Generalizing Attribution to the Wavelet Domain\" which was presented at the workshop \"XAI in Action: Past, Present, and Future Applications workshop at NeurIPS 2023\". I understand that workshop papers are not considered as formal archived publications. However, I have some concerns about potential plagiarism, as it is unclear whether the authors of the current submission are the same as those of the workshop paper."
},
"flag_for_ethics_review": {
"value": [
"Yes, Research integrity issues (e.g., plagiarism, dual submission)"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In Figure 2 (d), I don’t know why decomposing important coefficient at each scale is necessary. Is there specific reason that scaling is important?\n\nDoes WAM can discriminate each object when multiple classes are at one image, such as Cat-dog image?\n\nWhat is GRADWCAM in figure 5?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is well-structured, presenting its concepts clearly and in an accessible manner. The theoretical foundations for using gradients in the wavelet domain are well-developed, filling a gap in the current literature where such an approach has not been extensively explored.\n\n The paper introduces a novel method by leveraging gradients in the wavelet domain, which provides a new perspective on feature attribution.\n\nThe paper's evaluation of the proposed method's faithfulness using multiple faithfulness metrics is thorough and valuable. By comparing the proposed approach across different evaluation criteria, the authors demonstrate the robustness and reliability of their method.\n\n The figures and visualizations are well-designed, enhancing the clarity of the paper. They effectively illustrate the principles of the Wavelet Attribution Method (WAM) and provide a clear understanding of how wavelet-based attributions differ from traditional pixel-based methods. The use of visual examples makes the theoretical concepts more accessible and supports the argument for the method’s efficacy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the “Wavelet Attribution Method (WAM),” a feature attribution technique in explainable AI in which attribution is performed in the wavelet domain. WAM leverages the wavelet domain to extend gradient-based feature attributions, preserving the multi-scale structures of the input data. This approach provides a unified framework applicable across various modalities such as images, audio, and 3D shapes. Empirical evaluations demonstrate that WAM matches or surpasses other methods in faithfulness metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The approach used in this paper shares similarities with the WaveletX (Kolek et al., 2022) method, which also performs saliency mapping in the wavelet domain. The primary distinction lies in the use of gradients as a mask in this work, while WaveletX optimizes the mask in the wavelet domain. However, this difference may not be significant enough to constitute a radical contribution to the field. It would be better to explicitly compare both methods and highlight the novel aspects of WAM.\n\nThe paper does not include quantitative assessments for 3D shape analysis and relies solely on qualitative results. Incorporating quantitative metrics would strengthen the evaluation and provide a more comprehensive understanding of the method's performance in this domain.\n\nAlthough the authors claim that the Wavelet Attribution Method (WAM) outperforms other approaches across different domains, the results in Table 2 suggest otherwise. Specifically, WAM does not consistently outperform Integrated Gradients, indicating that the performance advantage may not be as significant as claimed.\n\nThe experimental comparisons primarily involve methods like Integrated Gradients, GradCAM++, and SmoothGrad, which are not the most recent or best-performing approaches according to the fidelity metric. Including comparisons with more recent and state-of-the-art methods, such as LRP-αβ (Samek et al., 2016), LayerCAM (Jiang et al., 2021), Guided Backpropagation (Selvaraju et al., 2016), AttnLRP (Achibat et al., 2024), and SRD (Han et al., 2024), would strengthen the evaluation and better demonstrate WAM's superiority.\n\nThe paper does not adequately demonstrate how WAM enables an understanding of what features the model uses to make decisions. While the method highlights important wavelets, it does not clarify the specific meaning or relevance of these wavelets to the classification task. 
For instance, approaches like CRAFT (Concept Recursive Activation FacTorization for Explainability, Fel et al., 2023) offer more explicit explanations by identifying meaningful concepts (e.g., \"elephant tusk\") that recur across multiple samples. Providing a similar level of interpretability by linking important wavelets to specific semantic features would improve the explanatory power of WAM."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could you clarify why many results in Table 3 are zero when using the Faithfulness metric?\n2. Could you explain why Integrated Gradients perform best on the \\mu-Fidelity metric but perform worst in Faithfulness? Is this discrepancy due to different experimental setups?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The topic addressed in this paper is important, and the proposed method is novel. Current attribution methods struggle to explain and distinguish structural components in the input, and the integration of wavelets into attribution calculations shows promise in addressing this limitation.\n2. Extensive evaluations are conducted across multiple modalities, including images, audio, and 3D shapes.\n3. This paper is easy to follow, with detailed descriptions of evaluation metrics and experiments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors identified a gap in existing attribution methods, specifically their inability to explain the structural components of input data. The authors propose a novel wavelet-based attribution method extending to multiple modalities including images, audio, and 3D shapes. While the topic is timely and the problem addressed is of significant importance, the proposed method lacks a clearly demonstrated advantage in explaining structural components compared to existing techniques. Moreover, the quantitative results do not convincingly showcase the method's superiority. Therefore, I tend to reject this paper in its current version."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The primary weakness is the lack of significant differentiation between the proposed attribution method and existing approaches. The paper provides limited analysis or visualizations that convincingly show how WAM offers better hierarchical explanations. While some comparisons are presented (e.g., in Figures 2 and 12), I don’t see clear and enough advantages in explaining input structures. The authors should further explore or emphasize the distinctive aspects of their method.\n2. The quantitative results do not consistently demonstrate improvements over existing attribution methods. While comparisons with older methods are acceptable given the novelty of the proposed approach, the proposed method falls significantly behind in several key metrics, such as in Tables 3 and 5 (Appendix). Additionally, the results in Table 1 are concerning; given the definition of the Faithfulness metric in Eq. 9, the output should always be positive. Why, then, are many results in Table 3 reported as zero?\n3. The organization of the results section could be improved. For example, the discussion of perturbation-based attribution methods in Section 4.2 appears abruptly and feels disconnected from Section 4.1."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We generalize gradient-based explainability to the wavelet domain; expanding it to new modalities beyond images and show that it yields novel insights on model understanding."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024one,\ntitle={One Wave to Explain Them All: A Unifying Perspective on Post-hoc Explainability},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=50UzaXh0gC},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite the growing use of deep neural networks in safety-critical decision-making, their inherent black-box nature hinders transparency and interpretability. Explainable AI (XAI) methods have thus emerged to understand a model's internal workings, and notably attribution methods also called Saliency maps. Conventional attribution methods typically identify the locations - the where - of significant regions within an input. However, because they overlook the inherent structure of the input data, these methods often fail to interpret what these regions represent in terms of structural components (e.g., textures in images or transients in sounds). Furthermore, existing methods are usually tailored to a single data modality, limiting their generalizability. In this paper, we propose leveraging the wavelet domain as a robust mathematical foundation for attribution. Our approach, the Wavelet Attribution Method (WAM) extends the existing gradient-based feature attributions into the wavelet domain, providing a unified framework for explaining classifiers across images, audio, and 3D shapes. Empirical evaluations demonstrate that WAM matches or surpasses state-of-the-art methods across faithfulness metrics and models in image, audio, and 3D explainability. Finally, we show how our method explains not only the where - the important parts of the input - but also the what - the relevant patterns in terms of structural components."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"interpretability",
"feature attribution",
"wavelet",
"images",
"audio",
"3D shapes"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e2c23f05d88b97d4fbd8d1f0eac52eaf0c099818.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/c1ecfc8989db2ff4e828e769fc492ac4a0b928a5.zip"
},
"title": {
"value": "One Wave to Explain Them All: A Unifying Perspective on Post-hoc Explainability"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
50cmx4SrkM | Bayesian Analysis of Combinatorial Gaussian Process Bandits | main | Active | Multi-armed bandits;Combinatorial bandits;Contextual bandits;Gaussian processes;Energy-efficient navigation | reinforcement learning | 5;5;5;8 | 2;4;3;3 | 3;2;3;3 | 2;1;3;3 | 4;2;2;3 | 5.75 | 3 | 2.75 | 2.25 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please respond to my above concerns.\n\nIn addition, I would request the authors to add theorems / propositions after Theorems 3.2 and 3.6, without any \\gamma_t and \\beta_t terms. Or more generally, with as few variables as possible."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The main strengths of the paper is the theory. I do believe the GP semi-bandit problems considered in this paper are important, and having regret bounds for the algorithms discussed in this paper is also useful. \n\nSpecifically, it is nice to see sub-linear regret bound for all three algorithms.\n\nFurthermore, I also believe that the general techniques developed here may be useful to derive regret bounds for other bandit settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors derive Bayesian regret bounds for various algorithms applied to combinatorial volatile GP semi-bandit problems. Specifically, the authors derive regret bounds for 3 algorithms: GP-UCB, GP-BayesUCB, and GP-Thompson Sampling. In comparison to previous works, this is the first regret bound for GP-Bayes UCB, and in addition, extend the existing regret bounds for GP-UCB and GP-TS to infinite, volatile and combinatorial setting (which is also includes the popular contextual bandit setting).\n\nThe authors apply these algorithms to the problem of online energy-efficient navigation to demonstrate the performance of the various algorithms."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I think the paper lacks some clarity, and the exposition can improve significantly. For example, it requires recalling previous literature to properly understand the set-up in Section 2.1: Is A a finite set? 2^A is the set of all subsets of A? What happens when A is infinite as in Section 3.2? \n2. Though the dependency on T is sub-linear, I am not sure how to view the dependency on K. Especially in the infinite case. Are there any lower bounds for these settings? It is hard to view how good or bad the bounds are with lack of comparisons.\n3. Building on top of 2 above, I am curious to know if this is the best dependency on T you can get. I am used to seeing \\sqrt{T} regret bounds for bandit algorithms -- is this not achievable in such settings?\n4. I thought that the experimental section was too artificial. If the motivation is to solve the problem in best possible way, there are probably better ways of solving the problem (for example using RL), than naively applying the semi-bandit learning algorithms. If the point is to show the performance of various algorithms, a simple example would suffice. In my opinion, the addition of these experiments does not add any additional value to the paper, and does not change the fact that the paper's main (only) contributions are the theoretical bounds."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.\tConnection Between Theory and Empirical Results:\nThe online energy-efficient navigation application is a compelling demonstration of the framework’s practical utility. However, it would be helpful to clarify how the empirical results relate to the theoretical findings. Specifically, can the empirical results be used to verify or illustrate key observations from the theoretical analysis? If this connection is not direct, could you design controlled simulated experiments that more explicitly validate the theoretical regret bounds or insights?\n2.\tExtended Comparison in Table 1:\nIncluding the regret rates alongside the regret bounds in Table 1 would greatly enhance its utility. This addition would allow readers to quickly compare the performance of different algorithms in terms of their theoretical guarantees. An extended table with this information would provide a clearer overview of the contributions and situate the work more firmly within the existing literature.\n3.\tDiscussion of Theoretical Challenges:\nAs mentioned in the weaknesses, a dedicated section or paragraph discussing the theoretical challenges faced in deriving the regret bounds for GP-BayesUCB, GP-UCB, and GP-TS would add significant value. This discussion could cover aspects such as handling the volatility in combinatorial settings, managing the complexities introduced by semi-bandit feedback, or other technical hurdles specific to these algorithms."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tClear and Structured Presentation:\nThe paper is well-written, with clear explanations and illustrations of the research gaps. The novelty of this work is effectively communicated, making it accessible even to readers who may not be deeply familiar with the field.\n2.\tSolid Theoretical Contributions:\nThe authors provide rigorous theoretical analysis and establish new Bayesian regret bounds for multiple algorithms, including GP-BayesUCB, GP-UCB, and GP-TS. The paper addresses a significant gap in the literature by formalizing regret bounds for these settings. Full proofs are provided in the appendices, showcasing the depth of their analysis (though the correctness of these proofs was not verified).\n3.\tPractical Application:\nThe real-world application of their framework to online energy-efficient navigation is both relevant and interesting. It demonstrates the practical utility of their theoretical advancements and highlights the potential for real-world impact."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the combinatorial volatile Gaussian process (GP) semi-bandit problem and provides the first Bayesian regret bounds for the GP-BayesUCB algorithm. In addition to this novel contribution, the authors extend their theoretical analysis to include Bayesian cumulative regret bounds for the GP-UCB and GP-TS algorithms, effectively addressing a notable research gap as highlighted in Table 1. To demonstrate the practical relevance of their framework, the authors apply their methods to a real-world problem: online energy-efficient navigation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tLack of Discussion on Theoretical Challenges:\nWhile the paper provides new theoretical results, it does not clearly articulate the specific challenges encountered in deriving these results for GP-BayesUCB, GP-UCB, and GP-TS. A discussion on the theoretical hurdles and how they were addressed would provide valuable insight into the novelty and difficulty of these contributions.\n2.\tReproducibility Concerns:\nNo code is provided for the experiments. This absence raises concerns about the reproducibility of the empirical results."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weakness section"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper provides the bounds for the Bayesian regret for GP-BUCB, GP-UCB and GP-TS."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper claims to present novel Bayesian regret bounds for GP-UCB and GP-TS in the combinatorial, volatile, and infinite-arm setting. Further, they present the experimental results for a real-world application of online energy-efficient navigation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Though the work claims to present the bounds for volatile case but the proof for the bounds do not seem to consider it. As an example what would happen when the best arm is not present among the observed arms?\n2. Not significant contribution, the paper mainly builds on the works of Russo & Roy 2014, Srinivas et al 2012 and Takeno et al 2023, where in to compute the Bayesian regret one only needs to compute the expectation over the high probability regret bounds given by the above works.\n3. Lemma 3.1 the results are considered for different regimes of horizons for different cases of the ratio, why not choose the limits as 1 to T for the 3rd case, wouldn't that be a tighter bound?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Does the type of directed graph affect the applicability of the framework? For instance, how does the graph [being cyclic or acyclic] affect the performance of the framework? \n\nI did not notice any discussion in the paper about possible extensions and future directions and further impacts of their research, not even in the supplementary section. Why? Can you please clarify?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is building on top of other previously published frameworks, however, it is not a straightforward extension of the previous works. \n\nThe experiments (application of their framework) in online energy-efficient navigation problem seem to have added some novelties and value to the paper. \n\nThe paper is written in an excellent way. The explanations for the most important parts of the algorithms are clear. Also the similarities and differences (novelties) of their framework in comparison to the state-of-the-art is clarified properly."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies Gaussian process bandits in the contextual volatile semi-bandit setting. The contribution of the paper is mainly theoretical as it provides novel Bayesian regret bounds for previously designed algorithms. In addition, there is an interesting application of their framework in online energy efficient navigation. This experimental application builds on top of the previously designed experiment of the same application in bandit papers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "No synthetic data experiment. Not even in the supplementary material. In my opinion, synthetic data experiments can significantly add to the development of intuitions about the framework. Also since you have much more control over the creation of the data, it can reveal interesting properties of the framework [in comparison with state-of-the-art]. \n\nAlso, I could not find an experiment with the horizon more than 500 rounds. I am curious about the performance of the frameworks as the horizon goes well beyond T=500. I believe that proper comparison of bandit frameworks [most of the times] comes with running the experiments for long horizons. \n\nI did not notice any discussion in the paper about possible extensions and future directions and further impacts of their research."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present novel Bayesian regret bounds for GP-UCB, GP-BayesUCB and GP-TS for the combinatorial volatile Gaussian process semi-bandit problem and study the application of online energy-efficient navigation."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024bayesian,\ntitle={Bayesian Analysis of Combinatorial Gaussian Process Bandits},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=50cmx4SrkM},\nnote={under review}\n}"
},
"abstract": {
"value": "We consider the combinatorial volatile Gaussian process (GP) semi-bandit problem. Each round, an agent is provided a set of available base arms and must select a subset of them to maximize the long-term cumulative reward. We study the Bayesian setting and provide novel Bayesian cumulative regret bounds for three GP-based algorithms: GP-UCB, GP-BayesUCB and GP-TS. Our bounds extend previous results for GP-UCB and GP-TS to the \\emph{infinite}, \\emph{volatile} and \\emph{combinatorial} setting, and to the best of our knowledge, we provide the first regret bound for GP-BayesUCB. Volatile arms encompass other widely considered bandit problems such as contextual bandits.\nFurthermore, we employ our framework to address the challenging real-world problem of online energy-efficient navigation, where we demonstrate its effectiveness compared to the alternatives."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multi-armed bandits",
"Combinatorial bandits",
"Contextual bandits",
"Gaussian processes",
"Energy-efficient navigation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ba1f9290622e1b80007f67f147daa53277598ee7.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bayesian Analysis of Combinatorial Gaussian Process Bandits"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
514rdneWOX | LongHalQA: Long-Context Hallucination Evaluation for MultiModal Large Language Models | main | Active | hallucination benchmark;multimodal large language model | datasets and benchmarks | 3;5;6;8 | 4;5;4;3 | 2;2;2;3 | 2;2;2;3 | 1;2;3;3 | 5.5 | 4 | 2.25 | 2.25 | 2.25 | -0.588348 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "### 1. Logic Behind the Benchmark Creation\n1. Since all LongHalQA data is generated by GPT-4V, isn’t Model X limited to analysing GPT-4V’s specific hallucinations rather than its own?\n2. Could Model X be missing its unique hallucinations because it doesn’t generate its own descriptions or conversations in LongHalQA?\n3. Wouldn’t a model evaluation approach where Model X generates its own text reveal more relevant hallucinations, as done in Kaul et al. and Jiang et al.?\n\n### 2. Lack of Details and Clarity\n1. How are dataset annotations and GroundingDINO used to filter the LongHalQA data? Can details on this process be provided?\n2. How are objects identified in the data? Are these from VG annotations, GPT-4V data, or both?\n3. Other than GroundingDINO, which image understanding tools are used, and how?\n4. If GPT-4V produces hallucination explanation pairs, is there manual verification, especially given its acknowledged hallucination issues (L298)?\n5. In what cases is human verification used for hallucination checking, and how does it impact the dataset?\n\n### Additional Clarity\n1. Can examples be given to the prompt templates in Appendix C to clarify instructions like \"Possible Content\" etc. (Figure 6)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper correctly identifies that many prior hallucination works focus on the narrow topic of object existence at an image level. To overcome they create questions which expand the evaluation to object level descriptions, object locations, attributes etc.\n\nTheir experimental results are numerous and allow the reader see the advantages/disadvantages of each model in the different types of question in LongHalQA (Table 2-5).\n\nThe authors make comparisons of their MCQ method to a free-form generation method in Section 6 and demonstrate the advantages of using MCQ over a free-form method in terms of efficiency of evaluation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new benchmark for evaluating hallucinations in multi-modal large language models (MLLMs).\nThe paper makes use of GPT4V to generate image-level and object-level descriptions and conversation data for a set of images from VisualGenome. These wider range of generated data enables the proposed benchmark, LongHalQA, to evaluate various types of potential hallucination which go beyond the typical object level analysis (e.g. Is there a cat in the image?). The proposed method suggests two types of evaluation: (1) Hallucination Discrimination - the model must answer a MCQ about generated data (potentially containing hallucinations), to determine if the generated data contains hallucinations based on the image and the cause of the hallucination if present; (2) Hallucination Completion - the model must answer a MCQ, correctly selecting the answer which truthfully completes a partial conversation or description.\nThe authors conduct experiments on a range of open-source MLLMs and the closed-source GPT4o. They show that CoT prompting often has little or negative effect on results on LongHalQA. Finally they conduct a study in which hallucinations in free-form generations from their questions yield similar results to using their MCQ formulation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have two main weaknesses with this paper, unfortunately both of which I consider pretty major.\n\n### 1. The logic behind the creation of the benchmark itself.\n\nAs detailed in Section 4, all of the LongHalQA data comes from generations with GPT4V, this includes the descriptions, conversations etc. These generations are then analysed/modified with a number of checks. Furthermore, the question options themselves are generated with GPT4V. Therefore when evaluating a model X using LongHalQA, you are conditioning all reasoning/grounding/recognition of model X on the range of hallucinations GPT4V might make. This leaves a large range of potential hallucinations that are specific to model X which are left to be analysed, which may only be obtained by generating descriptions/conversations using model X rather than GPT4V. Taking Figure 1, GPT4V and the method used in Section 4 have created a hallucination regarding the number of people seated in the carriage. Now this is a hallucination of GPT4V + Section 4, _not_ of model X. Model X may have hallucinated the species of animal, the colour of the carriage etc, all of which is left potentially undiscovered because the hallucinations model X is asked to evaluate in LongHalQA are not its own, I therefore find the logic of this benchmark slightly confused. The free-form generations of methods like that of Kaul et al. and Jiang et al. referenced in the paper need the model being evaluated e.g. Model X to _actually generate_ its own text and therefore its own potential hallucinations.\n\n### 2. Lack of details and clarity.\n\nThe crucial step in this work is the generation of the data for LongHalQA, detailed in Section 4. I find this section to be extremely thin on details and lack clarity.\n1. L291 \"...then analyze and filter them based on dataset annotations and GroundingDINO...\", no information is given on how this process is done.\n2. 
L297, \"as illustrated in Appendix B.\" Appendix B contains a list of definitions of hallucinations used in this work.\n3. L303, \"Second, names of object present in the data are extracted, and certain image understanding tools such as GroundingDINO...\", there are no details on how objects present in the data are extracted, which data? VG annotations or names in the GPT4V generated data or both? Which image understanding tools other than GroundingDINO are used?\n3. L314-319, GPT4V is being used to generated hallucination explanation pairs, but there is no indication that manual checking is used here despite the authors accepting that GPT4V suffers from \"sever hallucinations\" (L298), the logic here seems confused on the ability of GPT4V to create such specific data which only contains one error which is also useful for evaluation.\n4. L320-L346, same arguments as above with the ability of GPT4V to this accurately.\n5. L344 \"except the hallucination checking that involves optional human verification\" does this mean human verification is used or not? What is the effect of using human verification in the data vs not?\n\nAdditionally as a more general point, the prompt templates used in Appendix C are extremely hard to follow without any examples, e.g. in Figure 6 what is \"Possible Content\"? The main text asks the reader to refer to Appendix C (L465) for details and then appears to simply paste the prompts used with no explanation of what goes where."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "No"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written and easy to follow. \n2. The motivation is reasonable and practical. I think this benchmark will accelerate the development of MLLMs on hallucination. \n3. The analysis of the experiment is relatively comprehensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a long-context hallucination benchmark. This benchmark aims to solve two problems in the existing evaluation pipeline: it is too easy for discriminative tasks and too time-consuming for open-ended generative tasks. To achieve this, the authors propose the LongHalQA, which unifies discriminative and generative tasks as multi-choice problems. Also, they formulate the construction of LongHalQA as a pipeline to construct future hallucination benchmarks with long and complex questions and descriptions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. A little small number of evaluated models.\n2. No comparison between the performance of existing methods towards solving the hallucination of MLLMs. I'm interested in whether existing methods have improved on LongHalQA.\n3. Lack of related work about the method about how to decrease the hallucination of MLLMs"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. It is known that [LLMs are non-robust multiple-choice selectors](https://arxiv.org/abs/2309.03882). How do you tackle this problem during constructing this benchmark?\n2. #419 mentions the 'ranking-based accuracy` of Fuyu-8B, while I could not find the corresponding results in Table 4. It is a writing issue?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The proposed benchmark can contribute the further development of this field tackling and analyzing the hallucination of MLLMs.\n2. The proposed unification of discriminative question and generative question largely saves the evaluation cost via reducing the decoding sequence length."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new MLLM hallucination benchmark consisting of both hallucination discrimination and hallucination completion questions. The author unifies both discriminative and generative hallucination evaluation into the form of multiple-choice question where models only have to decode one token as response. The results show the proposed benchmark is challenging for both open-source MLLMs in varying sizes and strong GPT-4o."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Experimental results in Table 8 do not suggest a strong consistency between generation accuracy and mcq accuracy. For example, Fuyu-8b and LLaVA 1.5-7b exhibits score difference -12.41 in mcq while -41.0 in generation. It is necessary to include more methods into consideration, especially thous proposed to tackling hallucination of MLLMs such as LLaVA-RLHF, RLHF-V, Silkie and POVID.\n2. Hallucination pairs are generated by GPT-4V, which are prone to generate hallucinated visual description. The author have to explain how #317 controls the generation quality."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. LongHalQA addresses the limitations of previous benchmarks by creating a comprehensive dataset of hallucination text that mirrors real-world scenarios, providing a more accurate and complex testing environment for MLLMs.\n\n2. By eliminating the need for LLM evaluators, the benchmark ensures more stable and reliable results, avoiding the randomness and computational intensity associated with LLM-based evaluations.\n\n3. The combination of both discriminative and generative evaluation tasks in a multiple-choice format allows for a holistic assessment of MLLM performance in handling hallucinations, making the evaluation process more efficient."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the issue of hallucination in multimodal large language models (MLLMs), where generated text doesn't match the input image. To solve problems with existing benchmarks, the authors propose LongHalQA, a new benchmark with 6,000 complex hallucinatory texts that mimic real-world scenarios. It introduces two tasks: hallucination discrimination and hallucination completion, which combine both discriminative and generative evaluations into a single multiple-choice format. This approach avoids the need for LLM evaluators, making the evaluation process more reliable and efficient. The paper also presents a new pipeline for creating complex hallucination benchmarks and provides experiments showing that recent MLLMs struggle with long, complex text."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. How to evaluate model with the Hallucination Completion task? What is the prefix text for evaluation? Is it the first word?\n2. Why the Hallucination Completion can be seen as generative evaluation? The multi-choice question still is discriminative question.\n3. “then analyze and filter them based on dataset annotations and GroundingDINO”: how did authors analyze and filter?\n4. The proposed benchmark doesnt \n5. Lack of comprehensive survey of hallucination on Large Vision-Language Models.\n[1] Object hallucination in image captioning\n[2] Halle-switch: Rethinking and controlling object existence hallucinations in large vision language models for detailed caption\n[3] FaithScore: Fine-grained Evaluations of Hallucinations in Large Vision-Language Models\n[4] Analyzing and mitigating object hallucination in large vision-language models\n[5] FGAIF: Aligning Large Vision-Language Models with Fine-grained AI Feedback\n6. The proposed LLM-free hallucination benchmark does not offer significant advantages, as the approach still requires various tools, LVLMs, and manual verification, leading to low efficiency.\n7. The benchmark has not demonstrated greater reliability compared to existing ones, such as through experimental validation."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose LongHalQA, an LLM-free hallucination benchmark comprising 6.5k long and complex hallucination text well aligned with real-world scenarios, with two MCQ tasks hallucination discrimination and completion for evaluation."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024longhalqa,\ntitle={LongHal{QA}: Long-Context Hallucination Evaluation for MultiModal Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=514rdneWOX},\nnote={under review}\n}"
},
"abstract": {
"value": "Hallucination, a phenomenon where multimodal large language models(MLLMs) tend to generate textual responses that are plausible but unaligned with the image, has become one major hurdle in various MLLM-related applications. Several benchmarks have been created to gauge the hallucination levels of MLLMs, by either raising discriminative questions about the existence of objects or introducing LLM evaluators to score the generated text from MLLMs. However, the discriminative data largely involve simple questions that are not aligned with real-world text, while the generative data involve LLM evaluators that are computationally intensive and unstable due to their inherent randomness. We propose LongHalQA, an LLM-free hallucination benchmark that comprises 6K long and complex hallucination text. LongHalQA is featured by GPT4V-generated hallucinatory data that are well aligned with real-world scenarios, including object/image descriptions and multi-round conversations with 14/130 words and 189 words, respectively, on average. It introduces two new tasks, hallucination discrimination and hallucination completion, unifying both discriminative and generative evaluations in a single multiple-choice-question form and leading to more reliable and efficient evaluations without the need for LLM evaluators. Further, we propose an advanced pipeline that greatly facilitates the construction of future hallucination benchmarks with long and complex questions and descriptions. Extensive experiments over multiple recent MLLMs reveal various new challenges when they are handling hallucinations with long and complex textual data."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"hallucination benchmark",
"multimodal large language model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7a68a8952bd68537c03bd83f2f0c078511d6e908.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "LongHalQA: Long-Context Hallucination Evaluation for MultiModal Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5187wrocJq | Dice-GAN: Generative Adversarial Network with Diversity Injection and Consistency Enhancement | main | Active | text-to-image;generative adversarial networks;self-attention;semantic consistency | generative models | 1;3;5;5 | 3;4;5;4 | 2;2;3;3 | 2;1;2;2 | 2;2;3;2 | 3.5 | 4 | 2.5 | 1.75 | 2.25 | 0.852803 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The idea of adding learnable noise in different training phases and correction with self-attention to improve generation diversity is novel and interesting.\n\n2. The authors demonstrate improved performance on the IS and FID metrics on the CUB dataset and on the FID metric on the MS-COCO dataset.\n\n3. The authors provide an ablation study demonstrating improvements in results by adding Diversity Injection (DI) and Consistency Enhancement (CE) modules."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes DICE-GAN, a single-stage text-to-image GAN to produce high-quality and high-diversity images with improved semantic consistency with text condition. The paper proposes two modules: The Diversity Injection (DI) module, which adds learnable noise to the image features for increasing diversity in generated images, and the Consistency Enhancement (CE) module, which allows the model to dynamically adjust the weights of different image features according to input text conditions for improved semantic consistency and fidelity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The novelty of the work is limited. The idea of feature fusion in Eq 1 in the DI module is not novel and has been explored before[1,2,3] in the context of image generation. Further, the idea of masking features in a condition-dependant manner has limited novelty. 2. Lack of clarity in Sec 3.2 writing and Fig 4. The idea behind Conditional Channel Attention mask($M_c$) and Spatial Attention attention($M_s$) is unclear. The motivation behind generating masks from both average and max channels is also unclear. Further, quantities including $G^{c}_{max}$ and $G^{c}_{avg}$ are missing in Fig 4, making it difficult to understand figure pipeline. 3. The authors claim that Dice-GAN utilizes a single-stage model structure for improved performance but are missing comparisons with multi-stage methods, including StackGAN++[4]. 4. Missing ablation studies: - Why are two feature fusion layers are needed in the DI module? How was this hyperparameter determined? - How does learnable noise $\\sigma$ vary when going from lower to higher layers in the trained model? - Missing ablation on design choices in CE module on use of average and max features and conditional channel attention and spatial attention submodule. 5. The proposed method achieves a lower IS score on the MS-COCO dataset, and the authors argue that this is due to the Inception model used in IS computation being pre-trained on the ImageNet dataset. The authors should provide results on Imagenet or Imagenet subset to back their claims.\n\n[1] Ethan Perez, Florian Strub, Harm De Vries, Vincent Dumoulin, and Aaron Courville. Film: Visual reasoning with a general conditioning layer. In AAAI, 2018. 2, 5\n[2] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, 2019. 5\n[3] Peebles, William, and Saining Xie. \"Scalable diffusion models with transformers.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 
2023.\n[4] Zhang, Han, et al. \"Stackgan++: Realistic image synthesis with stacked generative adversarial networks.\" IEEE transactions on pattern analysis and machine intelligence 41.8 (2018): 1947-1962."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please, see the weakness."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Enhanced Diversity: The Diversity Injection module injects noise and text vectors multiple times, ensuring a broad range of image outputs without sacrificing structure.\n\n2. Improved Consistency: The Consistency Enhancement module dynamically adjusts focus on image regions, aligning visuals closely with text descriptions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, they propose the diversity injection and consistency enhancement module for text-to-image generation. This method contribute to produce high-quality images with increased diversity and enhanced semantic consistency based on text descriptions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. A comparison with recently proposed text-to-image generation models is needed. Not only should there be an analysis of issues with GANs, but also recent Diffusion models, along with performance comparisons. Is there a specific reason you only compared with ShiftDDPMs in the case of Diffusion models? Please provide a detailed response.\n\n2. Please provide a detailed explanation of the table and figure captions.\n\n3. Performance comparisons on diverse datasets are required. Additionally, besides IS and FID, comparisons with other performance metrics are requested (e.g., CLIP score).\n\n4. The examples of qualitative results are too limited.\n\n5. There is a lack of experimental analysis demonstrating the effectiveness of the proposed model structure."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does Dice-GAN perform under different levels of input noise? Given the pivotal role of the DI module, understanding the model's sensitivity to noise levels could provide valuable insights into balancing image diversity and visual quality effectively.\n\n2. What measures were implemented to ensure that the DI module does not excessively degrade visual quality due to noise injection? A detailed discussion on the strategies used to balance noise injection and maintain visual quality would be beneficial.\n\n3. Does the CE module exhibit limitations in maintaining semantic consistency for longer, more detailed text descriptions? An analysis of the CE module's performance with nuanced and complex descriptions would provide a clearer understanding of its efficacy in handling diverse linguistic inputs."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduction of the DI and CE modules marks a significant advancement in text-to-image synthesis. The DI module, which injects noise at multiple stages of generation, and the CE module, which integrates word vectors with hybrid attention, effectively improve both image diversity and semantic consistency.\n\n2. This method achieves SOTA performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The manuscript introduces Dice-GAN which incorporates Diversity Injection and Consistency Enhancement modules to address critical challenges in generating high-quality, diverse images while maintaining semantic alignment with textual descriptions. Experimental results demonstrate that Dice-GAN outperforms state-of-the-art models on the CUB and MS-COCO datasets, underscoring its efficacy in enhancing visual quality and fidelity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The manuscript lacks a detailed examination of the model's performance across varying levels of text complexity. Given that text descriptions can range from simple to highly nuanced, an analysis based on text complexity would provide stronger evidence of the model's robustness and its ability to handle diverse linguistic inputs.\n\n2. The reviewer wants to see the experiment about computational efficiency.\n\n3. The study does not thoroughly investigate the model's capacity to handle various textual attributes, such as color, size, and object positioning. A more focused evaluation of these specific attributes could offer deeper insights into the model's capability to accurately reflect detailed descriptive features and further demonstrate its adaptability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Clear and well-organized presentation.\n- Superior performance to other GAN-based methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "he paper proposes Dice-GAN, an efficient attention-based text-to-image synthesis model. To enhance image diversity, a diversity injection module is introduced, incorporating noise and a self-attention mechanism. A consistency enhancement module, combining word vectors and a hybrid attention mechanism, improves semantic consistency. Experimental results on CUB and COCO datasets demonstrate Dice-GAN's superiority in image fidelity and diversity compared to existing approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Limited novelty: While the diversity injection module is a contribution, the core idea of adding noise is not entirely novel.\n- Lack of comparison to diffusion models: Given the current dominance of diffusion models in text-to-image generation, a more comprehensive comparison to state-of-the-art diffusion-based methods is essential to establish Dice-GAN's significance.\n- Insufficient discussion of other generative models: The paper could benefit from a more in-depth discussion of how other generative models, such as flow-based models and StyleGAN, could be adapted or combined with Dice-GAN to further enhance diversity and quality."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper propose a novel GAN-based model called Dice-GAN with diversity injection and consistency enhancement."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dicegan,\ntitle={Dice-{GAN}: Generative Adversarial Network with Diversity Injection and Consistency Enhancement},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5187wrocJq},\nnote={under review}\n}"
},
"abstract": {
"value": "In the field of natural language description tasks, one challenge for text-to-image modeling is to generate images that are both of high quality and diversity and maintain a high degree of semantic consistency with the textual description. Although significant progress has been made in existing research, there is still potential for improving image quality and diversity. In this study, we propose an efficient attention-based text-to-image synthesis model based on generative adversarial networks named Dice-GAN. To enhance the diversity of image generation, we design a diversity injection module, which injects noise multiple times during image generation and incorporates a self-attention mechanism to assist the generator in maintaining global structural consistency while enhancing the diversity of images. To improve the semantic consistency, we designed a consistency enhancement module, which enhances the semantic consistency of image generation by combining word vectors and a hybrid attention mechanism to achieve dynamic weight adjustment for different image regions. We conducted experiments on two widely accepted benchmark datasets, CUB and COCO. Dice-GAN demonstrated significant superiority in improving the fidelity and diversity of image generation compared to the existing approaches."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"text-to-image",
"generative adversarial networks",
"self-attention",
"semantic consistency"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3be2b3f54f85aa65260c99a6858191ea851af94d.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Dice-GAN: Generative Adversarial Network with Diversity Injection and Consistency Enhancement"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
51WraMid8K | A Probabilistic Perspective on Unlearning and Alignment for Large Language Models | main | Active | Machine Unlearning;Alignment;Large Language Models | alignment, fairness, safety, privacy, and societal considerations | 5;6;8;10 | 3;3;2;3 | 3;3;4;4 | 3;3;3;4 | 2;2;3;3 | 7.25 | 2.75 | 3.5 | 3.25 | 2.5 | -0.225494 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) The term $\\alpha$, which is used extensively in formulation of metrics and overall thorough the paper, is only loosely defined in appendix. While it may be the usual practice in math-heavy papers, it does substantially confuse readers who are not so proficient. It is quite pity to read a definition or proof and find terms that simply not defined anywhere above. Consider defining $\\alpha$ in the main text of the paper.\n2) How increased $\\lambda_r$ impacts metrics other than diversity?\n3) How does proposed EO objective impacts training efficiency (in terms of increased latency or increased VRAM requirements)? Does it limit it's applicability?"
},
"rating": {
"value": 10
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1) Authors provide 4 carefully defined metrics, along with necessary guarantee proofs. Overall, the paper is very well composed.\n2) Those 4 proposed metrics allow to comprehensively evaluate model unlearning using entire output distribution (potentially, right now it is limited to MC sampling on certain examples). This appears to be a novel contribution and addresses the lack of probablistic evaluation in the field of unlearning.\n3) This approach can be potentially extended to other tasks which require reliable evaluations.\n4) The proposed entropy optimization objective is clearly defined and is formuated as additive terms which can be applied to existing unlearning losses, which makes it easy to implement. Addressing diversity on retain samples allows to ensure that models remains useful after unlearning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper authors address the problem of reliable unlearning in LLMs. First they introduce a problem, that evaluations based on deterministic point estimates (sampled texts) fail to reliably catch the risks exposed in probablistic outputs. For the case of unlearning, authors state that existing methods rely on a single generated sequence to identify if the information leakage is present or not. Which might not be enough when assessed model might still eventually produce a text with leaked information (with some probability). Therefore authors propose a set of 4 metrics aiming accurately quantify information leakage in model output distribution. Then, authors propose a novel unlearning training objective, which aims to simultaneously minimize model's output distribution entropy on a set of \"forget samples\" while retaining diversity on \"retain samples\". The loss itself is a set of additional terms which can be applied to some existing unlearning objectives. Finally, authors conduct comprehensive evaluation of unlearning with different methods using the proposed metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) While paper title reads as \"A Probabilistic Perspective on Unlearning and **Alignment** for Large Language Models\", authors effectively **do not touch** the alignment in their work, leaving it for further research. Indeed, alignment is only mentioned in Introduction, Limitations and Conclusion. This paper would benefit from having at least small discussion on how the proposed metrics can be extended to other evaluation tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- L118: $V^\\infty$: I believe that the more accepted notation for sequences of arbitrary sequences is $V^*$, where $*$ is the Kleene star. \n - L150: \"extend\" -> \"extent\".\n - L177: \"Binary case\": This exposition feels a bit verbose. My understanding is that you are empirically fitting a Beta distribution based on whether data is leaked through your Monte-Carlo experiments, and outputting a quantile based on a desired safety level $\\alpha$. Please correct me if my understanding is not correct.\n - L197: Make this part more self-contained: especially, what is the Dvoretzky-Kiefer-Wolfowitz inequality and how does it apply here?\n - L211: Proposition 3: This lower and upper bound is reminiscent of the Darboux integrals of $F_n$. If possible, please elaborate on the relationship of this bound estimate to an underlying integral expression. Additionally, it'd be good to reiterate that $F_n$ is the empirical CDF.\n - L225: What does $\\eta_i$ bound? Please discuss.\n - L230: $M_4$: I think it'd be better to call this something like $M_\\sigma$, to be clear that this is not an estimate of the probability.\n - L246: $\\bar X + 2\\bar \\sigma$: why 2? I believe this is a choice based on the safety level, but it would be better to define this as a hyperparameter whose selection is based on the accepted risk level."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper proposes metrics that is defined on the output distribution rather than the point estimate. I think this is a remarkable step and should be considered by various other scenarios.\n - The exposition on the estimation of probability of leakage and bounds of standard deviation are intuitive and sound. \n - The set of experiments presented are convincing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a set of metrics that bounds the risk of unlearning by estimating the bounds of probability of leaks, and the deviation of such random variables. Instead of computing the metric over deterministic point estimates drawn from greedy decoding of LLMs, it proposes the use of Monte Carlo methods, and then estimates these bounds by computation over the empirical distribution. Additionally, the authors proposed some mitigation methods to reduce the risk of leakage when fine-tuning a LLM, and show through experiments that such measures offer potential for reducing the risk and undesired biases in model outputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Notations can be improved in the exposition. For example, $M_1, \\cdots, M_4$ actually stands for estimates of different variables, rather than 4 different ways of estimating the same variable. See questions below for more suggestions. \n - Some derivation are not self-contained (e.g. in Metric 2, the $\\epsilon = \\sqrt{\\frac{\\log (1/\\alpha)}{2n}}$ is not self-contained and is derived from prior work. \n - Expositions tend to be a bit too formal, and lacking some intuitions and insights. See below."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. **Novel Perspective on LLMs Unlearning Evaluation** Existing deterministic metrics are apparently insufficient for LLM unlearning evaluation, and this paper introduces a probabilistic perspective to mitigate this issue.\n\n2. **Adequate Mathematical Derivation** In this paper, the authors demonstrate the rationality of their method theoretically and empirically."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a probabilistic perspective on LLM evaluation which shifts from single point estimates towards evaluating entire output distributions offers significant potential for the field of unlearning and proposes a novel framework to directly assess the output distribution of a model. Besides, an unlearning loss based on entropy optimization and adaptive temperature scaling is also proposed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **More Discussion on LLM Alignment Evaluation** Since the titile contains \"ALIGNMENT\", more discussion on this topic should be included in this paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- For entropy optimization, I'm not sure about the intuition to minimize the entropy on Dfg. Wouldn't this lead the model to be confident on a different answer, which I think might be a strange thing to enforce.\n- It would be interesting to how unlearning (as well as the proposed optimization methods) affects the model's general ability."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Designing good evaluation metrics is important for a reserach direction such as unlearning, and this work indicates a limitation of existing metric and correspondingly proposes improved metrics.\n- The proposed metrics and methods are shown to be effective for two recent unlearning benchmarks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces a probablistic perspective for LLM unlearning evaluation. Instead of relying on deterministic greedy decoding in existing evaluation methods, this work takes a probablistic framework and derive metrics considering the high-probable output distributions. The proposed metric demonstrates the limitations of previous methods for their lack of identifying false unlearning. Moreover, a novel loss based on entropy optimization and adaptive temperature scaling are introduced to improve model unlearning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There is a lack of algorithmic description on how the proposed metrics are calculated, without which readers who lack certain statistical machine learning knowledge or who want to implement the metrics would find it difficult to understand and apply the proposed metrics.\n- The proposed metrics are only tested for the unlearning case, which surely is indeed a well-suited scenario. Nevertheless, it would nice if it can be extended to more use cases, such factuality, to verify the effectiveness of the metrics."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We demonstrate that existing deterministic evaluations in large language models are insufficient and propose a novel probabilistic evaluation framework that considers the whole output distribution of a model."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Probabilistic Perspective on Unlearning and Alignment for Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=51WraMid8K},\nnote={under review}\n}"
},
"abstract": {
"value": "Comprehensive evaluation of Large Language Models (LLMs) is an open research problem. Existing evaluations rely on deterministic point estimates generated via greedy decoding. However, we find that deterministic evaluations fail to capture the whole output distribution of a model, yielding inaccurate estimations of model capabilities. This is particularly problematic in critical contexts such as unlearning and alignment, where precise model evaluations are crucial. To remedy this, we introduce the first formal probabilistic evaluation framework in LLMs. Namely, we derive novel metrics with high-probability guarantees concerning the output distribution of a model. Our metrics are application-independent and allow practitioners to make more reliable estimates about model capabilities before deployment. Through a case study focused on unlearning, we reveal that deterministic evaluations falsely indicate successful unlearning, whereas our probabilistic evaluations demonstrate that most if not all of the supposedly unlearned information remains accessible in these models. Additionally, we propose a novel unlearning loss based on entropy optimization and adaptive temperature scaling, which significantly improves unlearning in probabilistic settings on recent benchmarks. Our proposed shift from point estimates to probabilistic evaluations of output distributions represents an important step toward comprehensive evaluations of LLMs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Machine Unlearning",
"Alignment",
"Large Language Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/76e657968639e5b60f05d2f112c07c2c586cd7c2.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/0f9019f956a111199ce1427499e912b8acedd0cf.zip"
},
"title": {
"value": "A Probabilistic Perspective on Unlearning and Alignment for Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
52Idqv2FNY | Correlating and Predicting Human Evaluations of Language Models from Natural Language Processing Benchmarks | main | Active | language models;evaluations;human evaluations;benchmarks;NLP benchmarks | datasets and benchmarks | 1;3;5;8 | 4;3;3;5 | 1;2;4;4 | 1;2;3;4 | 1;1;2;4 | 4.25 | 3.75 | 2.75 | 2.5 | 2 | 0.495519 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1) Why chatgpt 3.5? Could you justify the choice of this model? Why was chatgpt 3.5 the model chosen for comparison, is it a reasonable choice for a baseline? \n\n2) Could you generally talk about the distribution of the Likert scale that you got from the pairwise evals? Was there anything at all in which chatgpt was substantially better and generally chosen? (Assumption here that I suppose llama-2 would be usually better than chatgpt 3.5 in all cases)\n\n3) if these outputs were obtained from Chatgpt 3.5, which API was it received from, and what was the exact cutoff (e.g., ChatGPT-3.5-0604, etc.)?\n\n4) Pairwise evals ultimately show revealed preferences and model choice between two outputs. Do you think this translates to human evaluation directly on model outputs (not comparisons) on NLP parameters like coherence, semantic relevance, factual relevance, etc.? Could you comment on the choice of pairwise evals?\n\n5) Just a general question about related work: is there no related work? (while this correlation aspect might not have been explicitly studied), Studies have considered which of them is better in MT, summarization, and other NLP areas. Can you provide a more comprehensive overview of related work, including studies that have compared human evaluations and benchmarks in specific NLP tasks like MT/Summarization, and contextualize this work in the broader field?\n\n6) Could you conduct a detailed analysis of features/characteristics shared by highly correlated benchmarks? I think that would help a lot in designing benchmarks in the future."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1] The question at the center of the paper -- \"Correlation between NLP benchmarks and Human Evaluations,\" is an important central question to NLP evaluation in general. Human Evaluations are considered (somewhat so) the gold standard of evaluation but are extremely time-consuming and expensive to run; as models get more capable the human evaluations also get even more costlier because now we require experts to evaluate vs requiring less advanced folks earlier, but we can reliably construct more difficult benchmarks for models, so if these two things are correlated, perhaps lesser focus can be placed on human evaluations. \n\n2] Predicting Human Evaluations is a difficult task, and LLMs as judges are being increasingly explored as an alternative to human evaluations. The method in the paper also showcases some important insights into this process."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper initially explores the correlation between NLP benchmarks and human evaluation. With the advent of increasingly capable LLMs, human evaluations have become a steady and major alternative choice to evaluate the efficacy, performance and capabilities of LLMs. An important question that generally arises with the choice is whether NLP benchmarks are useless since human evaluations are costly and time consuming and are not always a gold standard. Where do NLP benchmarks fall? This paper explores this question and also explores the possibility of predicting human evaluations from NLP benchmarks. \n\nTwo key questions are asked:\n- To what extent are human evaluations and NLP benchmarks correlated?\n- How well can benchmarks predict expensive and time-intensive human evaluations?\n\nThe researchers use all the Llama chat-2 models (7,13,30 and 70B parameters) to establish this, which were trained on 2T tokens and fine-tuned using SFT and RLHF. Human evaluations are collected by evaluating the Llama2 chat models pairwise against ChatGPT 3.5 on dataset a of single-turn and multi-turn prompts, where responses are sampled from each model. 3 Human annotators independently provide a pairwise comparison on the Likert scale (1 to 7, where 1 means chat llama preferred and 7 means chatgpt 3.5 preferred). uThey end up doing a large-scale study spanning factual questions, language assistance, writing, procedural questions, reasoning and many more. The Chat Llama 2 models are evaluated on many popular NLP benchmarks right from AGI Eval, Ai2 Reasoning Challenge, Big Bench Hard, Boolq, commonseqa, GSM8k, MMLU, MATH, QuAC, PiQA and many more. Standard evaluation processes are used. \n\nThe findings revealed that NLP benchmarks are broadly highly correlated with human evaluations, with certain benchmarks showing particularly strong correlations. 
The most predictive benchmarks included specific subsets of MMLU (covering topics like nutrition, human aging, and sociology), portions of BIG Bench Hard, HellaSwag, ARC, RACE, PIQA, Natural Questions, QuAC, and CommonSenseQA. However, some benchmarks showed weaker correlations, including ETHOS, Kth Sentence, most of Inverse Scaling, OpenBookQA, COPA, SciBench, and SIQA.\n\nUsing overparameterized linear regression, the researchers successfully demonstrated that NLP benchmark scores could predict human evaluation scores with reasonable accuracy. Despite the small sample size of only four models, leave-one-out cross-validation showed promising results, suggesting that faster and cheaper NLP benchmarks might effectively predict slower, more expensive human evaluations in many cases.\n\nThe authors note several limitations, including the small sample size, the assumption of linearity in their predictive models, and potential limits to generalizability across different model families thus rounding up the study and paving the way for future work as well."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1] The small sample size brings into question the generalizability of these insights and results.\n\n2] Only uses GPT-3.5 as the comparative model, no insight is provided into why this is the case? And also lacks any discussion of whether chatgpt 3.5 is a reasonable choice of a baseline. \n\n3] Perhaps a granular analysis of what makes a benchmark more correlated? Is there something common in the correlated benchmarks? This would also pave the way to designing and determining better benchmarks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- More LLMs should be covered in this study. I understand the computational cost during inference and the cost in human evaluation, but four LLMs are definitely too few to support subsequent experiments.\n- I do need more details of the human evaluation in your study. What makes me most confused is the selection of the prompts. Why don't you use the same question sets as those of automated NLP benchmarks? If there are too many, you can sample from each dataset. Now there is a mismatch between the prompts (questions) in human evaluation and automated NLP benchmarks and the mapping relationship is not clear. Even if ignoring the mismatch issue, you should provide the number of prompts per area and categories used in human evaluation.\n- The experiments of one-rank decomposition in Section 3.3 need to be further explained. Can you better state your motivation of conducting this decomposition and what insights can we draw from that?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The research question of this paper—the relationship between evaluation results from automated NLP benchmarks and human evaluations —is generally important and meaningful. Recently, numerous automated benchmarks and human evaluations have emerged separately, but there has been little research on the relationship between them.\n- This paper covers many automated NLP benchmarks and includes a large-scale human evaluation, which lends a certain level of generality to its results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies the relationships between the evaluation results of automated NLP benchmarks and those of human evaluation. It mainly revolves around two research questions: how well human evaluations and NLP benchmarks are correlated with each other, how well NLP benchmarks can predict human evaluation. Specifically, the authors develop a set of 1917 prompts organized by areas, categories, and subcategories, selects four LLMs from Llama 2 family, gets their reponses to the prompts, and conducts a large-scale pairwise human evaluation. The evaluation results of the four models on many automated NLP benchmarks are also derived. Then, the paper analyzes the correlations between human evaluation and automated NLP benchmarks and finds that they are highly correlated in most cases. Furthermore, the authors decompose the correlation matrix into rank-one components and demonstrate the communities between human evaluations and NLP benchmarks. Finally, the authors tried to fit a regression model to predict the human evaluations with automatic evaluation results as inputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the idea of this paper is beneficial, many obvious flaws diminish its value.\n\n- This study uses only four LLMs, which is too few. This leads to\n - The correlations between automated NLP benchmarks and human evaluation are calculated merely from two four-dimension vectors, which is unreliable \n - Insufficient experiments for predicting human evaluation from automated NLP benchmarks, despite cross-validation conducted in the paper\n\n- The paper lacks key details, including but not limited to how the prompt set used in human evaluation is obtained, the human evaluation process and its reliability (e.g. inter-annotator agreements), details of how the correlation is calculated (what is ~150 evaluation process?), the settings for linear regression. This not only creates difficulty in understanding but also raises doubt about the rigor of this study.\n\n- The presentation of the paper could be improved. For instance, the font sizes in Fig 3, the upper part of Fig 4, and Fig 6 are too small, making it hard to read."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The motivation and research questions of this work are very interesting and significant. Considering that language models are becoming increasingly powerful, many traditional NLP benchmarks may have lost their discriminative power, leading researchers to turn to human evaluations, which are more costly and harder to reproduce. By analyzing the consistency between NLP automatic evaluation benchmarks and human evaluations, this work aims to identify highly consistent benchmarks to simulate human evaluations, thereby reducing evaluation costs. Their experiments cover a large range of datasets and settings, including constructed various categories of human evaluation data and many common NLP automatic evaluation benchmarks, demonstrating a very comprehensive effort."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work attempts to explore the correlation or consistency between common NLP automatic evaluation benchmarks and human evaluations in analyzing and comparing the capabilities of language models. They cover a wide range of datasets and conduct experiments on four different sizes of Llama 2 models and GPT-3.5, employing human annotators to provide evaluation data. They find that there is a high correlation between automatic benchmarks and human evaluations, and they identify which benchmarks show stronger correlations. Furthermore, they also fit models to predict human evaluation scores of language models from academic evaluation scores."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the research topic of this work is meaningful, it is also actually very complicated and corresponds to a more challenging analysis process. Even though the work has tried to handle the experimental data and present corresponding results as macroscopically as possible, their experimental analyses remain confusing and fail to help readers capture the main points. From Figure 1 onward, the clarity and readability of the charts decline rapidly, and by Figure 6, it becomes nearly impossible to extract any information as the fonts are extremely small and the visualized results are poorly presented.\n\nSome analytical settings in the paper are unclear or somewhat unreasonable. For example, in line 149, what does the \"evaluation process\" refer to, and why are approximately 150 combinations calculated in total? What do they represent? Additionally, if I understand correctly, it seems unfair to compare human evaluation results across mixed task types with different NLP automatic evaluation benchmarks that may focus on testing certain different abilities."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- Q1. How do the authors conduct the experiment using the Llama-2-30b model? In fact, there is no 30b model in the LLama2 series, and I assume the authors are referring to the Llama-2-34b model. However, even Llama-2-34b-chat (or the base model) is not officially released, so I wonder how this paper conduct experiments using Llama-2-34b-chat."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- This paper studies a very important problem: whether scores on NLP benchmarks correlate with human evaluation results. This can potentially guide researchers to construct better benchmarks\n- This paper studies the possibility of using NLP benchmarks to predict human evaluation results. Considering the efforts of human evaluation, the problem studied in this paper can help us develop LLMs faster."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the relationship between NLP benchmarks and human evaluation results and aims to understand what roles NLP benchmarks should play in the era of LLM. They conduct human evaluations on four Llama 2 chat models and calculate the correlation between human evaluation results NLP benchmarks, spanning from open-domain QA, MMLU, and safety/adversarial datasets. They find that most NLP benchmarks correlate well with human evaluation results, and it is possible to predict human evaluation results based on scores on NLP benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The experiment parts are highly unclear and hard to comprehend. It is unclear how the correlations are calculated between human evaluation and NLP benchmark scores. There is even no **Experiment Setup** section in this paper, and the part that most looks like the experiment setting is the first seven lines of Section 3. After repeatedly reading those lines, I still cannot understand how the correlations are calculated. Precisely,\n - How do you aggregate the scores of different shots?\n - Why do you aggregate the results of different shots?\n - What is the number of shots?\n - How is the prompt formatted?\n - How are the demonstrations in the few-shot selected?\n - Where does the number *150* on Line 148 (page 3) come from?\n - How is the human evaluation conducted? How many samples are there in the single-turn and multi-turn dialogues? How are the topics selected? What is the distribution of the data?\n - If the paper only uses four models, is the correlation coefficient calculated using only the benchmark scores of 4 models and the human evaluation results of the models? This means we are only calculating the correlation coefficient between two sets for numbers with only four elements in each set. \n\n- There are only four models used in this paper: the four chat models in Llama-2 with different numbers of parameters. The abilities of those models are very distinct, so it is easier for human evaluators or NLP benchmarks to distinguish the strengths of these models. A more challenging and realistic scenario is to consider more LLMs whose abilities are more diverse.\n\n- The figures in the paper are terribly and poorly formatted. Those figures do not seem like they are designed to be read. The font sizes in the figures are too small to read and clustered together. I need to zoom in to 400% on my computer to see the words.\n\n- Section 3.3 is highly unclear, without explaining what the *communities* this section is discussing and with no experiment settings that allow the readers to understand what is happening now. \n\nConsidering that the experiment setting is highly unclear and the results are poorly presented, it is impossible to evaluate the contribution of this work. The paper requires major refinement. However, the paper studies an important problem, and I encourage the authors to keep working on this topic."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We compare human evaluations and academic evaluations of language models against one another"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024correlating,\ntitle={Correlating and Predicting Human Evaluations of Language Models from Natural Language Processing Benchmarks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=52Idqv2FNY},\nnote={under review}\n}"
},
"abstract": {
"value": "The field of natural language processing (NLP) historically evaluated language models using benchmarks with automated metrics. However, the recent advent of highly capable chat language models (LMs) has caused a tectonic shift from NLP benchmarks to human evaluations. The relationship between these two evaluation processes is unclear and underexplored for chat LMs. Broadly, to what extent are human evaluations and NLP benchmarks correlated with one another? How well can computationally inexpensive and automated benchmarks predict expensive and time-intensive human evaluations? Which benchmarks provide predictive signals for human preference for LMs? What role, if any, should benchmarks play in the era of chat LMs? To answer these questions, we conducted a large-scale study of the relationships between human evaluations and benchmarks. We show that benchmarks are broadly highly correlated with human evaluations, and we identify which benchmarks exhibit strong correlations with human evaluations and which do not. Having established that reliable correlations exist, we fit models to predict a language model's human evaluation scores from its academic evaluation scores and provide evidence that such predictive models can generalize across LM scales."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"language models",
"evaluations",
"human evaluations",
"benchmarks",
"NLP benchmarks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/afcddce2632006a71cf976e3ad0a2c20cc185756.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Correlating and Predicting Human Evaluations of Language Models from Natural Language Processing Benchmarks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
52UtL8uA35 | Deep Networks Learn Features From Local Discontinuities in the Label Function | main | Active | Deep Learning;Feature learning;Interpretable;Local Discontinuities;Deep learning theory;Deep neural architectures;Supervised learning | interpretability and explainable AI | 3;5;8;8 | 4;4;2;3 | 3;2;2;4 | 1;2;3;4 | 2;2;3;4 | 6 | 3.25 | 2.75 | 2.5 | 2.75 | -0.852803 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Authors suggest that they use a sigmoid instead of an indicator function to make the DLGN differentiable for training. Have authors considered using a temperature parameter for the sigmoid (potentially annealed during training), as common in other continuous relaxation methods?\n- On lines 406-407 authors suggest that they use DBScan for clustering hyperplanes due to this algorithm being “robust to outliers” — why do authors expect outliers to be a significant issue here?\n- How do authors explain the observation that certain layers of the DLGN are more prone to matching the true discontinuities than others (Figure 2)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Originality. The DLGN is an interesting architecture that combines deep linear networks with the gating mechanism to construct a novel class of non-linear models. One could also treat DLGN as a novel decision tree parametrization which, when relaxed using a sigmoid, can be learned by back-propagation. To the best of my knowledge, DLGN is a novel, original model architecture, though its connection to soft decision trees should be studied more carefully."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel model architecture (deep linearly gated network, DLGN) and studies the way in which this model seeks out “label discontinuities” in the data during training. This analysis is enabled the fact that we can enumerate such label discontinuities in a DLGN. The synthetic datasets with known discontinuities are generated by another model — an oblique decision tree (ODT). The authors also show how an ODT can be constructed from a trained DLGN, for the purpose of interpretability. Finally, the paper presents results of fitting a DLGN to several UCI regression tasks, comparing performance to several tree-based, kernel-based, and NN-based baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Lack of focus. At the moment, the paper’s focus is split between a study of feature learning using DLGN (chapter 5), and a study of the DLGN itself as an expressive, yet interpretable model architecture (chapter 6). To me these are two orthogonal contributions, and the paper would be stronger if authors focused on one of these.\n- Significance. The significance of the proposed model and the feature learning study presented in the paper is not clear to me. \n 1. Capturing “label discontinuities” (are these not simply decision boundaries?) is at the core of solving a classification problem, hence it is not surprising that a model which works well on the task has to discover such hyperplanes — I don’t see an alternative way that a model can solve a task. The real question is how an over-parametrized model can correctly identify high-dimensional decision boundaries given limited data — this is the main mystery in the theory of deep learning at the moment, and one that this paper doesn’t shed much light on.\n 2. To understand the significance of the findings for deep learning, we would need to understand the relationship between a DLGN and a DNN. While it is nice (albeit, not surprising) to see how a DLGN uncovers the true “label discontinuities”, how can we know that a DNN will demonstrate the same behavior?\n 3. While the proposed architecture is appealing due to potentially being both expressive and interpretable, results on the real (UCI) datasets suggest that DLGN is comparable in performance to standard tree algorithms. There is little evidence that we should prefer the proposed architecture to e.g. the well-studied random forests, which we could also argue to be “interpretable”. (I do not consider results on synthetic data to be good evidence, given the connection between ODTs used to generate the data and DLGN.)\n 4. While the authors claim that the proposed architecture is interpretable, no interpretations of the models fit to real data are given. If interpretability is the main selling point of the architecture, I would expect a deeper analysis focused on interpretability.\n- Presentation. Please consider moving the DLGN model diagram to the main text: it’s difficult to understand the model architecture from the formulas alone. Also, please try to stick to academic language, and avoid informal phrases like “well nigh impossible”, “handily outperform”, “succeed comfortably”, “about the same”, etc."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. What's the purpose of defining the manifold $\\mathcal{M}$ in line 133?\n2. In line 239, Equation (4), is there a missing transpose on $\\mathbf{u}_{i_1}^1$?\n3. I find it difficult to understand how the computational cost of a forward pass for Equation (1) is less than twice that of a ReLU network with $mL$ nodes. Could the authors provide further clarification on this?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. Clear writing. The authors study a specific class of problems with great clarity. \n2. The authors' persistence in tackling a challenging yet manageable problem setting is commendable.\n3. Like a black-box learner, the DLGN is able to learn non-linear features. Yet, it still provides mechanistic interpretability.\n4. DLGN outperforms both tree-based and non-tree algorithms, as well as ReLU networks, in the oblique decision tree setting, while maintaining strong competitiveness on real-world tabular datasets.\n5. The authors provide a framework that paves the way for future research and development."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce a model called the Deep Linearly Gated Network (DLGN) to study feature learning, specifically in binary classification tasks defined by an oblique decision tree labeling function. They use DLGN to test the hypothesis that during training, the model’s discontinuities move towards label function's discontinuities. The paper includes evaluations on dozens of open tabular datasets to compare DLGN with ReLU networks and tree-learning algorithms."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I'm not seeing effective weaknesses."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weakness part."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper is a bold attempt to deepen our understanding of feature learning in deep learning. The hypothesis, data setup, network architecture, and approach to interpretability are unconventional, and they bring a fresh perspective to the literature. This work has the potential to inspire new ideas and serve as a valuable starting point for further exploration."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose a mechanism to intuitively explain why neural networks can surpass kernel methods through feature learning. They hypothesize that the feature learning process involves the alignment of model function discontinuities with label function discontinuities during training. To explore this, they introduce a new network architecture called the Deep Linearly Gated Network (DLGN), designed as a surrogate for ReLU networks. They argue that this architecture retains similarities to ReLU networks while offering easier interpretability. Under this framework, they provide empirical evidence showing how model function discontinuities move toward label function discontinuities during training, facilitated by feature learning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "From my perspective, while this work is intriguing, it is not yet ready for formal publication. My concerns are as follows:\n\n1. The authors reference previous studies examining the dynamics of single hidden layer models under specialized data and settings to push beyond kernel methods or deep linear models (Damian et al., 2022; Ba et al., 2022). They appropriately note that these analyses, often focused on specific data settings like the parity function, fall short of addressing the needs or behaviors of deeper networks. However, numerous works also investigate complex feature learning with deep neural networks, such as https://arxiv.org/abs/2305.06986 and https://arxiv.org/pdf/2311.13774. These studies should be acknowledged and compared with the current work.\n\n2. I am skeptical about the extent to which the new architecture resembles a ReLU network. Additionally, it is unclear how this intuition or design could extend to CNNs or transformers. Besides, the current version only applies to binary classification, whereas a universal feature learning mechanism should ideally apply across setups, such as binary classification, multi-class classification, and regression. I am unsure how the proposed intuition extends to these broader contexts. For example, existing papers (such as the two mentioned above) demonstrate that neural networks can efficiently learn $h = g \\circ p$ with $p$ quadratic and $g$ nonlinear via feature learning. How would this intuition explain such cases?\n\n3. In Section 3, the introduction to ODT progresses too quickly. The paper would benefit from a more mathematically detailed introduction to the new concepts presented in this section."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) Why do you even introduce a manifold $M$ in your notation in line 133? It is never used \n2) On line 192, I don't think I agree that \"no other hyperplane other than the internal nodes has this property\". e.g. consider any other hyperplane that is not one of the internal nodes in the plot in Figure 1; doesn't it also have points of both labels on either side of it?\n3) You introduce this formal notion of $\\gamma(R, f)$ as the local discontinuity coefficient in Section 3, but then never mention it anywhere later in the paper. How would you explain the results in say Figure 2 and 3 in the context of this quantity? Could you elaborate on this a bit?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposed theory is intriguing and indeed very interesting. The plots in Figures 2 and 3 are indeed striking---they illustrate how faithfully the DLGN neuron activations are increasingly aligning with the ODT hyperplanes over the process of training. These observations support the theory put forth by the authors about neural network architectures possibly picking up on the discontinuities in the labelling function. The clustering procedure to extract a decision tree is also very interesting, and empirically seems to work well (as suggested by Figure 3). Overall, I find the theory proposed by the authors, along with the striking empirical illustrations, very interesting. These could motivate further theoretical investigations for such phenomena."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a theory to explain how feature learning happens in neural networks. The authors posit that neural networks align (or rather, get attracted to) the discontinuities in the way the target label changes over the input domain. As a pilot test for this theory, the authors consider a setting where the target function to learn is an Oblique Decision Tree (ODT). ODTs correspond to decision trees where each internal node is a linear threshold. Thus, the hyperplanes associated to the nodes in the decision tree split up the input space into different labeled regions (see Figure 1). The hyperplane corresponding to the root is \"most discontinuous\" in terms of how the label changes on both sides of it (owing to further and further splits by its many descendants). The authors posit that the training of non-linear neural networks procedurally aligns the model with these discontinuities.\n\nAs a tractable model to further empirically study this theory, the authors propose a novel neural network architecture which they term Deep Linearly Gated Networks (DLGNs). DLGNs are somewhere in between standard nonlinear (e.g., ReLU activated) deep networks and deep linear networks. The function computed by a DLGN can be written as a large summation of terms of the form $f_\\pi \\cdot g_\\pi$. Here, $f_\\pi$ is a product of the signs of activations of neurons in a deep linear network (a single neuron from each layer). $g_\\pi$ is a product of the activations themselves (a slight detail is that the weight matrices producing the activations in $f_\\pi$ and $g_\\pi$ are different). Thus, we can think of $f_\\pi$ as an indicator of the intersection of halfspaces, while $g_\\pi$ is the weight we add up if the indicator turns out to be true.\n\nThe authors fix the target function to be an ODT, and train the DLGN architecture on synthetic data labelled by the ODT. The surprising observation is given in Figure 2. At the end of training, if one plots the hyperplanes corresponding to the linear threshold at every neuron, one observes that the hyperplanes (at least in the later layers of the DLGN) align very well with the hyperplane splits in the ODT! Furthermore, Table 1 shows that most of these linear thresholds align with some hyperplane in the ODT. That is, the neurons in the DLGN architecture are \"getting attracted\" to aligning with some internal node in the ODT.\n\nThe authors next use this empirical observation to extract a decision tree out of a trained DLGN. Namely, they plot the linear thresholds corresponding to all the activations after training, and then cluster these thresholds. The center of the largest cluster is chosen to be a root node hyperplane, and then we recurse this procedure on data on either side of this hyperplane. This procedure is pictorially well illustrated in Figure 3. Again, one can see that on performing such clustering-based decision tree generation, the first cluster center aligns very well with the root node hyperplane, and the phenomenon continues as we recurse. The upshot is that one can extract an interpretable model from the DLGN.\n\nFinally, the authors perform experiments on real-world classification data to illustrate that the classification accuracy of DLGNs is somewhere in between standard ReLU networks and other simpler non-neural network algorithms. Thus, DLGNs have the added benefit of being interpretable, but more powerful than standard decision trees."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The pilot experiments done by the authors are admittedly specialized. Namely, they don't actually consider standard ReLU networks, but only the DLGNs they propose. Furthermore, they only consider cases where the target function is conveniently an ODT. While this is totally okay as an initial starting point, it does raise the question about whether such empirical phenomena of the neurons aligning with the discontinuities also arise in cases where the target function has discontinuities of a different nature (like curvy discontinuities, etc). But this is not a significant weakness, as it seems beyond the scope of a pilot study. But it would be really interesting to visualize the activations of the neurons (say with quadratic thresholds) for when the target function is also composed of curvy discontinuities."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Deep neural networks outperform kernel machines by learning features through discontinuities in label functions during gradient descent training, showing better performance and offering greater interpretability compared to ReLU networks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024deep,\ntitle={Deep Networks Learn Features From Local Discontinuities in the Label Function},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=52UtL8uA35},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep neural networks outperform kernel machines on several datasets due to feature learning that happens during gradient descent training. In this paper, we analyze the mechanism through which feature learning happens and use a notion of features that corresponds to discontinuities in the true label function. We hypothesize that the core feature learning mechanism is label function discontinuities attracting model function discontinuities during training. To test this hypothesis, we perform experiments on classification data where the true label function is given by an oblique decision tree. This setup allows easy enumeration of label function discontinuities, while still remaining intractable for static kernel/linear methods. We then design/construct a novel deep architecture called a Deep Linearly Gated Network (DLGN), whose discontinuities in the input space can be easily enumerated. In this setup, we provide supporting evidence demonstrating the movement of model function discontinuities towards the label function discontinuities during training. The easy enumerability of discontinuities in the DLGN also enables greater mechanistic interpretability. We demonstrate this by extracting the parameters of a high-accuracy decision tree from the parameters of a DLGN. We also show that the DLGN is competitive with ReLU networks and other tree-learning algorithms on several real-world tabular datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Deep Learning",
"Feature learning",
"Interpretable",
"Local Discontinuities",
"Deep learning theory",
"Deep neural architectures",
"Supervised learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c93c3a722c5f00913c7a9dd3e7e4d65ee4199ca5.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/3af52b256e10d9af378b88cd84a90a9c7265042e.zip"
},
"title": {
"value": "Deep Networks Learn Features From Local Discontinuities in the Label Function"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
52XG8eexal | State-space models can learn in-context by gradient descent | main | Active | state-space models;in-context learning;linear recurrent networks;mesa-learning | foundation or frontier models, including LLMs | 3;3;3;5 | 5;4;5;3 | 3;4;3;2 | 1;1;2;2 | 2;3;3;3 | 3.5 | 4.25 | 3 | 1.5 | 2.75 | -0.870388 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see weaknesses 1-3."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) Enhances understanding of inductive biases in SSMs for ICL tasks.\n\n(2) Bridges key domains such as SSMs, ICL, and mechanistic interpretability.\n\n(3) Demonstrates generality by extending results to multi-step cases, multiple layers, and multi-dimensional data.\n\n(4) Empirical analysis shows practical alignment with the theory-based construction in simple cases.\n\n(5) Clarity: While some aspects could be improved (such as adding a figure to illustrate the main ideas in Section 3.1), the paper is clear, well-motivated, and easy to follow. It starts with simple cases and provides clear definitions, making it an enjoyable read!"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates the ability of SSMs to perform ICL through gradient descent during the forward pass over recurrent steps. It provides a theoretical analysis of various ICL scenarios, demonstrating that simple SSMs, equipped with input and output-dependent processing, can accurately mimic gradient descent when data is presented as a sequence. This theory offers an explanation for the capacity of modern SSMs to execute ICL and outlines a network circuit capable of handling such tasks. Empirical experiments confirm that the proposed circuit can effectively learn and perform small-scale synthetic tasks in practice."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The authors have **overlooked important related work in this domain**. For example, [1] presented at NeurIPS 2016, demonstrates that **RNNs can perform gradient descent during the forward pass**. Additionally, Section 3.1 in the current paper shares several similarities with Section 2 of [1]. To be clear, this is not an accusation of plagiarism, but rather an indication of missing key references. While there is a difference in scope (RNNs versus SSMs), this oversight reduces the originality of contribution #1. I kindly ask the authors to specify the principal differences between the approach taken by [1] and the approach used by SSMs in their work to better highlight the novel contributions. \n\n(2) **Overlooks simple alternative approaches:** While the theoretical analysis is accurate, the authors overlook significant alternative approaches. References [2-4] demonstrate the connection between S6 layers and attention, showing that S6 can be more expressive than attention without the softmax. Additionally, various ICL studies for transformers omit the softmax (see [5-6] as an example), allowing direct conclusions that could extend to SSMs. **Given the extensive exploration of ICL capabilities in transformers, discussions on the ICL potential of SSMs should consider this reduction approach**. I recommend that the authors evaluate whether this approach could yield additional theoretical developments to strengthen their analysis. Moreover, I suggest that the authors explicitly state the advantages of their approach compared to the proposed alternatives. For instance, Section 3 introduces specific circuits that cannot be achieved through simple reductions. \n\n(3) **Understanding ICL in SSM variants can be enhanced** by examining their sub-components. Previous research indicates that S6 layers exhibit significantly better ICL capabilities than earlier SSMs [7]. 
However, while the authors highlight input and output-dependent processing as crucial features, they do not empirically ablate these features across various ICL tasks, nor do they provide a detailed theoretical analysis to substantiate this claim explicitly. I recommend adding a subsection that explores these aspects in depth. It is also important to note that input- and output-dependent processing can be implemented through various gating mechanisms. Hence, this claim could be considered somewhat ambiguous, as gated state-space models have been previously studied without demonstrating the same level of ICL capabilities as models like Mamba / S6.\n\n\n(4) **The claims regarding GD-SSM** (“Our construction, which we call GD-SSM, is not restricted to in-context learning tasks and performs well on general-purpose prediction problems”) do not hold, and much **more empirical analysis is required to justify** them.\n\n___\n\n[1] Learning to learn by gradient descent by gradient descent. Andrychowicz et al.\n\n[2] The Hidden Attention of Mamba Models. Ali et al.\n\n[3] Transformers are SSMs: Generalized Models and Efficient Algorithms Through Structured State Space Duality. Dao et al.\n\n[4] Understanding the differences in Foundation Models: Attention, State Space Models, and Recurrent Neural Networks. Sieber et al.\n\n[5] Transformers Learn In-Context by Gradient Descent. Oswald et al. (see section 2)\n\n[6] Why Can GPT Learn In-Context? Language Models Implicitly Perform Gradient Descent as Meta-Optimizers. Dai et al. (see section 3.1)\n\n[7] Can mamba learn how to learn? a comparative study on in-context learning tasks. Park et al."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can you explicitly define GD-SSM? Perhaps even provide pseudo-code? What are its learnable parameters? It is defined in 283 after the fact, but I think a more explicit definition would be helpful.\n\nQuestions related to experiments and first two bullets in Weaknesses section above:\n\n2. How do the results in this paper explain the success of recent SSM variants (as claimed in lines 488)? Line 052-053 say : \"Which features of these successful models contribute to in-context learning, as opposed to earlier variants? Using a constructive approach, we pinpoint input-dependent input and output processing, as the key features required for in-context learning\". I hoped the paper would provide insight into this question. However, it seems that the constructed GD-SSM is quite different from the currently used SSM architectures (e.g. Griffin, Mamba), and performs much better empirically in the experiments. Meanwhile the currently used SSM architectures do not seem to perform regression in-context learning (at least for one layer). So how do the results in this paper answer (or point to answering) the first question in line 052 or support the claim in line 488? This is my main question regarding this paper. I list some additional questions below that are an attempt to clarify some of the presented empirical results below.\n\n3. Why does Griffin do so poorly compared to linear transformers and S5? Shouldn't Griffin be a mix of sliding window attention and SSM (the RG-LRU), making it similar to the GD-SSM formulation? Or is the model referred to as Griffin just RG-LRU?\n\n4. Why does the time-invariant S5 appear to consistently outperform the input-dependent Mamba and Griffin models (even though all fail to converge)? I would have expected the models with input-dependent dynamics to perform ICL better. Is this difference at all interesting?\n\n5. 
Does it benefit the other architectures (1 layer linear attention, S5, Griffin, Mamba) to also provide them with the local self-attention preprocessing? Shouldn't they then be able to potentially learn the GD-SSM model if provided this? If not, why not?\n\n6. Can 2 layers of the SSMs solve the task? Note that combining the local self attention with the diagonal SSM is a combination of 2 sequence processors.\n\n\nOther questions and comments:\n\n7. Where is the 1-step GD in Figure A?\n\n8. In Figure 4B, the 1 or 2 layer GD-SSM formulation always seems to achieve lower loss than the corresponding number of steps GD model. Why is this? What happens if we compare more layers and more steps, e.g. 5 or 10 steps/layers? \n\n9. Note that the color scheme in Figure 4C etc is hard to read and tell which line corresponds to which model.\n\n10. Note that LSA is first introduced in equation 2, but only later defined in line 119."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- To my knowledge, the insight viewing SSMs as a gradient accumulator allowing them to emulate gradient descent on in-context learning tasks is novel and the combination with local self-attention for preprocessing is interesting\n- The mathematical theory appears to be sound\n- The presentation of the material and step by step walk through of the theory from simple cases to more complex is clear and helpful\n- The theoretical findings potentially point to a mechanistic understanding of architectural requirements (for SSMs or other models) that enable types of in-context learning"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates how state-space models (SSMs) can perform in-context learning through gradient descent. The authors provide both theoretical and empirical evidence that SSMs augmented with local self-attention can emulate gradient descent on implicit regression models. Their key insight is that the diagonal linear recurrent layer in SSMs can act as a gradient accumulator."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the paper does a good job of explaining the theory and formulation of GD-SSM and empirically validating GD-SSM on regression tasks, I didn't feel like it shed that much insight into the currently used SSM variants used in practice.\n - Note that line 488 says: \"These findings not only explain the success of recent SSM variants in in-context learning tasks but\nalso provide valuable insights for the design of future sequence models.\" \n - Note that Lines 052-053 say, referring to the modern SSM variants : \"Which features of these successful models contribute to in-context learning, as opposed to earlier variants? Using a constructive approach, we pinpoint input-dependent input and output processing, as the key features required for in-context learning\". \n - But I do not think the current version of the paper supports either of the claim that this paper sheds light on these questions \n - The approach constructed seems to be very different from those used in practice. In addition, the methods commonly used in practice do not appear to do well on the regression ICL tasks in this paper. So it is unclear what I should take away from this related to prior SSM methods?\n - I think the empirical results were an opportunity to provide insight here, but didn't seem to fully achieve this. Please see my questions below which may clarify this for me.\n\n- Related to the above, the experimental section is light on architectural details, making it hard to determine what exactly is being compared empirically and what conclusions can be drawn. Please include more experimental details including the architectures and hyperparameters.\n\n- The paper is often unclear on the terminology of local self-attention and local linear self-attention. The formulation in Section 3.2 appears to only require local linear self-attention, yet other times in the paper local self-attention is used. In the related works section the two are contrasted. 
I would recommend being very explicit and consistent on this point as the two are very different. \n\n- The paper is limited to regression style in-context learning. This is interesting and amenable to theoretical analysis, but also limits the impact of the investigation. See https://arxiv.org/abs/2401.12973 and https://arxiv.org/abs/2402.04248 for other papers that have empirically investigated other types of in-context learning in different architectures."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "To the best of my understanding and as mentioned above, the results presented here are a subset of the results presented in Zucchet et al. 2023. Can the authors compare their work to that paper and highlight what their insights are?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The paper is overall well written and easy to follow. The theoretical part is sound and experiments convincingly demonstrate the paper's claims in toy settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In-context learning (ICL) is one of the surprising capabilities of LLMs at scale. Seminal results have shown that Transformer-based architectures have this ability and more recent ones confirmed that SSMs also do. This work studies shows that SSMs can implement ICL by gradient descent. They provide a constructive proof showing that 1 layer can implement one GD step and confirm empirically on toy tasks that the networks find this solution."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I am concerned by the novelty of this paper. [Zucchet et al. 2023](https://arxiv.org/abs/2309.01775) show that 1 SSM layer can implement any linear self-attention layer. This results implies that any ICL algorithm LSA can implement, an SSM can. This holds for 1 GD step studied in this paper, but also for any other algorithm the community has been studying over the last few years. Additionally, this paper also have very similar experiments to the ones presented here."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. See Weakness.\n2. The behavior of GD and 1-D GD-SSM appears different. Could the authors provide an explanation for this discrepancy? Clarifying this would help the readers better understand the distinctions in learning dynamics between these approaches.\n3. Figure 3: The font size in Figure 3 is too small and could be increased for readability."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper demonstrates that the state-space model with LSA can learn in-context learning tasks on linear and nonlinear regression problems. While I do not dispute the claim that state-space models can achieve in-context learning via gradient descent, my concern lies in whether the specific modification introduced warrants the detailed calculations provided. The conclusions, as presented, seem to offer limited insights into how this work might advance research on improving in-context learning capabilities. A clearer connection to meaningful improvements in this area would significantly enhance the paper's contribution.\n2. The experiments compare the state-space model with Griffin and Linear Transformer, but they are restricted to shallow architectures of 1 or 2 layers. This setup is inconsistent with typical in-context learning scenarios, where models need to be sufficiently large for emergent phenomena and meaningful in-context learning capabilities to surface. The current experiments do not effectively capture these dynamics, making it difficult to observe such phenomena in shallow sequence models. Expanding the experiments to include deeper architectures would provide a more realistic assessment of in-context learning in state-space models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper imitates Transformers learn in-context by gradient descent(https://arxiv.org/abs/2212.07677). This paper proves that a single structured state-space model layer with local self-attention can reproduce the outputs of an implicit linear model with least square loss. (The task considered is not general)"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper addresses a theoretical statement about the transformer model, specifically that it can learn in-context through gradient descent. However, this concept is already well-known and documented. In theoretical research, it is widely recognized that transformers, convolutional models, and recurrent models, such as state-space models, are universal approximators and are capable of learning continuous target relationships. Therefore, demonstrating the same for state-space models does not appear to offer significant theoretical advancement. If the authors wish to underscore the importance of this work, I would recommend showing that previous work in approximation theory does not extend to the in-context learning case. Without this distinction, the contribution seems to fall within a subset of known results that hold limited value for theoretical study. \n2. The notion of in-context learning, as presented, lacks practical interest. Simply stating that a model \"can learn\" through in-context learning is insufficient, as the same argument could be made for various methods, including Newton's or quasi-Newton's methods. There is no compelling reason for practitioners to assume that, when state-space models engage in in-context learning, the behavior in terms of loss convergence or asymptotic rates would align with that of gradient descent. Clarifying this distinction would strengthen the paper’s contribution. Could you provide empirical comparisons of convergence rates or asymptotic behavior between your method and alternatives like Newton's or quasi-Newton's methods.\n3. The paper introduces a state-space model with local self-attention, which is not a commonly adopted approach in practice. It would be more beneficial to align the framework with models that are widely used in real-world applications. A method akin to the linear transformer might be more appropriate and could provide a better point of reference for practical utility."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "the mechanism of in-context learning in state-space models corresponds to gradient descent"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024statespace,\ntitle={State-space models can learn in-context by gradient descent},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=52XG8eexal},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep state-space models (Deep SSMs) have shown capabilities for in-context learning on autoregressive tasks, similar to transformers. \nHowever, the architectural requirements and mechanisms enabling this in recurrent networks remain unclear. \nThis study demonstrates that state-space model architectures can perform gradient-based learning and use it for in-context learning.\nWe prove that a single structured state-space model layer, augmented with local self-attention, can reproduce the outputs of an implicit linear model with least squares loss after one step of gradient descent.\nOur key insight is that the diagonal linear recurrent layer can act as a gradient accumulator, which can be `applied' to the parameters of the implicit regression model.\nWe validate our construction by training randomly initialized augmented SSMs on simple linear regression tasks. The empirically optimized parameters match the theoretical ones, obtained analytically from the implicit model construction. \nExtensions to multi-step linear and non-linear regression yield consistent results.\nThe constructed SSM encompasses features of modern deep state-space models, with the potential for scalable training and effectiveness even in general tasks. \nThe theoretical construction elucidates the role of local self-attention and multiplicative interactions in recurrent architectures as the key ingredients for enabling the expressive power typical of foundation models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"state-space models",
"in-context learning",
"linear recurrent networks",
"mesa-learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/66239f66cc8f26c06fb2944bd73389844224621a.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "State-space models can learn in-context by gradient descent"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
52x04chyQs | On the Completeness of Invariant Geometric Deep Learning Models | main | Active | geometric deep learning;invariant models;completeness;expressiveness;graph neural network;subgraph graph neural network | learning on graphs and other geometries & topologies | 5;5;5;6 | 2;3;3;2 | 2;3;2;3 | 2;3;3;3 | 3;1;2;2 | 5.25 | 2.5 | 2.5 | 2.75 | 2 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please address the concerns I raised in the weaknesses section. Additionally, I recommend revising the introduction to better reflect the paper's contributions. In my view, the primary contribution is the proposal of the geometric counterpart of NGNN and the proof that this approach effectively resolves the limitation of DisGNN in identifying **symmetric point clouds when the graphs are even fully connected**. \nThe authors should also consider relevant experiments in this direction to emphasize the novelty and significance of this work."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper attempts to address a crucial problem that enhances our understanding of the potential of invariant neural networks and can guide future model design.\n- Investigating the geometric counterparts of subgraph GNNs is a novel contribution.\n- The results extend beyond specific cases, such as asymmetric point clouds, broadening our understanding of how these models perform on symmetric point clouds as well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the geometric completeness of a significant class of geometric deep learning models: invariant neural networks. These networks leverage invariant features to impose strong inductive biases on spatial information, yet their theoretical expressive power remains somewhat unclear. This study aims to bridge that gap, enhancing both our theoretical understanding and practical application of these models.\n\nThe authors first demonstrate that incorporating distance into message-passing neural networks (like DisGNN) allows for the identification of asymmetric point clouds but struggles with highly symmetric ones. They then investigate geometric extensions of subgraph-based GNNs and prove that these models, specifically GeoNGNN, can successfully distinguish symmetric point clouds, achieving E(3)-completeness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper lacks clarity and structure in some areas. The detailed explanation of NGNN, which serves as the backbone of their main contribution, the GeoNGNN framework, is left in the appendix. I recommend the authors integrate key aspects of NGNN, such as its core equations or an architectural diagram, into the main text. Additionally, including a comparison with the original DisGNN would be helpful—highlighting the differences and explaining what enables NGNN (intuitively) to overcome the limitations of DisGNN. This would give readers a clearer understanding of how the proposed approach builds on previous work and addresses specific challenges without disrupting the flow. Instead, a brief overview of NGNN, along with key formulas and a comparison with GNN, could benefit the flow of the whole paper.\n\n- The results are primarily constrained to cases with global connectivity, which is often impractical in real-world applications due to the significant computational costs. Several studies [1], [2], [3], [4] have explored scenarios where the graph is not fully connected, underscoring the need to evaluate the performance of invariant neural networks in sparse graph settings. In practice, invariant neural networks tend to perform worse than equivariant ones in these cases. While the authors have left these cases in the future direction, it would greatly strengthen the paper if they could extend their analysis to sparse graphs or at least discuss how their completeness results may vary with different levels of graph sparsity. Providing theoretical bounds on performance degradation as connectivity decreases would also be valuable.\n\n- The experimental results, while showing some improvement, are relatively marginal, which limits the empirical impact of the work. I suspect this might be due to the sparsity of the graphs used in practical applications. 
The authors should aim to demonstrate the significance of their approach by clarifying in which specific cases their method outperforms existing methods. Providing examples or scenarios where GeoNGNN has a clear advantage would strengthen the empirical contributions.\n\n\n[1] Wang, L., Liu, Y., Lin, Y., Liu, H., & Ji, S. (2022). ComENet: Towards complete and efficient message passing for 3D molecular graphs. Advances in Neural Information Processing Systems, 35, 650-664.\n[2] Joshi, C. K., Bodnar, C., Mathis, S. V., Cohen, T., & Lio, P. (2023, July). On the expressive power of geometric graph neural networks. In International Conference on Machine Learning (pp. 15330-15355). PMLR.\n[3] Wang, S. H., Hsu, Y. C., Baker, J., Bertozzi, A. L., Xin, J., & Wang, B. (2024). Rethinking the benefits of steerable features in 3D equivariant graph neural networks. In The Twelfth International Conference on Learning Representations, ICLR\n[4] Sverdlov, Y., & Dym, N. (2024). On the Expressive Power of Sparse Geometric MPNNs."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1) The excessive use of bold text and the absence of a clear outline make the paper’s contributions difficult to discern. Could the authors consider restructuring the introduction for better clarity?\n\n2) I find that the statements in the section **Theoretical Characterization vs Practical Use** rely on the example C.2. How increasing the sparsity beyond this simple example is understudied despite the authors strong claims that relaxing the fully-connected condition leads to better expressiveness of GeoNGNN compared to DisGNN.\n\n3) There is no supporting evidence for GeoNGNN over existing architectures in the primary paper. Additionally, there is no comparative analysis involving node feature information generated by a complete invariant function. Could the authors address this gap?\n\n4) A significant portion of the QM9 dataset consists of non-symmetric structures. What are the proportions of indistinguishable data restricted to the subset of QM9 that includes only symmetric structures?\n\n5) In the QM9 noise study, the significant reduction in non-distinguishable point clouds occurs near what appears to be the level of reported error in the QM9 dataset. Given the reported error of 0.1Å, how is this error rescaled based on the applied scaling coefficient? \n\n6) Distinguishing structures on QM9, which lacks conformers, does not seem to be as important as datasets which contain conformers or very nearly isomorphic point clouds.The most compelling analysis appears to come from the study of MD17 but with mixed results. GeoNGNN appears to do particularly well on Benzene which is highly symmetric. How does Benzene behave under the noise tolerance study?\n\n7) Typically, ModelNet40 is sampled to avoid handling large point clouds. It is unclear from the text whether the entire mesh or a sampled version is used. If sampled uniformly, there is no guarantee that the symmetries are preserved. 
Could the authors clarify this in the text?\n\n8) The selection of ModelNet40 does not seem to rigorously test the theoretical claims of the paper, which focus on nearly isomorphic point clouds. Could the authors provide more rigorous testing on datasets that better align with their theoretical focus?\n\n9) It is unclear from the text and appendix what each structure in the synthetic dataset represents, how these structures were constructed, and why they are significant. Could the authors provide more detailed explanations on the construction and relevance of these synthetic structures?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper introduces a novel conceptual framework for understanding the efficacy of certain invariant architectures. This is further supported through theoretical analysis and empirical studies. Additionally, it proposes a framework for the development of future architectures extending the impact and significance of the work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper offers the following contributions:\n1) Introduces and defines the notion of \"Identify\" for invariant GNNs, positioned between distinguishability and completeness.\n2) Provides a characterization for the incompleteness of DisGNN\n3) Proposes GeoNGNN to ensure indentification of the cases where DisGNN is incomplete\n4) Demonstrates that several established invariant GNNs are capable of completeness"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper lacks sufficient empirical evidence to support its theoretical analysis, significantly reducing the overall significance and impact of the work. The selected real world experiments emphasize datasets which lack conformers or nearly isomorphic point clouds. Furthermore, the main text does not provide adequate evidence to demonstrate the advantages of GeoNGNN over the existing complete invariant architectures.\n\nAdditionally, the excessive use of bold text and the absence of a clear outline in the introduction make it challenging to follow and clearly understand the contributions of the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- How does this method for expressivity generalize to different types of architectures? To the reviewer it seems that this method for showing is very specific and would be difficult to generalize to other types of equivariant architectures acting on point clouds."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors have provided both extensive proofs as well as extensive analysis\nto their claims. Overall the presentation and intend is clear and definitions\nare well-thought out and the authors provide a good heuristic insight with each\nintroduced theorem and definition which is nice. The extensive analysis of both\nDisGNN and GeoNGNN shows that the work is of good quality and looks to be of\ngood quality to the reviewer. All theorems come with extensive proofs and with\na intuition which is helpful for the non-mathmatical audience. The quality of\nthe content, such as originality and potential impact, is harder to asses since\nthe reviewer is not familiar expressivity research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors of the paper prove that certain families of models are not only\ninvariant with respect to the Euclidian group and permutation group, but also\nthat classes of models distinguish the orbits of $\\mathbb{R}^3$ under the\naction of $E(3)$. An extended analysis of the expressivity of DisGNN is\nprovided and it is shown that this network architecture is nearly $E(3)$\ncomplete. As a last contribution an analysis is provided for various families\nof neural networks and conditions are provided under which they are\n$E(3)$-complete. The theoretical results are verified by experiments\non the QM9 dataset and a synthetic dataset with designed edge cases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "To the reader it seems that some of the definitions are somewhat convolved and\nsome simplification and clarity in the definitions might improve reading. Some\nof the definitions, while they might be customary in the machine learning\nliterature, are somewhat unfortunately choses from a mathematical perspective.\nCompleteness of a space in the mathematical sense implies that each Cauchy\nsequence has a limit within that space. A second example is the use of the term\nisomorphism. While not wrong, a better phrasing is to say that the two point\ncloud lie in the same orbit with respect to the action of the Euclidian group\nacting on the tensor product of copies of $\\mathbb{R}^3$. The current phrasing\nmight be better if is more in line with terminology used in machine learning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I have two questions.\n\n1. Do you have any insight on achieving E(3)-completeness for frame-based approaches?\n\n2. Can you comment on achieving E(3)-completeness by using node features beyond pairwise distances, e.g., dihedral angles?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper symmetrically studies the problem of E(3)-completeness geometric graph neural networks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the expressiveness power of message-passing neural networks incorporating pairwise distance between graph nodes, showing the near E(3)-completeness. Furthermore, the authors study the subgraph graph neural networks, which can achieve E(3)-completeness. Therefore, it is possible to make DimeNet, GemNet, and SphereNet to achieve E(3)-completeness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The work is based on global connectivity assumption, and this assumption significantly limits this work. Also, the experimental results seem to be quite weak."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024on,\ntitle={On the Completeness of Invariant Geometric Deep Learning Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=52x04chyQs},\nnote={under review}\n}"
},
"abstract": {
"value": "Invariant models, one important class of geometric deep learning models, are capable of generating meaningful geometric representations by leveraging informative geometric features in point clouds. These models are characterized by their simplicity, good experimental results and computational efficiency. However, their theoretical expressive power still remains unclear, restricting a deeper understanding of the potential of such models. In this work, we concentrate on characterizing the theoretical expressiveness of a wide range of invariant models. We first rigorously characterize the expressiveness of the most classic invariant model, message-passing neural networks incorporating distance (DisGNN), restricting its unidentifiable cases to be only highly symmetric point clouds. We then prove that GeoNGNN, the geometric counterpart of one of the simplest subgraph graph neural networks, can effectively break these corner cases' symmetry and thus achieve E(3)-completeness. By leveraging GeoNGNN as a theoretical tool, we further prove that: 1) most subgraph GNNs developed in traditional graph learning can be seamlessly extended to geometric scenarios with E(3)-completeness; 2) DimeNet, GemNet and SphereNet, three well-established invariant models, are also all capable of achieving E(3)-completeness. Our theoretical results fill the gap in the expressive power of invariant models, contributing to a rigorous and comprehensive understanding of their capabilities."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"geometric deep learning",
"invariant models",
"completeness",
"expressiveness",
"graph neural network",
"subgraph graph neural network"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/111ba0fcd0bad3a6f6d645480e99ea80ad5eb8a4.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/3f888d50a7efed07e6bcf0f186b6ddf1c0f2f117.zip"
},
"title": {
"value": "On the Completeness of Invariant Geometric Deep Learning Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
53MDeiZ9mC | Mitigating Gradient Interference for Efficient Sparse Fine-Tuning of Large Language Models | main | Active | Large language models;Sparse | foundation or frontier models, including LLMs | 3;5;5;5 | 3;4;4;4 | 3;2;3;3 | 2;2;2;3 | 4;2;2;3 | 4.5 | 3.75 | 2.75 | 2.25 | 2.75 | 1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper have a strong theoretical foundation with mathematical proofs about the error bounds of using dense adapter for sparse LLMs. It emphasize on the gradient interference, and mitigate this error for a better PEFT method for sparse models.\n\n2. Novel identification and analysis of the gradient interference problem. This also results in a novel PEFT method.\n\n3. Experimental results show its advantage in comparison to LoRA.\n\n4. This paper is well-written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduce a comprehensive theoretical framework for memory-efficient fine-tuning for sparse LLMs. The authors identify and analyze a key challenge called \"Sparse Weight Gradient Interference,\" where masked pre-trained weights and PEFT weights exhibit competing optimization objectives during fine-tuning. To address this, they propose a novel method combining three key innovations: a pooling-based PEFT method, normalization of PEFT modules, and an adaptive layer-wise approach using Centered Kernel Alignment for sparsity allocation. Their theoretical analysis identifies three crucial factors affecting fine-tuning efficacy: errors from weight norms, PEFT structures, and error accumulation during fine-tuning. The effectiveness of their approach is demonstrated through extensive experiments on LLaMA-2 models, showing superior performance compared to existing methods, particularly in maintaining model performance under high sparsity conditions (up to 70%), while providing theoretical guarantees for error bounds."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack empirical upper bound: The paper lacks a crucial empirical upper bound comparison. Since the core problem stems from using dense adapter weights with a sparse model, an ideal oracle baseline would be directly fine-tuning only the sparse positions in the model. Including this oracle baseline would provide a clearer understanding of the maximum achievable performance without gradient interference. This comparison would also serve as a valuable reference point for future research in sparse fine-tuning methods.\n\n2. Limited model diversity in experiments: This work only evaluate LLaMA-2 family model. I will recommend the authors to further evaluate LLaMA-3 family and Mistral family for a more comprehensive comparison.\n\n3. Lack practical efficiency: While the proposed method show better performance, the authors do not provide any efficiency results to show the practical training speed of the proposed method. The proposed method cannot be useful if it is slow even with strong theoretical foundation. Therefore, I think the authors should provide the number of trainable parameters, the training time, the training memory for LoRA and the proposed method.\n\n4. Typo in Table 2: I don't see any other \"LoSA\" in this paper. The authors should either delete it or add an definition for it.\n\n5. Lack of ablation studies: This paper do not do ablation study for the Iterative Sparse Fine-Tuning Scheme. This should be added to verify its effectiveness.\n\nI will consider to raise my score if more evidence are provided about the effectiveness of the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Wanda does not require updating the model weights during pruning unlike the proposed method and SparseGPT. So if Wanda is used as a baseline here, is it also fine-tuned using the same amount of data as this method for fair comparison? \n2. It is not clear in line 4 of algorithm 1 why the sparsity mask is updated using either SparseGPT or WANDA\n3. Given that this method uses PEFT and does not have to update all the model parameters like SparseGPT, can you report the amount of time and memory required to run your method and compare it against other baselines too? This might bolster your case further.\n4. Could you provide some insights on why is your method so much better than SparseGPT given that the latter updates the weights of the entire model?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Allocating additional fine-tuning parameters to the layers with higher reconstruction loss is novel.\n2. They demonstrate that the zero-shot performance of the model after pruning is the best when compared to the other baselines on various tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "A common method to prune iteratively is to learn the mask and also update the model weights to recover the loss induced by pruning using fine-tuning. The mask is applied such that the reconstruction loss of each layer after applying the mask is minimized. Rather than regular fine-tuning, this method employs LoRA to heal the model. They posit that using LoRA to heal the model introduces errors owing to the \"Sparse Weight Gradient Inference\" problem. This occurs because the LoRA module is not aware of where the masks have been applied to the frozen pre-trained model. Thus, it could have gradients for parameters where the frozen pre-trained model is pruned and set to 0 resulting in interference. They estimate the error that using LoRA incurs. \n\nThe proposed method uses bilevel optimization where in the upper level the mask is learnt and in the lower level the weights are updated using a modified LoRA. The modified LoRA applies a pooling operation on the input to reduce it to a lower dimension g, followed by multiplying the resulting value with weight G and then projects the output back to the original dimension. The weights G are learnt in this PEFT variant. To further alleviate the error introduced by using LoRA to heal is to rein in the magnitude of weight change by using normalization such as weight-decay or drop out etc. Rather than introducing the parameters uniformly to all layers for fine-tuning, more parameters are allocated to layers with higher reconstruction loss. The layerwise sparsity rate is set based on the Hilbert-Schmidt Independence Criterion metric and is inversely proportional to it, thereby pruning layers with higher information redundancy. This whole process is repeated for several iterations and the layer-wise sparsity rate, the sparsity mask and model weights are updated at each iteration."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the sparsity is induced by applying masks, unstructured pruning does not reduce the latency. In real world applications, reducing the latency is also crucial. Could you report the latency improvements of the final model and how it compares to just using N:M sparsity and WANDA etc?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- How does the proposed method perform for structured or semi-structured sparsity?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors point out the Sparse Weight Gradient Interference (SWGI) phenomenon, attributing to the mismatch between the sparse structure of the model weights and the dense structure of the PEFT weights.\n\n- The authors provide a theoretical analysis of the SWGI phenomenon and related bounds on the errors introduced by sparsification."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors focus on the sparsification and PEFT recovery of LLMs. The authors identify and address the Sparse Weight Gradient Interference (SWGI) phenomenon, where gradients from masked weights interfere with the fine-tuning of active parameters, leading to performance degradation. The authors conduct a theoretical analysis on this problem, and propose a new new iterative sparse fine-tuning scheme to handle this problem. Experiments on benchmarks show that the proposed method recovers the accuracy better than LoRA."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The structure and readability of the paper could be improved. Also, a main figure illustrating the problem setting or the SWGI phenomenon, and the proposed method could be helpful.\n\n- A detailed ablation study on the many components that comprise the method (CKA-guided sparsity setting, PEFT parameter allocation, sparsity scheduling) is missing.\n\n- More experiments on well-accepted benchmarks such as MMLU or GSM8K are needed to verify the effectiveness of the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The theoretical examination of Sparse Weight Gradient Interference in sparse PEFT methods is sound and provides valuable insights.\n- The proposed method outperforms LoRA in specific method-benchmark combinations.\n- The improvements apply across different levels of sparsity and sizes of language models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a theoretical examination of parameter-efficient fine-tuning (PEFT) for sparse large language models (LLMs). The authors explore Sparse Weight Gradient Interference, identifying large weight norm as a potential main source of loss error in general sparse PEFT methods. Additionally, they suggest that the LoRA structure contributes to loss error in sparse PEFT and propose an alternative PEFT approach consisting of three steps—pooling, linear transformation, and expansion—which they argue achieves a tighter upper bound on loss error compared to LoRA. They also raise the possibility of error accumulation over fine-tuning iterations as a further source of loss. Alongside their theoretical insights, the authors present a brief empirical analysis showing that their sparse PEFT method can outperform LoRA across certain benchmarks when combined with the sparseGPT and Wanda LLM pruning techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper lacks detailed experimental settings, making it unclear if the comparison with the baseline is entirely fair. Key details such as the number of data points used for LoRA, the number of fine-tuning iterations, hyper-parameters, and any tuning performed (particularly for learning rate) are missing.\n- The empirical evaluation is limited to classification tasks, and additional open-ended generation downstream tasks would strengthen the assessment of the proposed method.\n- Even on the limited set of reported downstream tasks, improvements are inconsistent, with LoRA outperforming the proposed method on certain model-task combinations, such as Llama2(13b)-wanda.\n- There is no discussion on result variance; only single values are reported. Given the minor improvements observed, additional experiments with varying random seeds are needed to allow readers to assess the method's efficacy more reliably.\n- The statement 'Empirical evaluation on a 50% sparse LLaMA-2 7B model demonstrates the superiority of our approach, achieving lossless compression' in abstract is misleading and not supported by the results."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024mitigating,\ntitle={Mitigating Gradient Interference for Efficient Sparse Fine-Tuning of Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=53MDeiZ9mC},\nnote={under review}\n}"
},
"abstract": {
"value": "Large Language Model (LLM) sparsification plays a crucial role in model compression. \nAmong various methods, training-free approaches are highly efficient but often result in accuracy loss, while full fine-tuning requires substantial computational resources. \nRecent works have begun exploring sparse Parameter-Efficient Fine-Tuning (PEFT) methods, but lack theoretical guidance.\nThis study presents the first comprehensive theoretical framework for efficient sparse fine-tuning, addressing a critical gap in the literature. \nSpecifically, we identify gradient conflict as the primary issue in PEFT sparse methods, wherein masked pretrained weights and corresponding PEFT weights exhibit competing optimization objectives during fine-tuning, potentially compromising model performance.\nWe theoretically model this phenomenon and identify three key factors influencing the efficacy of fine-tuning in sparsified LLMs: (1) error introduced by weight norms, (2) error composition from PEFT structures, and (3) error accumulation during fine-tuning.\nLeveraging these theoretical insights, we propose a novel iterative sparse fine-tuning scheme that systematically addresses each identified factor. \nWe implement an iterative process alternating between sparsity and fine-tuning to mitigate accumulated error in single turn of finetuning. \nWe employ pooling instead of low-rank decomposition to reduce error composition from PEFT structures. \nWe apply normalization to PEFT modules during fine-tuning, constraining error values by limiting weight norms while preserving representational capacity. \nAdditionally, we utilize Centered Kernel Alignment based information similarity assessment for adaptive allocation of layer-level sparsity and PEFT parameter quantities, addressing layer-specific redundancy.\nEmpirical evaluation on a 50\\% sparse LLaMA-2 7B model demonstrates the superiority of our approach, achieving lossless compression."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large language models",
"Sparse"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/aa6cb68df15e956512a954ca7504429b64680816.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Mitigating Gradient Interference for Efficient Sparse Fine-Tuning of Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
53kUa92R7J | Loius (Look it up in the Structure): Benchmark and Techniques for Document structure aware LLM based Retrieval | main | Active | information retrieval;llm;model based retrieval;document search;retrieval benchmark;document structure;benchmark | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;3 | 4;3;4 | 3;2;2 | 2;1;2 | 3;1;2 | 3 | 3.666667 | 2.333333 | 1.666667 | 2 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Most questions are in the weaknesses. But some comments about writing:\n- Citations should be \\citep{}, it seems like they are not, and are instead inline\n- [Minor] It would be nice to see some examples in the appendix, in terms of passage it came from and question generated."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The new proposed task is an interesting contribution, and to my knowledge has not been studied before\n- They use a wide variety of retrieval model architectures, from cross-encoders, to generative retrieval, spare retrieval, and dense retrieval.\n- Their proposed model does better on the data they generated"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new task for retrieval models: that of finding the relevant section title in a book, given a question. They build a dataset for this task by using PDF parsing to extract the table of contents from books. They generate questions for the task through the use of Mixtral. \n\nThey then train a model to do well on this task, called Louis. Their model is trained on the training set (also generated with Mixtral) and shows improved performance over other model types, including non-fine-tuned versions of DSI, BM25, and Mistral. They show an error analysis where their model performs better."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. There is a lack of important details. How many chunks/passages are there in the corpus for dense retrieval (or BM25)? Does each chunk for these models also get the section, so that the model can use that information? Why are there no long context models evaluations (Gemini/Prolong/even just a 128k Llama)? Does Mistral see the entire book or just the ToC? etc.\n2. Conceptually, performing ToC retrieval is strictly easier than searching for the specific passage that a question was generated from. From related work on searching for the relevant passage in long documents (see the \"Scrolls Benchmark\" [1] for task examples) IR models perform pretty well with retrieval (see the LoCo benchmark [2]). It seems something is wrong with the setup if the models are doing this poorly. My guess is that the book is chunked but not given the section headers in the chunks, so they lack that context. Furthermore, BGE is a decent baseline, but pretty weak in comparison - see the MTEB leaderboard where BGE-base (I assume because of the 768 dim vector, but again not described) is ranked #47.\n3. Overall, the task does not appear very challenging. R@3 is 90% and close to that for many baseline models (DSI, BM25). If BM25 can get nearly the same performance as the proposal model, it does not seem like much of an improvement (and is perhaps not even statistically significant).\n4. There is no analysis on the false negative information in these questions. It is likely that many questions are answered in several places and that falsely lowers the score. I would guess that if the authors did an analysis of the Recall @ 3 failures that the answer is given in both places. This makes (3) more of an issue and lowers the quality of the dataset.\n5. The main modeling contribution seems to be that using the same data generation process as making the test set, and then by training on it, makes a model better able to perform this task. 
This is unsurprising and demonstrated over and over in the literature on every modality. This would be totally fine, if this was the only issue, but I assume that this means that the proposed Louis model cannot do another other tasks, as it is likely overfit for this. \n\nOverall, I think the task could be useful for the retrieval community as a useful long-context evaluation. However, I am unconvinced of the datasets quality and there appear to be modeling concerns (or at least a lack of information).\n\n[1] Scrolls Benchmark: https://www.scrolls-benchmark.com/tasks\n\n[2] LoCo benchmark: https://arxiv.org/pdf/2402.07440v2"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "The questions are listed in the 'Weaknesses' section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper introduces a new dataset containing queries and their corresponding subsections, which may contribute to research in document retrieval.\n2. Constructing the dataset and conducting the experiments required substantial effort."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Inspired by the way humans often search through the Table of Contents when reading books, this paper introduces a new task: given a query and the Table of Contents (TOC) of a long document, the objective is to retrieve the correct subsection in the TOC that contains evidence to answer the query. The authors introduce and release a new multi-domain dataset, ToCTome, which consists of 18 books across 6 domains. For each subsection, they use the Mixtral 8x7b model to generate questions, forming subsection-query pairs. Additionally, they split the data into training, development, and test sets, and fine-tune Mistral Instruct v0.2 with the LoRA adapter. Experimental results demonstrate that the fine-tuned Mistral Instruct v0.2 achieves an impressive R@1 score of 82.6%, outperforming BM25, dense retrieval models, and the original, non-fine-tuned Mistral Instruct v0.2 on this task. These findings highlight the strong capabilities of LLM in ToC-based retrieval."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors introduce a new problem but do not explain its importance and significance. The purpose of using a large language model (LLM) to locate the subsection relevant to the query is unclear. The authors simply state that this \"mimics how humans leverage the structure of a book,\" but they don’t explain why this is necessary. I believe the authors should address this question: \"After finding the subsection relevant to the query, what can be achieved?\"\n2. The authors assume that each query can be matched to a corresponding subsection in the Table of Contents. However, in real-world scenarios, many queries may not align with the Table of Contents; in particular, some fine-grained questions cannot be mapped to a specific subsection.\n3. The proposed dataset and method are designed for queries that correspond to a single subsection. However, some queries may span multiple subsections, which this dataset and method do not account for. This results in oversimplified problem modeling.\n4. The writing of this paper has several issues: (1) Many sentences do not follow an academic tone, and exclamation marks are used frequently, which is uncommon in academic papers. (2) Detailed experimental results are repeated in the ABSTRACT, INTRODUCTION, and EXPERIMENTS sections."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can the methods developed for Loius be adapted for general retrieval tasks beyond structured documents like books? If so, what modifications would be necessary?\n2. Why were different maximum input lengths used for Loius and DSI in your experiments? Could this difference affect the comparative performance results reported?\n3. Given the reliance on a structured ToC, how does Loius handle content variations within sections that might not be accurately reflected by their ToC descriptions?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The innovative use of the ToC to guide the retrieval process introduces a unique approach to document navigation, potentially transforming retrieval strategies for structured documents.\n- Extensive experiments validate the system’s efficacy, particularly its ability to outperform traditional retrieval methods.\n- The creation of the ToCTome benchmark contributes a valuable resource to the research community, fostering further innovation in retrieval systems that understand document structure."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The research paper introduces Loius, a novel retrieval system that leverages the structure of ToC to enhance retrieval processes. By mimicking how humans intuitively use a ToC to navigate books, Loius proposes a fresh retrieval paradigm that contrasts with traditional methods based on keyword or vector-based searches. The paper also presents a new benchmark dataset ToCTome, which comprises 18 books across six diverse domains, providing a robust platform for evaluating ToC-based retrieval systems. The results demonstrate Loius’s superior performance in accurately locating relevant sections, boasting a Recall@1 score significantly higher than the next best system (DSI) and other baseline models like BM25 and DPR."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The specific application of ToC-based retrieval to books may not generalize to other types of content that lack a clear hierarchical structure, such as unstructured web pages or documents without a ToC.\n- The technical novelty appears limited primarily to the adaptation of existing retrieval technologies to a new input structure (ToC), potentially limiting the method’s broader applicative insights.\n- In Section 5.2, I find that the maximum input length of Loius and DSI are significantly different. It is unclear if this difference may affect the experimental results."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Can we instruction finetune an LLM to pick the best subsection to answer a user query by showing it a Table of Contents of a long document?"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024loius,\ntitle={Loius (Look it up in the Structure): Benchmark and Techniques for Document structure aware {LLM} based Retrieval},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=53kUa92R7J},\nnote={under review}\n}"
},
"abstract": {
"value": "Modern day LLMs have shown remarkable success in understanding human instructions and provide satisfactory responses across a diverse set of tasks that require world knowledge. In this work, we leverage the inherent capability of LLMs to learn new tasks through instruction to design a novel retriever Loius that proposes a new retrieval paradigm: Table of Contents (ToC) based retrieval. Loius mimics how humans leverage the structure of a book (or a long document) by using ToC to search for information. Loius demonstrates that LLMs can be finetuned to build ToC based retrievers where the most granular ToC entry (leaf node) serves as the retrieval unit. This approach differs from traditional retrieval systems, which index chunks of information from long documents. We also introduce a novel\ncomprehensive benchmark ToCTome featuring 18 books across 6 diverse domains (with train, test and dev splits) and show that Loius achieves R@1 score of 82.6% as compared to the next best system (DSI) (76.9%) and other baselines such as BM25 (71.6%), DPR (53.6%) and out-of-the-box Mistral Instruct v0.2 (18.0%) on average. Loius is capable of incorporating corpus knowledge while learning to select the most relevant ToC leaf node, resulting in almost zero hallucinations (less than 0.05%) – a key issue with LLM-based retrievers. In addition to the benchmark, complete code for building Loius will be publicly released to accelerate research\nin the novel direction of ToC based retrieval."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"information retrieval",
"llm",
"model based retrieval",
"document search",
"retrieval benchmark",
"document structure",
"benchmark"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/93b973fae8da9e008b97f70fa474e199c1844263.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/77450c042f442e7fc21d225dfdc8f444e0bda0ab.zip"
},
"title": {
"value": "Loius (Look it up in the Structure): Benchmark and Techniques for Document structure aware LLM based Retrieval"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
53xxT3LwJB | NN-ResDMD: Learning Koopman Representations for Complex Dynamics with Spectral Residuals | main | Active | Koopman operator;data driven dynamical system;dynamic mode decomposition | learning on time series and dynamical systems | 3;3;5;6 | 3;4;5;3 | 2;4;3;3 | 2;2;2;2 | 2;2;3;3 | 4.25 | 3.75 | 3 | 2 | 2.5 | 0.058026 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Although there is no specific question that will surely affect my evaluation, some comments if any on the points listed in the Weaknesses section would be highly helpful."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The method is technically reasonable.\n- The idea is somewhat new. To my knowledge, using the resDMD objective with neural observables is quite natural but has not been exactly practiced yet.\n- The experiments nicely demonstrate the utility of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "A method for computing the Koopman spectra of dynamical systems is proposed. It stands on two main ingredients: 1) the use of spectral residual as a loss function, and 2) the use of NNs for constructing observables. Its applications to a pendulum, turbulence, and neural dynamics are presented, and the proposed method is shown to be successful in extracting the Koopman spectra and analyzing the data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) Although the work is solid, I do not think the technical contribution is so significant to be included in the ICLR proceedings. The use of neural observables has been known and practiced well in these 8 years or so, and ResDMD has been already well known and discussed recently. The technical contribution of this work inevitably looks incremental.\n\n(2) The rich literature on the use of NNs as DMD observables, other than Li et al. (2017), seems to be overlooked. For example, even restricting the scope to the mere use of NNs for DMD-based analysis (i.e., excluding more applied perspectives such as control), the following papers (and probably more) should be relevant:\n\n- N. Takeishi, Y. Kawahara, T. Yairi: Learning Koopman invariant subspaces for dynamic mode decomposition, Advances in Neural Information Processing Systems 30, 2017, pp. 1130–1140\n- B. Lusch, J. N. Kutz, S. L. Brunton: Deep learning for universal linear embeddings of nonlinear dynamics, Nature Communications, vol. 9, no. 1, p. 4950, 2018\n- A. Mardt, L. Pasquali, H. Wu, F. Noé: VAMPnetsfor deep learning of molecular kinetics, Nature Communications, vol. 9, no. 1, p. 5, 2018.\n- E. Yeung, S. Kundu, N. Hodas: Learning deep neural network representations for Koopman operators of nonlinear dynamical systems, Proceedings of the 2019 American Control Conference, 2019, pp. 4832–4839\n- S. E. Otto, C. W. Rowley: Linearly recurrent autoencoder networks for learning dynamics, SIAM Journal on Applied Dynamical Systems, vol. 18, no. 1, pp. 558–593, 2019\n- O. Azencot, N. B. Erichson, V. Lin, M. W. Mahoney: Forecasting sequential data using consistent Koopman autoencoders, Proceedings of the 37th International Conference on Machine Learning, 2020, pp. 475–485\n- H. Wu, F. Noé: Variational approach for learning Markov processes from time series data, Journal of Nonlinear Science, vol. 30, no. 1, pp.23–66, 2020\n- D. J. Alford-Lago, C. W. Curtis, A. T. Ihler, O. 
Issan: Deep learning enhanced dynamic mode decomposition, Chaos: An Interdisciplinary Journal of Nonlinear Science, vol. 32, no. 3, p. 033116, 2022\n- T. Iwata, Y. Kawahara: Neural dynamic mode decomposition for end-to-end modeling of nonlinear dynamics, Journal of Computational Dynamics, vol. 10, no. 2, pp. 268–280, 2023\n\nI do not think all of them should be included in the reference with detail, but at least the existence of such a rich literature should be mentioned to help readers to better understand the context of the research.\n\nBelow are relatively minor technical points that I found unclear.\n\n(3) The authors emphasize the \"lack of theoretical guarantee of convergence\" of EDMD for several times. In what sense is this \"lack\" supposed? For example, the work by Korda & Mezić:\n\n- M. Korda & I. Mezić: On convergence of extended dynamic mode decomposition to the Koopman operator, Journal of Nonlinear Science, vol. 28, pp. 687–710, 2018\n\ndiscusses the convergence in some sense.\n\n(4) The proposed method alternates between the gradient-based update of $\\theta$ and the least squares solution to get $K$. Is there any insight of this choice? I am asking this because it is also possible to include the least squares within the gradient computation, as done in Takeishi et al. (2017); Otto & Rowley (2019) listed above for example.\n\n(5) In the experiment, does EDMD-DL follow the exactly same configuration as NN-ResDMD except for the loss function? (it should.) Please elaborate on this more clearly to make it easier to assess the benefit particular to the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "(1) How does this method differ from existing deep learning approaches for Koopman operator estimation, and what substantial improvements does it offer in terms of robustness, accuracy, or efficiency?\n\n(2) What are the differences and connections between the proposed loss function in this paper and existing evaluation functions like the VAMP score?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The proposed NN-ResDMD method offers a deep neural network based approach for estimating Koopman spectral components"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose NN-ResDMD, a deep learning-based approach that directly estimates Koopman spectral components by minimizing a spectral residual. This method aims to improve the reliability of approximating Koopman spectra in nonlinear dynamical systems by automatically identifying optimal basis functions for the Koopman invariant subspace. The paper presents experiments on physical and biological systems, demonstrating the method's scalability and accuracy for complex dynamics.\n\nMy main comment on this paper is that it lacks sufficient innovation or fails to effectively demonstrate its unique contributions. The use of deep learning to estimate Koopman operators has been explored extensively in prior research. For example:\n\n[1] Lusch, B., Kutz, J. N., & Brunton, S. L. (2018). Deep learning for universal linear embeddings of nonlinear dynamics. Nature Communications, 9, Article 4950.\n[2] Mardt, A., Pasquali, L., Wu, H., & Noé, F. (2018). VAMPnets for deep learning of molecular kinetics. Nature Communications, 9, Article 5.\n[3] Mardt, A., Pasquali, L., Wu, H., & Noé, F. (2020). Deep learning Markov and Koopman models with physical constraints. Proceedings of Machine Learning Research, 107, 451-475.\n\nThe squared relative residual proposed in this paper has similarities to the VAMP-E score explored by Wu and Noé in their work on VAMP [4]. The VAMP score framework has served as a basis for many deep learning models, including VAMPnets [2], state-free reversible VAMPnets and GraphVAMPnets. It would be beneficial for the authors to position their spectral residual measure within this established framework, providing a comparative analysis or highlighting any differences in formulation or performance.\n\n[4] Wu, H., & Noé, F. (2020). Variational approach for learning Markov processes from time series data. Journal of Nonlinear Science, 30, 23-66."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "See the Summary"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "A collection of questions and suggestions have been made in the Weaknesses section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The description and clarity of the proposed approach is good overall.\n- The proposed approach is theoretically justified by the guarantees of the ResDMD method it is built upon.\n- The clustering results on the neural dynamics experiment are promising.\n- The code for the experiments is already available online."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new method, NN-ResDMD, to estimate the spectral components of Koopman operators. It builds upon Residual Dynamic Mode Decomposition (ResDMD) and uses a neural network to identify basis functions instead of manually selecting them."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**I do not think this paper makes a sufficiently strong contribution**. My understanding is that the proposed approach is simply ResDMD where the basis functions are parametrized by a neural network instead of specified manually, and then iteratively optimized. In my opinion this paper is better suited as a workshop paper in its current form.\n\nThe authors use feedforward neural network but the details of the architecture of the feedforward neural network are not provided. The authors **did not investigate the use and choice of other typically better-performing neural network architectures**. I don't think this should be left as future work but should already have been investigated. Similarly, exploring the direction of integrating the proposed approach with PINNs/PINOs would have made the contribution of the paper stronger, instead of leaving it as future work.\n\nThere is **no discussion of the computational cost** of the approach, which could be a big drawback of the proposed approach. As far as I understand it, the single evaluation (or few evaluations) of the eigenpairs in ResDMD is replaced by an iterative process where each iteration requires the evaluation of the eigenpairs in NN-ResDMD. This is an expensive part of the algorithm and there are no guarantees for the number of iterations required, so the running time there could be many times larger than for the classical ResDMD. In addition, there is also the cost of the additional optimization steps which can be very large if the neural networks are also large. Overall, the proposed algorithm seems significantly slower than existing approaches, so a trade-off between accuracy and computational time needs to be very carefully discussed theoretically and empirically.\n\nMore generally, the **limitations of the algorithm are not discussed**.\n\n**The results of the experiments are not clear**\n- The results of the pendulum experiment are not clear. 
Need to specify more precisely and explicitly what the ground truth is to understand if these are good/bad results. Why are the results of the NN-ResDMD approach shaded areas while all the other methods are displayed using points? The shaded area also has a large radius, so maybe the results are not as good as stated compared to Hankel-DMD for instance for which the points remain close to the unit circle.\n- The results of the turbulence experiments are not clear, since there is no ground truth provided. I am not familiar with that experiment so I do not know what the results are supposed to look like. It is unclear to me why the NN-ResDMD results in Figure 5 are considered good, while those in Figure 7 for Hankel-DMD are considered bad.\n- The results of the neural dynamics experiments could be made clearer. Why do you choose a different number of eigenfunctions for the different approaches? Is this a fair comparison? Figure 6.a., 9.a. and 10.a. show the decomposed eigenfunctions for the different approaches. There is no ground truth provided, so it is not clear to me what we learn from these plots.\n\nGiven that the proposed approach is compared to Hankel-DMD in all the numerical experiments, it would be worth detailing what that approach actually does compared to the other DMD approaches discussed in the paper. Maybe that was the original aim of Appendix A.5 which has been left empty.\n\nThe diagram in Figure 1 needs to be cleaner and more \"professional\"\n\nMake sure to specify the variables over which the optimization is performed in equations (3.4) and (3.5)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In Fig. 4, the spectrum of the estiated Koopman operator by NN-ResDMD is distributed on the unit circle. On the other hand, the spectra of the estimated Koopman operators by other methods are distributed also inside the unit circle. Is the dynamical system measure preserving, or did you only focus on the spectra on the unit circle?\n\nMinor comment: \nIn line 208, do you mean $(G+\\sigma I)^{-1}$ instead of $\\dagger$ since $G$ is positive-semi definite?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "ResDMD is a powerful tool to observe the spectra of Koopman operators. The authors combine neural networks and ResDMD to make ResDMD more flexible method. The topic is interesting and relevent to the community. The paper is well-organized and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose Neural Network-ResDMD (NN-ResDMD), where the dictionary functions are automatically selected by neural networks. The network is trained by minimizeing the loss function that is related to the residual of the eigenvalue problem of the Koopman operator. Numerical results are also illustrated to confirm the behavior of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The authors insist their proposed method automatically select basis functions, eliminating the need for manual intervention. I understand that in practice, applying neural networks to find proper basis functions is more flexible. However, theoretically, advantages of applying neural networks to estimating Koopman operators and their eigenvalues are not clear for me. They assume there is a basis function $\\psi_i$ and construct a neural network that can sufficiently approximate $\\psi_i$. Could you clarify what is $\\psi_i$? And, what impact on estimating Koopman operators and their eigenvalues is induced by the error of the approximation of $\\psi_i$ by using the neural network?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024nnresdmd,\ntitle={{NN}-Res{DMD}: Learning Koopman Representations for Complex Dynamics with Spectral Residuals},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=53xxT3LwJB},\nnote={under review}\n}"
},
"abstract": {
"value": "Analyzing long-term behaviors in high-dimensional nonlinear dynamical systems remains a significant challenge. The Koopman operator framework has emerged as a powerful tool to address this issue by providing a globally linear perspective on nonlinear dynamics. However, existing methods for approximating the Koopman operator and its spectral components, particularly in large-scale systems, often lack robust theoretical guarantees.\nResidual Dynamic Mode Decomposition (ResDMD) introduces a spectral residual measure to assess the convergence of the estimated Koopman spectrum, which helps filter out spurious spectral components. \nNevertheless, it depends on pre-computed spectra, thereby inheriting their inaccuracies. \nTo overcome its limitations, we introduce the Neural Network-ResDMD (NN-ResDMD), a method that directly estimates Koopman spectral components by minimizing the spectral residual. By leveraging neural networks, NN-ResDMD automatically identifies the optimal basis functions of the Koopman invariant subspace, eliminating the need for manual selection and improving the reliability of the analysis.\nExperiments on physical and biological systems demonstrate that NN-ResDMD significantly improves both accuracy and scalability, making it an effective tool for analyzing complex dynamical systems."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Koopman operator",
"data driven dynamical system",
"dynamic mode decomposition"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3cd2cd49252dd7db8627463a2090a1dfb6f8179d.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/b79d28d3e6ec81b18e9bc30b118e6597ffe5f858.zip"
},
"title": {
"value": "NN-ResDMD: Learning Koopman Representations for Complex Dynamics with Spectral Residuals"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
54KcduuYeG | AutoScale: Automatic Prediction of Compute-optimal Data Compositions for Training LLMs | main | Active | Data Curation;Data Composition;Scaling Laws;Data-centric AI;Large Language Models (LLM) | foundation or frontier models, including LLMs | 3;5;5;8 | 2;3;4;4 | 2;3;2;4 | 2;3;2;4 | 2;3;3;3 | 5.25 | 3.25 | 2.75 | 2.75 | 2.75 | 0.802181 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. If I understand correctly, for the downstream tasks, the evaluation metric used is perplexity. Why is perplexity chosen as the metric instead of one that is specific to the dataset or task itself?\n2. Is there any potential explanation for why AutoScale doesn't perform as well on encoder-only models compared to decoder-only models?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. AutoScale presents an interesting idea that distinguishes it from previous work, demonstrating that the optimal weights are only effective at the scale they were optimized for and become suboptimal when applied to other scales. It offers a practical method for automatically and efficiently determining domain weights when train large language models. \n2. The experiments are conducted on both encoder-only and decoder-only models and shows good results on decoder-only model. \n3. The work is supported by both empirical experiments and mathematical formulations. Additionally, the diagram in the paper is well-designed and effectively conveys the underlying concepts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors address an interesting topic in this paper: a method for automatically optimizing the mixture proportions of pretraining data domains when training language models. \nThey begin by formulating the optimal mixing problem as a bi-level optimization and then propose the Direct Data Optimization (DDO) algorithm to formalize the relationship between optimal data compositions and training data scales. Using DDO, they conduct empirical studies to optimize domain weights at various training data scales, demonstrating that the optimal data composition varies with the scale of the training data. Finally, they introduce AutoScale, which automatically predicts optimal training data compositions at larger scales based on compositions optimized at smaller scales. \nAdditionally, their evaluation of AutoScale on both decoder-only and encoder-only models demonstrates its ability to achieve computational savings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The experimental setup is not entirely convincing:\n\n* The models used (a 774M decoder-only model and a 110M encoder-only model) are relatively small compared to today’s large language models, making it difficult to gauge performance at a larger scale.\n* The data size is limited to 3B, 5B, and 10B tokens, with results in Table 1 only reflecting the 3B set.\n* Figure 3(b) lacks explanation, and the cola baseline and DDO performance seems unusually low, falling below random guessing (0.5). Also, stsb baseline seems low too.\n\n2. The evaluation of downstream tasks could be expanded. It would be helpful to see the models' performance on more complex tasks, such as mathematical problem-solving."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- I am a bit unclear about your definition of \"equivalent data size\" at L243, what's the equivalence about (i.e., which size and which size)? Note that I understand the meaning of $N_I^0$, just wondering the terminology here.\n- Maybe I missed something, but how do one control the budget for the next $N^(3)$? It seems the amount of tokens is defined by the initial weights of $N^(1)$ and $N^(2)$. Or in other words, say I need to find a optimal weight for a total token of 300B, how should I start with $N^(1)$ and $N^(2)$?\n- Adding to the prior question, if the optimal ratio of each domain follows a exponential function, after taking a few data points using AutoScale, can we simply fit the exponential function instead of using the AutoScale iterative method? You seem to be using that in Figure 1 (d). If y es, this simply answer my question above.\n- While the problem of different data scale is resolved with a scaling law solution, can we also use a similar approach on model scale? Even though the cost of using a small amount of data for a larger model should be within a low percentage of the total training cost, setting up the experiment for the larger scale is non-trivial. It'd be nice to have a function that can predict the loss across model scales."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- This paper analyzes an important problem, data weighting of LLM training, which can improve the training efficiency with reasonable cost. It also presents an actionable algorithm for LLM training.\n- The proposed method assumes a power law formulation which makes the data weighting problem practically solvable. It is important to point out that data weights is scale dependent.\n- The empirical results and findings on the corpus weighting align with common belief of the community, such that further up-weighting high quality source is less effective, and books and web documents continue to be important at larger scale. This shows that the proposed method has strong explanatory ability.\n- The experiment is quite thorough, considering the cost for training models is quite high even at small scales."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of predicting optimal data mix for a given compute budget (i.e., fixed total token count and model size). A key challenge here is that the optimal domain weighting may change at different scale, hence it is inaccurate to use smaller models to predict large model performance, while solving the optimization problem at the large model scale directly is computationally infeasible (requires multiple retraining).\n\nThe paper proposes a method that work on one domain at a time by fixing the rest of the data constant (hence the loss is constant for other domains too), and estimated a scaling law per domain. The power law parameters $\\gamma_i$ and $l_i$ can be easily estimated, which approximate a regular data scaling function where $l_i$ is the irreducible loss of that domain. \n\nAfter the power law of each domain is found, the final objective is to mix the data so that the loss is minimize while keeping sum of the tokens reaches the budget, which becomes a convex function that can be solved efficiently. This gives the DDO method. The different $\\gamma_i$ explains why there is a differet mix at different stage.\n\nA method \"AutoScale\" is further proposed to obtain the data weight of a larger token budget, by iteratively mxing two data weights at different scale to create the weights of the next one. \n\nThe proposed approach is tested on models like GPT-2 (autoregressive) and BERT (bidirectional), showing improved convergence rates and downstream task performance. Empricially, the results show AutoScale’s ability to shift data weights, favoring diverse data sources like CommonCrawl at larger scales while reducing reliance on traditionally high-quality, standard-format data such as Wikipedia. These findings match the empricial findings of the data weights used for prior succesful models such as Llama."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I wonder if carbon footprint of the experiments here should also be reported\n- The presentation is good but can still be improved. The core method part can be improved with additional intuitive explanations and better use of notations. Further, I have noted down some minor typo/notation errors:\n\nTypo or notation:\n - L258: $N_i^+2$ should probably be $N_i^+$\n - L526: Ecoder -> Encoder \n - L200: probably should better use $N_i$ instead of $|S_i|$\n - L281: $w_i'*N'$ appears twice.\n - Figure 2 caption: meaning of (original = 1) is a bit unclear\n - L380: $N$ is missing in the first equation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Fig 1: [a] suggests that you've tuned 6 models between 30M and 1.2 B tokens, yet [c] shows only three models being used to fit the predictor model. Why is that? where are the other data points? And are *all* of the linear fits R2=0.998? Is that the average R2? Also, [d] shows the predictions of the model extrapolated past 1.2 B to 307 B? Why are you not showing the training data points (between 30M and 1.2 B) as well? And isn't the largest model you look at trained to 10B? why show this extrapolation to so far beyond where you explore? This seems misleading. The x-axis should say (log scale) as well. In [b] the color used for the 1.2B model is the color used for the 0.6B model in [a]. And there is a typo in the title ('scale - depedent' -> \"dependent\"). In [e] the 38% improvement looks to be overstated due to the noise of those evaluation curves, you could just as easily pick out the peak in Autoscale curve at step 86k and the point in the Uniform curve at step 100k to get a smaller improvment result with the same underlying data. \n\nTable 1: boolq has the Autoscale value bolded as 'best' but the Data Mixing Laws value is greater. Also, consider place your method on the bottom row separated by a thin line.\n\nFig2: What is being depicted here? Is this showing power laws being fit to 3 empirical datapoints? Is the first column of points supposed to be at 0? It looks like the points are at [0.2, 1, 3] on the x-axis? \n\n\nNits:\n\nThroughout: \"AutoScale\" is consistently the wrong font size. Please fix. Similarly, in section 5.2 the font size of the methods needs to be fixed. And in line 418 'from' is included in the method font instead of the default font.\n181: work contribute -> contributes\n379: N^(1)* is missing the N in summation\n465: much lowered -> much lower\n155 'a consistent shift can be observed', please be more specific, what is shifting, how is it consistent?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The general problem the authors attempt to address is important, and the assessment that present methods are limited and that performance headroom is available is well-framed. The code is open-sourced. The evaluations presented are limited, but positive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work presents a method to estimate optimal data domain weights at large training-data scales by extrapolating via exponential functions fit to smaller-scale training runs. The proposed method is evaluated on GRT-2 Large + RedPajama and BERT pretraining, and compared to extant method baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In general, the writing is difficult to parse. It is frequently frustratingly vague, including in the Abstract, where the actual method is alluded to but not elucidated. In the actual methods section, important questions about the method are unanswered, leaving the method underspecified (is the learning rate schedule (linear? presumably linear decay?) the same for the tuning runs as for the final run? Is the decay timing adjusted to the compute budget? The value of the final validation loss hinges critically on this - yet it goes unmentioned). There is no addressing of the profound difficulties this method (and others like it) can be expected to have around epochs for individual datasets. A more thorough analysis would identify and investigate this issue with experiments demonstrating specific datasets being sampled for > 1 epoch, and the subsequent breakdown of the \"scaling law\" prediction. Evalutation is purely comparative to other methods, and does not assess to what extent the predicted 'optimal' values might differ from more expensively traditionally-derived 'optimal' values. No discussion of the relative cost of the method (with its 'linearly scaling' cost in the number of datasets) is mentioned, though it is clear it would become prohibitively expensive for dataset mixtures with more than a handful of individual datasets. The method proposed is prohibitively expensive at large model sizes, and seems unlikely to scale to larger compute budgets even at small dataset sizes due to the issue of datasets passing through multiple epochs, which is unaddressed in this work. This limitation goes unmentioned."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Even at a smaller scale, I see opportunities of clear promise where we could have had more points between 0.3B and 1.2B and show some trend. Any specific reason this was not done/ increased to more than 1.2B ? With scale, a lot of problems disappear that are apparent at lower scales."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* Very strong work in terms of the hypothesis and experimental setup albeit at smaller scales. The promise of finding optimal weights for training large networks without having to guesstimate it is a very attractive proposition. \n* The plots are really well done. They drive the main idea of the paper very well(especially Fig 1 (a, e) )"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a method called “AutoScale” that helps predict the optimal composition of pre-training data for LLMs. It challenges the conventional notion of determining this via small scale experiments and simply applying them to a large scale where two axes change (data scale, parameter count). The experiments show a very promising line of research and it was a pleasure to read. \n\nI couldn’t check the math as well as I would have liked to."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I would like to list the following weakness fully ensuring the authors that I am not unreasonable and am completely open to increasing my score if these are addressed/answered satisfactorily.\n* The work proposes using a different approach to finding optimal data weights for a given pre-training compute budget. This is well explained via results but does in fact require training the original size model. Given that we obtain suboptimal performance via the conventional way( smaller model, fewer data), an analysis showing how much performance could be gained by spending the compute and training these (equal parameter) networks would be useful. \n* For Takeaway 1, Fig 1(b) only has 2 data points. Additional points would help make the case stronger. It’s a tough sell to make such a bold statement with two data points. But I’m hoping I am wrong :) \n* Maybe I missed this, but the repeated claims that Wikipedia should be sampled less at a higher scale is a result of the OLS fit. But no experiment actually confirmed this fact in the paper, right ? Since the max scale was 1.2B ? Please correct me if I’m wrong.\n\nGeneral Comments/Typos:\n* [Section2] : “this work contribute” -> “this work contributes”\n* [Section 3.1] : wi = Ni/N => wi = Si/N ?\n* [Algorithm 1] : Train the model on data S = ({S1 . . . Sm} \\ Si) => S = ({S1 . . . Sm} \\ Sj) ? \n* Some of the font sizes are very distracting to read."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose AutoScale, which automatically predicts compute-optimal data compositions for training LLMs at the target training data scale."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024autoscale,\ntitle={AutoScale: Automatic Prediction of Compute-optimal Data Compositions for Training {LLM}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=54KcduuYeG},\nnote={under review}\n}"
},
"abstract": {
"value": "Domain reweighting is an emerging research area aimed at adjusting the relative weights of different data sources to improve the effectiveness and efficiency of language model pre-training. This paper demonstrates that the optimal composition of training data from different domains is scale-dependent, challenging the existing practice of determining optimal mixtures through small-scale experiments and directly applying them at larger scales. We derive an analytical model for the dependence of optimal weights on data scale and introduce *AutoScale*, a novel, practical approach for optimizing data compositions at potentially large training data scales. *AutoScale* first uses a principled optimization framework to find optimal compositions at smaller, feasible scales, then predicts optimal compositions at larger scales using our derived model. Our evaluation on GPT-2 Large and BERT pre-training demonstrates *AutoScale*'s effectiveness in improving training convergence and downstream performance. Particularly, for GPT-2 Large on RedPajama, *AutoScale* decreases validation perplexity 28% faster than baselines, with up to 38% speed-up over unweighted training, achieving the best performance across downstream tasks. This work provides insights into the varying benefits of data sources across training scales for language models, contributing to the burgeoning research on scale-dependent data curation. Code is open-sourced"
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Data Curation",
"Data Composition",
"Scaling Laws",
"Data-centric AI",
"Large Language Models (LLM)"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/39bf08753f3b74d26b72f7d4723fb0ebbcd5b1dc.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/61c26777f8a43234e782dd6eec90b596cb6d9c7f.pdf"
},
"title": {
"value": "AutoScale: Automatic Prediction of Compute-optimal Data Compositions for Training LLMs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
54XlM8Clkg | Point Cluster: A Compact Message Unit for Communication-Efficient Collaborative Perception | main | Active | Point Cluster;Collaborative Perception | applications to computer vision, audio, language, and other modalities | 6;6;6;8 | 2;3;1;4 | 3;4;2;4 | 3;3;2;4 | 3;4;3;4 | 6.5 | 2.5 | 3.25 | 3 | 3.5 | 0.774597 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 1
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "No questions."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. **Bandwidth Efficiency**: The proposed \"point cluster\" message unit significantly reduces communication bandwidth by capturing only essential foreground object information in a sparse format, making it highly efficient compared to dense representations like raw point clouds and BEV maps.\n\n2. **Enhanced Object Detection Accuracy**: By preserving detailed structural and semantic information, the point cluster improves object detection accuracy, especially in complex multi-agent scenarios, demonstrating superior performance on established collaborative perception benchmarks.\n\n3. **Robustness to Real-world Challenges**: The CPPC framework includes robust mechanisms for handling pose errors and time delays, crucial for real-world applications. Parameter-free solutions for pose and latency issues make it adaptable to various levels of noise without additional tuning.\n\n4. **Clear and Effective Writing**: The paper is well-written, with a clear explanation of the proposed methods and thorough descriptions of experiments, which makes the complex technical content accessible and supports the credibility and reproducibility of the research findings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a new message unit, the \"point cluster,\" to improve collaborative perception efficiency in multi-agent systems. Unlike existing message units such as raw point clouds, bounding boxes, or BEV maps, the point cluster format minimizes bandwidth usage while retaining essential structural and semantic information. Representing objects with point coordinates, a cluster center, and semantic features, this approach allows efficient inter-agent information exchange, enhances object alignment, and preserves object structure for more accurate detection. A new framework, CPPC, combines point packing and aggregation modules, addressing issues like bandwidth constraints, time delay, and pose errors, and achieves state-of-the-art performance on several benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In terms of methodology, there is a noticeable reliance on FSD, which slightly reduces the originality. However, overall, the approach is still reasonable."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "If we assume some objects are moving at high speeds, I think the errors caused by delay would vary for each object. It seems this aspect might not be covered, does the proposed method handle this problem?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "This paper systematically analyzes the advantages and disadvantages of existing collaborative perception methods (early, intermediate, late collaboration) and proposes a rational solution considering the inherent trade-off between communication efficiency and perception performance. \nThe proposed point cluster is a compact message unit that selectively combines the strengths of existing methods and efficiently represents both structural and semantic information of objects. \nCPPC introduces parameter-free approaches to handle pose error and time latency issues encountered in real-world scenarios, ensuring robustness to various noise levels. \nThis paper clearly identifies the current challenges and limitations in the field of collaborative perception and demonstrates how the proposed method can rationally solve them.\nThrough clear problem formulation, the collaborative perception problem is mathematically defined, and based on this, the motivation and principles of the proposed method are lucidly explained."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel message exchange unit called point cluster for collaborative perception. Point clusters can efficiently and compactly represent an object's location, structure, and semantic information. The proposed CPPC framework includes point cluster-based encoding, packing, exchange, and integration methods. CPPC improves both communication efficiency and perception accuracy while being robust to various real-world noises. Experimental results on various benchmarks demonstrate that CPPC significantly outperforms existing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Based on the proposed technique, the amount of data that can be transmitted is limited. Therefore, it is crucial to prioritize foreground objects and bring them in a sparse manner to distinguish the overall shape, rather than bringing only a few parts. Regarding this aspect, I wonder if there are any criteria or tendencies for determining which points should be selected and what rules should be followed. Additionally, I am curious about the extent of performance degradation when background information is included instead of solely focusing on the foreground. \n\nIf we follow the logic mentioned above, I wonder if we can expect sufficient performance improvement in early or late collaboration by only bringing foreground information and adjusting it according to the available bandwidth. \n\n- In Figure 4(a), there is a section where the performance of the proposed method rises sharply. I am curious about the reason behind this phenomenon and how the performance of the proposed method would differ from existing technologies if the communication volume is lower than this point. \n- On Page 7, Line 373, it is mentioned that the proposed method shows improvements of 5.7%, 7.3%, and 12.8%, but it is unclear which methods are being compared. \n- On Page 7, Line 377, the term \"late collaboration\" is used. I am curious about which specific technique this refers to."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "My current concerns are mainly about the technical contributions of the proposed method. I may change my rating after reading other reviewers' comments and the authors' rebuttal. Here are some questions I wish could be addressed:\n\nQ1: In the proposed SD-FPS method, how to balance the weights of semantic and spatial distances ($\\lambda_s$ and $\\lambda_d$)? How will they affect the performance of the proposed method?\n\nQ2: Does the proposed method perform well in a crowded environment where many objects are exhibited?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "\\+ This paper is written well and the motivation behind the proposed method is easy to follow.\n\n\\+ The proposed method is a systematic solution for multi-agent perception, which exploits point clusters as the intermediate representation with controllable communication costs, to balance efficiency and effectiveness. \n\n\\+ The proposed method outperforms several previous methods on three public datasets consistently. The extensive experimental results validate the effectiveness of the proposed modules."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a communication-efficient collaborative perception (CPPC) framework for vehicle-to-everything autonomous driving. Unlike previous methods that mainly rely on BEV features, the CPPC framework leverages point clusters to control its computational complexity and alleviate issues like high-level information loss. Specifically, the CPPC framework consists of a point cluster picking module, a pose alignment and latency compensation module, and a point cluster aggregation module to generate cluster features for subsequent predictions. The CPPC framework outperforms existing BEV-based methods on three public datasets, including V2XSet, OPV2V, and DAIR-V2X-C."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "\\- There are four thresholds used in the proposed method, which may be hard to choose in complex or unseen scenarios.\n\n\\- The proposed method mainly considers BEV-based methods for comparisons, however, it is unclear how significant its technical contributions are compared with previous point-cloud-based methods (e.g., Cooper, F-cooper). I am concerned about this because the techniques used in the main components of the proposed method, including point cluster picking, pose correction, and the SD-FPS, are quite common in the research communities of point cloud processing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In section 3.5, can the authors detail \"spatial cluster matching\"? If the pose between two agents is not accurate, how to match point clusters from different agents into a single object?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Using point cluster as an intermediate message unit is novel and well-motivated.\n2. The proposed CPPC framework, as well as the PCE, PCP and PCA modules are solid.\n3. State-of-the-art performance on mainstream benchmarks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes to use Point Cluster as the message unit for collaborative perception. Previous intermediate methods use BEV map as the message unit, which suffer from weak object features, inefficient message aggregation and vague boundary. The proposed point cluster-based framework can solve these problems well. Extensive experiments on V2XSet, OPV2V, and DAIR-V2X-C demonstrate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My main concerns are about the effectiveness and efficiency of the proposed method, for which I think more ablation studies are required.\n1. Ablation studies on PCP, PCA, pose correction and latency compensation are required.\n2. The authors claim the BEV representation is inefficient during aggregation. Is there any comparison between BEV and Point Cluster-based methods?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024point,\ntitle={Point Cluster: A Compact Message Unit for Communication-Efficient Collaborative Perception},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=54XlM8Clkg},\nnote={under review}\n}"
},
"abstract": {
"value": "The objective of the collaborative perception task is to enhance the individual agent's perception capability through message communication among neighboring agents. A central challenge lies in optimizing the inherent trade-off between perception ability and communication cost. To tackle this bottleneck issue, we argue that a good message unit should encapsulate both semantic and structural information in a sparse format, a feature not present in prior approaches. In this paper, we innovatively propose a compact message unit, namely point cluster, whose core idea is to represent potential objects efficiently with explicitly decoupled low-level structure information and high-level semantic information. Building upon this new message unit, we propose a comprehensive framework CPPC for communication-efficient collaborative perception. The core principle of CPPC is twofold: first, through strategical point sampling, structure information can be well preserved with a few key points, which can significantly reduce communication cost; second, the sequence format of point clusters enables efficient message aggregation by set matching and merging, thereby eliminating unnecessary computation generated when aligning squared BEV maps, especially for long-range collaboration. To handle time latency and pose errors encountered in real-world scenarios, we also carefully design parameter-free solutions that can adapt to different noisy levels without finetuning. Experiments on two widely recognized collaborative perception benchmarks showcase the superior performance of our method compared to the previous state-of-the-art approaches."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Point Cluster",
"Collaborative Perception"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a707dc6cc0d9f85f9b3b3daa8da754cb500e3e36.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/e2e36c951a5a727fdad79b13ce043dc1fb56bbcc.zip"
},
"title": {
"value": "Point Cluster: A Compact Message Unit for Communication-Efficient Collaborative Perception"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
54jmXCHrTY | Understanding Self-supervised Learning as an Approximation of Supervised Learning | main | Active | representation learning;self-supervised learning;contrastive learning;theoretical framework | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;5;5;8 | 4;4;4;3 | 2;2;3;3 | 2;2;3;4 | 2;3;3;4 | 5.25 | 3.75 | 2.5 | 2.75 | 3 | -0.889297 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weakness."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. They proposed a novel theoretical framework that connects supervised and self-supervised learning.\n2. They introduction of the concepts of prototype representation bias and balanced contrastive loss, which play important roles in the connection.\n3. They offer some practical insights based on their framework."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper formulates self-supervised learning (SSL) as an approximation of supervised learning (SL), deriving a loss function related to contrastive losses. The author introduce the concepts of prototype representation bias and balanced contrastive loss, providing some insights into SSL. They conduct experiments to validate their theoretical results"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I think the main issue with this paper is its insufficient theoretical contribution. Using the prototype representation bias, as defined by the authors, to represent the gap between SSL and SL is overly simplistic. In reality, no practical augmentation can achieve a very low prototype representation bias unless label information is available. Moreover, augmentations with the same prototype representation bias might exhibit vastly different downstream performance, depending on the finer relationship between the augmentation and the data—a topic the authors have not addressed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "In the experimental section with balancing parameters, how are the values in the Figure 4 obtained? Is this the single run or averaged across n?\n\nCan you please elaborate more on $\\nu$ used in the proposed total loss? It seems to not be available and has not analogous term in NT-Xent loss."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "It is safe to say that theoretical understanding of self-supervised learning methods is relatively lacking despite increasing interest and effort. Thus, the submission addresses an important topic and provides a clear connection to the supervised counterpart. It is generally well structured which makes it easy to follow, and provides a clear intuition of the approach. The approach covers typical components of self-supervised methods like Siamese networks, data augmentation and contrastive loss."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The submission proposes a derivation of self-supervised learning problem as an approximation of supervised learning. To this end, in supervised learning formulation the authors replace the labels with prototype representations given by an oracle. These prototype representations can then be modelled via the expected representation of objects sampled from a conditional distribution (x conditioned on label y) and across augmentations, i.e. $\\mathbb{E}_{t,X|y} \\ f(t(x))$. Learning under this formulation can be achieved via triplet loss, i.e. attracting positive samples (sample from one class) and repelling negative samples (samples from different classes).\n\nIn self-supervised learning, however, one has no access to labels which renders prototype representations unavailable. Instead, the authors use surrogate prototypes, i.e. expected representation of sample across its augmentations, i.e. $\\mathbb{E}_t \\ f(t(x))$. The authors then provide an upper bound on the loss, which yields objective called balanced contrastive loss, and show its connection to NT-Xent loss used in SimCLR. One may measure the bias introduce by the surrogate by taking the expectation of the difference between the true and surrogate representations, called prototype representation bias. The bias is shown to correlate with downstream performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the submission addresses an interesting connection to supervised learning, the connection has been addressed in previous literature [1], for example attraction/repelling and normalization has been brought up in [2]. Further, the consequences of the proposed framework seem to provide limited insight. While they provide supporting arguments for siamese architecture, data augmentation and infoNCE loss from a supervised perspective, the framework seem to conflict with the use of projection head and aggressive data augmentation. Let me elaborate on this further.\n\nIf SSL is an approximation of supervised learning, then on downstream task the use of the output of projection head should be more beneficial than the pre-projection features. However, this is not what one faces in practice. Interpreting SSL via supervised learning may inhibit understanding the use of projection head. Highlighting the mismatch between pretext and downstream tasks is important if we to gain practical consequences in designing SSL methods. The proposed interpretation, on the contrary, seem to sweep this distinction under the rug.\n\nFurthermore, SSL methods use more aggressive augmentation strategies than those used in supervised learning, while aggressive augmentation negatively impacts supervised learning [3]. This is also something that seem to be out of tune with the proposed approach.\n\nSimCLR-type losses are well understood from many perspectives, including spectral and information-theoretic [4,5], so it is not fair to render them as only intuitively and experimentally supported.\n\nThe authors introduce assumptions on the choice of similarity measure and use of normalization to derive the proposed loss, which is shown to generalize NT-Xent used in SimCLR. 
The assumptions are needed for the derivation, but I don't think one would need to additionally show their significance empirically, especially when this is already an established practice and has been ablated multiple times in the literature. Similar issue with experiments on balanced dataset. This seems to eat up space and doesn't reveal anything new about SSL methods.\n\nReturning to the generalization of supervised learning problem from predicting labels to predicting prototype representations, this step is important but receives limited discussion in the submission. Since there are multiple target tasks for supervised training, the ideal prototype representations are as well target-specific here. How does this affect the overall framework? \n\n[1] Saunshi, Nikunj, et al. \"A theoretical analysis of contrastive unsupervised representation learning.\" International Conference on Machine Learning. PMLR, 2019.\n\n[2] Wang, Tongzhou, and Phillip Isola. \"Understanding contrastive representation learning through alignment and uniformity on the hypersphere.\" In International conference on machine learning, pp. 9929-9939. PMLR, 2020.\n\n[3] Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International conference on machine learning. PMLR, 2020.\n\n[4] Balestriero, Randall, and Yann LeCun. \"Contrastive and non-contrastive self-supervised learning recover global and local spectral embedding methods.\" Advances in Neural Information Processing Systems 35 (2022): 26671-26685.\n\n[5] Oord, Aaron van den, Yazhe Li, and Oriol Vinyals. \"Representation learning with contrastive predictive coding.\" arXiv preprint arXiv:1807.03748 (2018)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1- How would the assumptions, such as balanced data and the specific choice of cosine similarity, affect the generalizability of this framework to domains with significant class imbalance or non-standard data representations?\n\n2- To what extent can prototype representation bias be quantitatively minimized through practical data augmentation strategies? Would further analysis on this bias's impact across different datasets yield consistent trends?\n\n3- Could the theoretical framework be adapted or extended to include asymmetrical architectures, given their prominence in modern self-supervised learning algorithms? What additional assumptions might be required?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1- The paper's primary strength lies in its establishment of a rigorous theoretical framework that bridges supervised and self-supervised learning, addressing a significant gap in the literature by grounding widely-used contrastive losses in theory. This approach contributes to the field by potentially enhancing the interpretability and rationale behind existing self-supervised methods.\n\n2- The authors derive a self-supervised learning loss function from first principles, aligning it with established methods like the NT-Xent loss in SimCLR. This derivation offers the self-supervised learning community a deeper understanding of why and how particular loss functions, often implemented heuristically, are effective.\n\n3- Introducing the concept of prototype representation bias, the paper reveals how self-supervised learning can be systematically evaluated and potentially optimized by minimizing this bias through data augmentation strategies. This is an innovative step that contextualizes the role of representation clustering within the self-supervised paradigm."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper theoretically models self-supervised learning as an approximation to supervised learning. The authors derive a self-supervised loss related to contrastive losses, including InfoNCE, while introducing concepts like prototype representation bias and balanced contrastive loss. They apply the framework to analyze components of self-supervised learning, notably SimCLR, and explore the effects of balancing attraction and repulsion forces."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1- The authors define a surrogate prototype representation based on transformations (augmentations) of the same data point, but this choice may vary substantially across datasets and problem domains. Since many real-world applications use domain-specific augmentations (e.g., color transformations for medical images), the theoretical guarantees provided may not hold uniformly. A sensitivity analysis or empirical study on the effects of diverse augmentation choices would strengthen the validity of the surrogate representation assumptions.\n\n2- The paper introduces parameters (e.g., balancing factors like \n\\alpha and \\lambda Equation 12) that govern the relative strengths of attraction and repulsion forces in the derived loss function. However, it provides limited insights into how these parameters impact performance across diverse datasets and tasks. A deeper empirical analysis or sensitivity study on these parameters would make the findings more robust and practically usable. Additionally, discussing guidelines for optimal parameter selection based on dataset characteristics would improve the utility of the paper for practitioners.\n\n3- The paper situates itself within the context of contrastive learning methods, particularly NT-Xent and InfoNCE losses. However, there are alternative frameworks in self-supervised learning, such as clustering-based approaches (e.g., DeepCluster, SwAV) and bootstrapping methods (e.g., BYOL). While the authors mention these methods briefly, they do not provide a clear comparison or discussion of how their theoretical framework might align or diverge from these alternative approaches. Providing such a comparison could position the framework more effectively within the larger self-supervised landscape.\n\n4- The paper’s findings, especially around balancing attraction and repulsion forces, suggest potential for optimization. 
Yet, there is minimal exploration of how the theoretical insights could inspire specific algorithmic modifications or optimizations for contrastive learning. For example, insights into prototype bias could be used to dynamically adjust the loss during training. Discussing these possibilities would improve the paper's impact by suggesting actionable ways to leverage its contributions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the theoretical analysis section, only the proof of two upper bounds is provided. My question is: What is the relationship between these two upper bounds and the supervised learning paradigm? In other words, what is the significance of their insights for the paper?\n2. What is the significance of self-supervised learning from this perspective? At a high level, beyond some conclusions that are very similar to [a], it is difficult to capture additional information. \n\nReference:\n[a] Tongzhou Wang and Phillip Isola. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In International conference on machine learning, pp. 9929–9939. PMLR, 2020."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well-organized, with clear subsections that logically flow from theoretical foundations to empirical validations.\n2. The mathematical derivations and proofs in this paper seem appropriate."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper focuses on understanding self-supervised learning, where the authors theoretically formulate the self-supervised learning problem as an approximation of a supervised learning problem."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although the authors claim to propose a new perspective for understanding SSL, it seems to me that there is significant overlap with [a]. Unfortunately, the authors do not provide a detailed analysis of the similarities and differences between the two.\n2. The correlation between theoretical analysis and insights in this article is weak: In the theoretical analysis section, only the proof of two upper bounds is provided. My question is: What is the relationship between these two upper bounds and the supervised learning paradigm? In other words, what is the significance of their insights for the paper?\n3. In Section 6, the proposed new SSL format does not seem to have significant advantages. In addition, the authors also do not conduct sufficient verification experiments.\n4. In fact, there are many related works (like [b-c]) on the understanding of SSL, unfortunately, this paper does not provide a detailed discussion and analysis of their differences.\n\nReferences:\n[a] Tongzhou Wang and Phillip Isola. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In International conference on machine learning, pp. 9929–9939. PMLR, 2020.\n[b] Tian Y, Chen X, Ganguli S. Understanding self-supervised learning dynamics without contrastive pairs[C]//International Conference on Machine Learning. PMLR, 2021: 10268-10278.\n[c] Purushwalkam S, Gupta A. Demystifying contrastive self-supervised learning: Invariances, augmentations and dataset biases[J]. Advances in Neural Information Processing Systems, 2020, 33: 3407-3418."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We provide a theoretical framework that conceptualizes self-supervised learning as an approximation of supervised learning."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024understanding,\ntitle={Understanding Self-supervised Learning as an Approximation of Supervised Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=54jmXCHrTY},\nnote={under review}\n}"
},
"abstract": {
"value": "Self-supervised representation learning has mainly advanced in an empirical rather than theoretical manner. Many successful algorithms combine multiple techniques that are supported by experiments. This approach makes it difficult for the community to understand self-supervised learning fundamentally. To help settle this situation, we take a principled approach. We theoretically formulate a self-supervised learning problem as an approximation of a supervised learning problem. From the formulated problem, we derive a loss that is closely related to existing contrastive losses, thereby providing a foundation for these losses. The concepts of prototype representation bias and balanced contrastive loss are naturally introduced in the derivation, which provide insights to help understand self-supervised learning. We discuss how components of our framework align with practices of self-supervised learning algorithms, focusing on SimCLR. We also investigate the impact of balancing the attracting force between positive pairs and the repelling force between negative pairs. The proofs of our theorems are provided in the appendix, and the code to reproduce experimental results is provided in the supplementary material."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"representation learning",
"self-supervised learning",
"contrastive learning",
"theoretical framework"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d22ef880b5556431124be5fcad3929ac50e13d2c.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/bf919428491c39d1a01b0f4060f980afdcffa801.zip"
},
"title": {
"value": "Understanding Self-supervised Learning as an Approximation of Supervised Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
55EO8gSCBT | Experimental Design for Nonstationary Optimization | main | Active | plasticity;continual learning;experiment design | transfer learning, meta learning, and lifelong learning | 3;5;5;5 | 4;3;4;5 | 2;2;3;2 | 2;2;3;3 | 3;2;2;2 | 4.5 | 4 | 2.25 | 2.5 | 2.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "What are the labels for both lines in Figure 3?\n\nWhat happens if you plot training loss and test loss in Figure 4? I suspect that could reveal more correlation. Particularly for ResNet in Figure 4b, as it gets to 100% train accuracy in most cases. \n\nIn Figure 5d, do you mean p(Method ranking **does not** change) instead of p(Method ranking changes)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This work is novel because there has not been any explicit focus on empirical design in plasticity research. It could be an important contribution to the community as it could speed up progress and provide a more unified focus for the field. \n\nThe studies about hyper-parameter selection protocol are useful as they could help develop methods that overfit less.\n\nResults show that the community needs to focus on test plasticity, which is interesting and needs to be evaluated further.\n\nThe study about the number of seeds needed for hyper-parameter selection and evaluation of a method is good, and it could save computation on many future experiments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors study the experimental design in non-stationary optimization. They explore various aspects of experiment design, from hyper-parameter selection protocols to the number of seeds needed for hyper-parameter selection. The paper contains some interesting results about hyper-parameter selection, number of seeds, experiment protocols, etc."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper is not well-written and contains a lot of small errors, which reduces my confidence in the quality of the paper and the results presented therein. For example, there are no labels in Figure 3, and line 424 says \"Figure 5b and 5b\", a typo in the title of section 4.4, among many others. I suggest the authors spend some more time on writing (maybe by using Grammarly) and making sure the presentation is up to the mark.\n\nThe experiments with ResNet and permuted CIFAR are not useful. When inputs are permuted, all the spatial information in the image is lost. In such a case, convolutional networks like ResNet-18 are not useful. These experiments have not been done in the community either, people have only used feed-forward networks with Permuted Input experiments. The ResNet experiments on Permuted CIFAR-100 should be removed from the paper.\n\nWhat is plotted in Figure 2? Is it the performance of the best hyper-parameter configuration or the average performance across hyper-parameters? And what does \"best\" mean? Highest training accuracy or test accuracy or train for Figure 2a and test for Figure 2b? \n\nThe results presented in Figure 4 are used to argue that \"improving training does not end up correlating with ... improving model performance\" (lines 370-371). But that is not what the figure shows. Figure 4a clearly shows that there is a positive correlation between training accuracy and test accuracy. What Figure 4b shows is that for the best hyper-parameters, there is a weak or no correlation between the two. That just means that after a point, trainability does not improve generalizability. But that does not mean \"improving training does not ... correlating ... improving model performance\". The claim on lines 370-371 and the 3rd bullet point in section 5 need to be changed. 
\n\nIn the introduction and Section 4.5, the paper asks, \"How many tasks do you need to include in your training sequences?\" The answer should be infinity because we are in a lifelong learning setting. Do the authors mean, \"How many tasks do you need to include in your training sequences **for tuning hyper-parameters**?\" or does that statement mean something else?\n\n\nThis paper can be a good contribution to the community, but it is not up to the mark yet. I'm willing to increase my score if the authors address my concerns."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Figure 2 (presentation): It is difficult to infer conclusions from this plot due to the number of baselines and settings being compared in a single graph. I wonder if this information is not better represented in a table, because the relative ordering of the settings (on the x-axis) and the placement of MLP next to ResNet does not have any semantic meaning in the presentation of these results.\n\n- Figure 2 (results): I am surprised that there is so much variation in the results for different combinations of methods and nonstationarities. One thing potential problem is that the ranking is too sensitive to statistically insignificant performance differences. Do you know how these results would look like if average test accuracy was reported instead?\n\n- Figure 3: The lines in these graphs are not labelled, I assume one is for protocol 1 and the other is for protocol 2? If that is the case, I do not see that large of a difference between the two (except on shuffled CIFAR-10). Thus, I am uncertain about the conclusion that \"protocol 2 transfers better to the unseen data\". The conclusion suggested at the end of Section 4.1 does not seem well-supported by this data.\n\n- Clarification for statistical bootstrapping: what exactly is being resampled for the estimate? It is not clear how \"resampling the seeds\" means, because bootstrapping usually involves resampling from some data to construct an estimator.\n\n- Clarifying seeds in Section 4.1: Why are the total number of seeds quoted (n=20) unequal between protocol 1 and 2? It seems like that the seeds are partitioned between model selection and evaluation? As I understand the second paragraph, 10 seeds are used for model selection and 10 seeds are used for evaluation, yielding the total of 20? 
But in that case, why does protocol 2 only use 5 seeds?\n\n- There seems to be no clear takeaway in Section 4.2: it would be helfpul to also investigate the contributing factors for generally well-performing methods. For example, are the methods performant because they are more robust to hyperparameters (and hence, protocol 2 can easily identify good hyperparameters)? I do not think Appendix C answers this question.\n\n- Section 4.3: The strong conclusion here is valuable. I wonder if this conclusion depends on the plasticity-preserving method. I am not able to tell from Figure 4, but presumably some methods may better correlate train and test accuracy, which would be hidden in this combined analysis.\n\n- Section 4.4: Again, I wonder if the number of seeds needed to identify a good hyperparameter configuration depends more strongly on the method used for training, rather than the aggregate analysis.\n\n** Minor Comments\n- Many instances of \"boostrapping\" should be replaced with \"bootstrapping\""
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The problem being addressed in this submission is timely, understudied and important. \n- Empirically, the submission poses and answers several important questions regarding best practice for non-stationary optimization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors investigate the experimental procedures that are used to evaluate algorithms in continual learning settings.\nThe submission points out that these practices are usually unaddressed or only implicitly addressed in current experimental work in continual learning.\nBy focusing on a well-curated set of datasets, nonstationarities, methods and architectures, the submission poses a critical analyses of these practices and implicit assumptions.\nA few interesting conclusions include that maintaining trainability may not be indiciative of generalizability, and that several tasks may be needed to evaluate a set of hyperparameters for continual learning.\n\nI am currently rating this paper as marginally below acceptance. However, I am willing to increase my score if some of the concerns below are addressed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While most of the paper provides concrete takeaways which question current practice, there are some inconclusive results that could use further clarification\n- Several of the results aggregate performance across several different categories of methods (e.g., architectural(crelu), regularization and resetting). A few analyses on individual methods, non-aggregated, would improve the empirical results and add clarity.\n- The presentation of the paper is mostly good, but with room for improvement (see below)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could the authors conduct experiments on more realistic benchmarks from the literature or propose a new, more realistic benchmark? \n\nIn Fig. 9, the Online baseline (with no LoP technique) outperforms some LoP algorithms. What is the reason for this? Could it be due to insufficient hyperparameter tuning? \n\nThe paper suggests that test accuracy rankings differ significantly from train accuracy rankings. Could the authors quantify this gap for different algorithms? Although the rankings change, the actual performance difference might be minor. \n\nHow would CReLU perform compared to other methods if it used the same number of weights? \n\nWhat do the two curves in Fig. 3 represent? There are no legends. \n\nThere are also a few typos on page two."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Continual learning is a rapidly growing and important research area. An independent comparison of existing algorithms under a unified setting aids in identifying the most effective techniques, thereby guiding future research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents an empirical study comparing various existing methods to mitigate plasticity loss in continual learning. The paper makes two contributions: (1) a comparison of existing methods under a unified setting, and (2) an evaluation of and suggestions for hyperparameter selection protocols, number of seeds, and train vs. test accuracy evaluation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The study setting has significant limitations and is far from realistic for continual learning. My main concern is the relevance of the proposed recipes to more realistic continual learning scenarios. Below, I summarize different ways in which the study’s setting is limited. \n\n- Distribution shifts: The paper uses simple forms of distribution shifts, namely pixel permutation, label permutation, and noisy labels. Although these methods are frequently used in existing literature, they are highly unrealistic. Pixel permutation, for example, never occurs in real-world scenarios (and the use of CNNs for these types of images is questionable). Moreover, these distribution shifts are simple to address, as they can be resolved by adjusting weights in the first or last layer. While benchmarking is an unresolved problem for continual learning and good benchmarks are still limited in the literature, more realistic distribution shifts have been proposed in other works (e.g., see the first three environments studied in Dohre et al., 2024), which could be used in the current paper. \n\n- Test/Train sets: In a realistic continual learning setting, there is no separate test or train set. Instead, there is a single, continuous stream of incoming data, to which a model adapts. Cumulative online loss serves as the primary performance measure. \n\n- Random seeds: True continual learning settings do not involve random seeds for the same reason discussed above, especially in scenarios with sufficiently long data streams. \n\n- Hyperparameter selection: Continual learning is best achieved through continual optimization, which includes algorithms for continual hyperparameter optimization. Here, hyperparameters (e.g., learning rate) are optimized and updated at every training step, over the whole lifetime of agent. See, for example, IDBD, Hypergradient Descent, and MetaOptimize. \n\nI understand that these limitations are also present in many existing works. 
While evaluations in limited settings are acceptable in experimental sections for papers introducing new algorithms, such limitations are insufficient for an empirical study aiming to provide guidelines for future research. \n\nLastly, the scale of the experiments and models used is relatively small for a fully empirical study."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Why is the number of seeds and tasks different in different type of networks, same for the gradient steps?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* Provides an evidence that, there is no correlation between trainability and generalizability.\n* Shows large number of seeds are not necessary for finding good hyperparameters.\n* Provides direction on resource-constrained experiments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper conducts several experiments on selecting hyperparameters from the previous literatures for nonstationary optimization and demonstrates that using multiple streams of tasks for hyperparameters selection is the best approach among the commonly used protocols.\nIt also gives insight on finding configurations with good performance under low resource budget.\nIn addition, it shows that maintaining the training accuracy does not relate to a better generalizability in nonstationary optimization settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper is not well written\n - Line 142: Duplicate of 'the added'. Same line, duplicate of 'be' in 'Should be actually be used'\n - Line 153, there is no obvious connection between this and the next sentence.\n - Line 283: 'used for selection' which I think you are referring to 'used for evaluation' instead?\n - Line 375: 'HOW MANY SEEDS DO YOU TO EVALUATE A METHOD', miss a 'need'?\n - Line 722: Missing the batch size for resnet-18.\n - Figure 2 is hard to interpret. One possible improvement is to add different line shapes.\n - Figure 3 does not have the legend.\n - Figure 4's line is hard to differentiate the methods. Same as figure 2, maybe add different line shape.\n- The protocol 3 is considered to be critical to do lifelong learning. But there is no comparison between this protocol and the other protocols to prove that statement."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We conduct an empirical analysis of the methods and experimental design decisions prominent in plasticity research."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024experimental,\ntitle={Experimental Design for Nonstationary Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=55EO8gSCBT},\nnote={under review}\n}"
},
"abstract": {
"value": "Traditional methods for optimizing neural networks often struggle when used\nto train networks in settings where the data distributions change, and plasticity\npreservation methods have been shown to improve performance in such settings\n(e.g. continual learning and reinforcement learning). With the growing inter-\nest in nonstationary optimization and plasticity research, there is also a growing\nneed to properly define experimental design and hyperparameter search protocols\nto enable principled research. Each new proposed work typically adds several\nnew hyperparameters makes many more design decisions such as hyperparame-\nter selection protocols, evaluation protocols, and types of tasks examined. While\ninnovation in experiment design is important, it is also necessary to (1) question\nwhether those innovations are leading to the best progress and (2) have standard-\nized practices that make it easier to directly compare to prior works. In this paper,\nwe first perform an extensive empirical study of over 27,000 trials looking at the\nperformance of different methods and hyperparameters across different settings\nand architectures used in the literature to provide an evaluation of these methods\nand the hyperparameters they use under similar experimental conditions. We then\nexamine several core experiment design choices made by the community, affirm-\ning some while providing evidence against others, and provide concrete recom-\nmendations and analysis that can be used to guide future research."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"plasticity",
"continual learning",
"experiment design"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/baa737dca94fbdd8a9da28a7692472169b92bba9.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Experimental Design for Nonstationary Optimization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
55Ruu7bF4b | MambaFormer-MOE: Mamba-Transformer-based Mixture-of-Experts for Time Series Prediction | main | Desk Reject | mamba;transformer;mixture-of-experts;time series prediction | learning on time series and dynamical systems | Weijian Li;Han Liu | ~Weijian_Li2;~Han_Liu4 | 0 | 0 | 0 | 0 | 0 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": {
"value": "does not follow the page limit (only 4 pages of main content), and the paper is not complete with empty experimental sections."
},
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Submission Desk Rejected by Program Chairs"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nli2024mambaformermoe,\ntitle={MambaFormer-{MOE}: Mamba-Transformer-based Mixture-of-Experts for Time Series Prediction},\nauthor={Weijian Li and Han Liu},\nyear={2024},\nurl={https://openreview.net/forum?id=55Ruu7bF4b}\n}"
},
"abstract": {
"value": "We propose the MambaFormer-MOE, a mamba-based mixture-of-experts (MOEs) model for multivariate time series prediction. There are three major features of our model. 1. We propose a temporal modeling module based-on the Mamba architecture to model temporal correlations with linear complexity. 2. We propose a cross-variate correlation modeling mechanism based-on self-attention to equip Mamba with multivariate time series prediction capability. 3. We propose a MOE mechanism that has experts that specialize in mixing the variates in different ways. It makes the model generalizable to multivariate time series from different domains. Our empirical results demonstrate that our model has SOTA prediction performance on various multivariate time series datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Weijian_Li2",
"~Han_Liu4"
]
},
"authors": {
"value": [
"Weijian Li",
"Han Liu"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"mamba",
"transformer",
"mixture-of-experts",
"time series prediction"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "li|mambaformermoe_mambatransformerbased_mixtureofexperts_for_time_series_prediction"
},
"pdf": {
"value": "/pdf/ec5f4992f801fa4bfd051442cebde2acf9fae278.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MambaFormer-MOE: Mamba-Transformer-based Mixture-of-Experts for Time Series Prediction"
},
"venue": {
"value": "ICLR 2025 Conference Desk Rejected Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Desk_Rejected_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
||||||||||
55oi1LCdDL | Dual Consolidation for Pre-Trained Model-Based Domain-Incremental Learning | main | Active | Domain-Incremental Learning;Pre-Trained Model;Continual Learning | transfer learning, meta learning, and lifelong learning | 3;5;5;6;6 | 4;4;4;4;4 | 2;3;2;3;3 | 2;2;2;3;3 | 3;2;2;3;3 | 5 | 4 | 2.6 | 2.4 | 2.6 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tDUCT introduces approach to domain-incremental learning by addressing both feature and classifier forgetting simultaneously, providing a fresh perspective on solving the catastrophic forgetting problem.\n2.\tDUCT cleverly combines ideas from model merging and optimal transport. The merging of task vectors with the pre-trained model and the use of optimal transport for classifier alignment are creative applications of existing techniques to the DIL context.\n3.\tThe paper presents comprehensive experimental results across four benchmark datasets and five task orders, demonstrating the robustness and effectiveness of DUCT.\n4.\tThis paper is well-organized, with clear sections and logical flow. The methodology is explained in detail, and the experimental setup and results are presented clearly."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces DUCT, a dual consolidation technique for domain-incremental learning (DIL) that effectively mitigates catastrophic forgetting. DUCT addresses the challenge of balancing knowledge across domains by Representation Consolidation and Classifier Consolidation. The paper demonstrates DUCT’s effectiveness through extensive experiments on four benchmark datasets, showing it consistently outperforms state-of-the-art methods in terms of accuracy and forgetting measure."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThere are many vision language models like CLIP that can perform zero shot. Can the author report the results of CLIP and CLIP related fine-tuning methods such as Coop, CoCoop, etc., to demonstrate the advantages of the article's method compared to these general models.\n2.\tI wonder what data is used to calculate the class center of the pretrained model? If using pretrained data such as ImageNet, the first issue is how to ensure consistency with downstream task categories to calculate class center similarity? The second question is how to reduce the overhead caused by large number of categories and data size?\n3.\tThe author should conduct experiments on more backbones to demonstrate the effectiveness of the method, such as convolutional neural networks like Resnet.\n4.\tTask similarity is calculated based on all categories, may it lead to the influence of some categories being overly magnified while the influence of others is ignored."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see the weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors analyze the current challenge of forgetting in domain incremental learning (DIL) and its underlying causes.\nThey propose a model-merging approach that demonstrates promising accuracy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The DUal ConsolidaTion (DUCT) framework addresses feature drift and forgetting by integrating historical backbones through a representation merging technique, creating stable task-specific embeddings. Additionally, DUCT’s classifier consolidation process merges calibrated and historical weights, preserving semantic class relationships and resisting forgetting, yielding strong performance on diverse benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Equations 4 and 5 attempt to build a unified embedding through weighted summation of model weights, raising questions about feasibility. Given the complexity and lack of interpretability in deep network weights, is this combination effective, or could it intensify conflicts within the feature space? More comprehensive theoretical analysis is required.\n2. While the authors suggest that DIL could benefit applications like autonomous vehicles and face recognition, their experiments focus on classification tasks. Testing on more realistic applications could be more convincing.\n3. The proposed DUCT method relies on model merging. However, as domains accumulate, previously merged models may become overly complex, containing information from multiple domains, while models from newer domains include only the latest domain data. This could lead to an imbalance between older and newer domains, creating potential confusion and forgetting.\n4. The authors tested DUCT on ViT-B/16, but other methods, like S-Prompts, report results on the more powerful CLIP backbone. Does DUCT maintain its effectiveness on a stronger backbone?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Why do you need the two stage merging? Could you just absorb the linear weights into $\\phi$ and use equation (5)?\n\nIs there anyway you could calculate an upper bound for Table1 (e.g. performance of finetuning on the union of all datasets?)"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) The manuscript is well-organized despite the complicate method. \n\n(2) The method is novel.\n\n(3) Results are good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "I authors propose DUCT, a method for domain incremental learning (DIL). DIL is the setting where a sequence of tasks is presented during model finetuning. The training algorithm does not have access to data from prior tasks. The authors decompose the task overfitting problem into two components: (1) representation overfitting and (2) classifier overfitting. The authors tackle to two problems separately and propose novel model-merging-inspired techniques to solve both."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) Many notations are not introduced before hand. This makes the math hard to follow and the method ambiguous. For example, what is $\\phi_i^m$ and $\\alpha_\\phi$ in equation 4? Furthermore, equations are not introduced in the correct order. For example, Eq. 5 depends on a value that is not defined until Eq. 7. \n\n(2) It is unclear why the proposed method is better than model merging (intuitively)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.What is the impact on the results if the proposed method does not use task vectors during integration?\n2.In lines 198-200 of page 4, the authors claim that the proposed method can capture the domain-specific features of all domains. This is an interesting claim. How do the authors prove it?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.This paper is well-motivated. It is easy to understand the motivation of the method. It is reasonable to consider both feature extraction and classification in incremental learning.\n2.The experimental results show that the proposed method performs better than the previous methods, which successfully demonstrates the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is motivated by the forgetting problem of features and the mismatch problem of the classifier in domain-incremental learning. Then this paper proposes to address the above problems by unifying the historical knowledge at both the feature and classifier level. In particular, this paper proposes to merge the backbone of different stages and utilize optimal transport to adapt the classifier of old domains to both the new domain and the merged backbone. This paper conducts multiple experiments to demonstrate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The contributions are slightly limited. Integrating the models to balance different stages of tasks has been applied in incremental learning, such as [1].\n2.Many descriptions and notations are confusing. For example, in line 143 of page 3, a lack of explanation of “b” in “b|Y|”. In lines 179-185, the author seems to have used different words (features, representation, and embedding) to convey the same meaning, and I don't quite understand why the author did so. In Eq.(4), there lack of interpretation of , maxima value, and initial values in the summation notation. In line 257 of page 5, the description of “at most two backbones in memory” is confusing since I find there are at least three models (,, ) in memory according to lines 256. Finally, the authors are conflicted on which way to integrate, as shown in lines 256 and Algorithm 1.\n[1] Zheng Z, Ma M, Wang K, et al. Preventing zero-shot transfer degradation in continual learning of vision-language models[C]//Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023: 19125-19136."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. As shown in fig. 2, the initial performance of DUCT on the first domain is not optimal. Please further elaborate on this issue.\n\n2. The parameter sensitivity analysis in fig. 3(c) indicates that DUCT still achieves decent performance when the head-merge ratio $\\alpha_W$ is small. What if the ratio is set to zero? \n\n3. Can the proposed method be applied to class-incremental learning, given that it treats classes from the incoming domain as new categories?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The problem of exemplar-free domain-incremental learning is more challenging yet more practical. The authors did a good job maintaining the historical knowledge without replaying the past data.\n2. The proposed method has strong performance, achieving a significant accuracy improvement compared to existing DIL methods.\n3. The algorithm is simple, which could make a broader impact to the community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the problem of exemplar-free Domain-Incremental Learning (DIL) given a pre-trained embedding model. The proposed method, DUCT, jointly consolidate historical knowledge at both the representation level and the classifier level. At the representation level, the authors modify the technique of task vectors via considering the task similarity. At the classifier level, the authors propose to retrain a new classifier, and leverage the new classifier to modify the old classifier via optimal transport. To evaluate its effectiveness, DUCT is compared with DIL baselines on four cross-domain benchmarks. DUCT achieves state-of-the-art performance on all the experiments. An ablation study as well as other analytical experiments are reported to provide a more in-depth analysis of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The major concern is that, as shown in the ablation study in table 2, the main reason for the accuracy boost in DUCT could be attributed to task vector, which is an existing technique for addressing multiple tasks simultaneously. In reviewer's opinion, despite that the authors make certain modification on the weighting strategy, the paper fails to provide new insights to this technique on why it is effective in addressing DIL problem. One possible aspect the reviewer can think of is to explain why applying DUCT 'places the same class of different domains together', as suggested in the visualization in fig. 5.\n\n2. The notation in the paper needs improvement. First, in equation 5, $\\phi^m_i$ should not use $i$ as subscript as it indicates the index of the summation. Second, $\\beta$ and $\\gamma$ should be explained once they appear in line 300."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dual,\ntitle={Dual Consolidation for Pre-Trained Model-Based Domain-Incremental Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=55oi1LCdDL},\nnote={under review}\n}"
},
"abstract": {
"value": "Domain-Incremental Learning (DIL) involves the progressive adaptation of a model to new concepts across different domains. While recent advances in pre-trained models provide a solid foundation for DIL, learning new concepts often results in the catastrophic forgetting of pre-trained knowledge. Specifically, sequential model updates can overwrite both the representation and the classifier with knowledge from the latest domain. Thus, it is crucial to develop a representation and corresponding classifier that accommodate all seen domains throughout the learning process. To this end, we propose DUal ConsolidaTion (Duct) to unify and consolidate historical knowledge at both the representation and classifier levels. By merging the backbone of different stages, we create a representation space suitable for multiple domains incrementally. The merged representation serves as a balanced intermediary that captures task-specific features from all seen domains. Additionally, to address the mismatch between consolidated embeddings and the classifier, we introduce an extra classifier consolidation process. Leveraging class-wise semantic information, we estimate the classifier weights of old domains within the latest embedding space. By merging historical and estimated classifiers, we align them with the consolidated embedding space, facilitating incremental classification. Extensive experimental results on four benchmark datasets demonstrate Duct's state-of-the-art performance."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Domain-Incremental Learning",
"Pre-Trained Model",
"Continual Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/bb0c301b14ee88215686047d478c1064cd3624aa.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Dual Consolidation for Pre-Trained Model-Based Domain-Incremental Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
55pCDKiS8B | Elucidating the Preconditioning in Consistency Distillation | main | Active | Diffusion Models;Distillation;Consistency Trajectory Models | generative models | 6;6;6;8 | 3;2;3;3 | 3;3;3;3 | 2;3;2;3 | 3;2;3;3 | 6.5 | 2.75 | 3 | 2.5 | 2.75 | 0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "From line 265-266 the authors mentioned that the parameter $l_t$ in \"Analytic-Precond\" is chosen to be the minimizer of the expected gradient norm $E_{q(x_t)}[\\|\\nabla_{x_t}g_{\\phi}(x_t,t)\\|_F]$ based on earlier work [1]. Would it be possible for the authors to further expand on why such choice ensures the robustness of the resulting ODE again errors in $x_t$? Which section/part of [1] discussed the reason behind such choice?\n\nReferences:\n\n[1] Zheng, Kaiwen, Cheng Lu, Jianfei Chen, and Jun Zhu. \"Improved techniques for maximum likelihood estimation for diffusion odes.\" In International Conference on Machine Learning, pp. 42363-42389. PMLR, 2023."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Complete proofs are included for each proposition in the manuscript.\n2. Extensive numerical experiments are provided to validate the effectiveness of the proposed methodology."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a general paradigm of preconditioning design in consistency distillation, which is a common technique used for accelerating the inference time of consistency models based on teacher-student training (i.e., knowledge distillation). Specifically, this paper focused on preconditioning, which is a vital technique for stabilizing consistency distillation. Compared to previous hand-crafted choices of preconditioning, this paper proposed a principled way called \"Analytic-Precond\" to analytically optimize the preconditioning based on the consistency gap associated with the teacher probability flow ODE. Numerical experiments on multiple datasets are included to justify the effectiveness of \"Analytic-Precond\"."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Presentation of the manuscript can be further improved by rewriting certain phrases and expanding on some technical details. For instance, the phrase \"CMs aim to a consistency function\" on line 134 might be better rephrased as \"CMs aim to learn a consistency function\". For possible ways of explaining technical details in a better way, one may refer to the \"Questions\" section below."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**Theoretical Innovation in Preconditioning**: The paper introduces \"Analytic-Precond,\" a novel, analytically derived preconditioning method that theoretically optimizes the consistency distillation process. This goes beyond prior handcrafted preconditionings, offering a principled approach that minimizes the consistency gap between the teacher and student models. This theoretical grounding not only strengthens the methodology but also provides new insights into consistency distillation.\n\n**Significant Training Acceleration**: Experimental results show that Analytic-Precond achieves 2-3x faster training in multi-step generation tasks on standard datasets. This improvement in speed is impactful, especially for resource-intensive applications of diffusion models, as it directly addresses the bottleneck of slow inference that has historically limited diffusion models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper titled \"Elucidating the Preconditioning in Consistency Distillation\" examines consistency distillation techniques for diffusion models, where a student model learns to follow the probability flow trajectory set by a teacher model. This distillation accelerates generation by reducing the inference steps. The paper specifically explores preconditioning, a method that combines input data with network outputs to improve stability during training. Traditionally, preconditioning has been handcrafted, but this paper introduces a theoretically optimized method named \"Analytic-Precond.\" This new approach minimizes the gap between teacher and student denoisers, thereby improving training efficiency and trajectory alignment. Experimental results demonstrate that Analytic-Precond achieves up to 3x acceleration in training across various datasets, indicating its potential in enhancing consistency models for faster multi-step generation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- This paper does not provide whether BCM is better than CTM+ Analytic-Precond in terms of FID.\n- Analytic-Precond does not perform better when GAN is incorporated into the CTM. Can the authors provide an explanation or intuition for this?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- The paper focuses on the case where $f=0, g=\\sqrt{2t}$ because of the recent literature. Is the method still applicable for other choices of $f$ and $g$?\n- As far as I understand, the whole discussion depends on finding a good discretization of ODE (2). Both (9) and (13) use first order (Euler) methods. Can we get more insight if we try to use a better integrator?\n\nMinor:\n- Why is $q_T$ on line 118 a 0 mean Gaussian? Do we have some condition on $\\mathbb{E}[q_0]$?\n- The $\\lambda_t$ below equation (11) might be confused with $\\lambda(t)$ in equations (5), (7).\n- x$ and $x_t$ are used interchangeably in the RHS and LHS of the equations, better to be consistent. Example: line 182, line 276.\n- Is the code of the experiments released?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper presents strong mathematical arguments to support the choice of coefficients, including an explanation for the CMT choices that is not just based on intuition as previous methods.\n- The paper shows numerical proofs of the claims made, underlying when _Analytic-Precond_ offers no advantage (single step) and when it does (two or more steps)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper works on _Preconditioning_, a technique used in the consistency distillation of diffusion models to obtain consistency functions that directly satisfy the boundary conditions required by the problem. Preconditioning consists in linearly linking the input to the output of a network. In the literature, the choice of linear coefficients is based on intuition. The paper introduces instead a new analytical method, called _Analytic-Precond_, for setting the coefficients. The method consists in applying a parametric discretization of the probability flow ODE, and then optimizing the parameters by minimizing the gap between the optimal student and the teacher, while keeping the discretization as robust as possible. Finally, some numerical proofs show that the derived result leads to a speed-up in the inference of diffusion models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper might be a little hard to read for who is not familiar with distillation. I personally took a while to grasp the setting and all the notation. For example, $\\phi$ is used many times before definition. It could be worth having a brief discussion about some nomenclature like _teacher_ & _student_."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How does performance and the actual values for the preconditioners change as we change the number of samples used to estimate Eqs. 15 and 17 (expressions for the preconditioners)? Results in the paper are obtained estimating them for 120 different times, using 4096 samples to estimate expectations. This yields good results for 2 step sampling, at a modest computational cost. This is definitely not strictly necessary, but I think it could be interesting to see how the performance and preconditioner values change as we change the number of samples used. The estimators used have variance and bias, both of which decrease with the number of samples. How small can we make the number of samples used while still retaining good performance? Can we further boost performance using more samples?\n\nThis is all for distillation. Did you think about consistency training? These preconditioning parameters also show up in that case, but we don’t have access to the teacher, so it is unclear how to use the ideas in this work.\n\nWhile multi-step consistency models have been observed to underperform CTMs, it would be nice to have values in some of the results reported. I understand training is exactly the same, so I’m not expecting the new preconditioners to help there. But it would be nice having plain multi-step CMs in some results.\n\nWhat dataset is used for the GAN experiment?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Consistency (trajectory) distillation represents an extremely popular approach for fast generation, solving the main drawback of diffusion models. Finding approaches to improve distillation, either the final performance achieved or its training cost, is extremely relevant to improve the practicality and applicability of large models currently being trained in multiple domains.\n\nThis paper tackles this problem by changing the preconditioning parameters used for the neural network. To my knowledge the procedure described in the paper is novel, and empirical results show that, for CTMs, the proposed approach yields a training speedup of 2x for multiple datasets.\n\nThe final proposed method is quite simple to implement, with closed-form expressions for the preconditioners. (However, these expressions involve nonlinear functions intractable expectations which are estimated with samples, meaning that the actual values obtained have both bias and variance.)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Consistency (trajectory) distillation typically uses a network parameterized as $f(x, t, s) = \\alpha_{t, s} F_\\theta(x, t, s) + \\beta_{t, s} x$, with specific expressions for the coefficient (i.e. preconditioners) $\\alpha_{t, s}, \\beta_{t, s}$ so that the boundary conditions are automatically satisfied. This paper proposes an efficient method to find alternative preconditioners that yield improved performance and faster training. They derive the expressions for the preconditioners by re-expressing the underlying ODE in terms using certain additional variables and propose simple objectives whose minimizers can be found analytically to set the values for these variables."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper deals with the consistency (trajectory) distillation case. In practice, a very relevant alternative that is also widely used (and also uses similar preconditioners for the network) is consistency training. That is, directly training fast samplers without distilling a base model. The paper does not address this problem at all. While I understand this is not necessary, and distillation in itself is an important task, it would be interesting to see whether certain ideas from this work can be extended to consistency training. (Most of the expressions for the preconditioners rely on having a pre-trained model, so unclear how to generalize this approach, if possible.)\n\nThe approach seems to help when using CTM with 2 steps or more. For single step generation performance and training curves pretty much overlap with and without the proposed approach. 1 step generation plays an important role for real time generation, and it would be very interesting to develop methods that can improve training and final performance in that setting as well. Additionally, the fact that the derived preconditioners are essentially the same as the ones naively used by consistency (trajectory) distillation raises the question of whether there are other preconditioners that can be used that might help in this case as well. Again, not exploring this does not reduce the paper’s merit, but I think it is an interesting question too.\n\nThe method does not yield any benefits when used in concert with a GAN auxiliary loss. Using this loss has been observed to lead to improved performance, and indeed, the best results reported in the paper are using the GAN loss, if I understand correctly. The proposed approach does not yield benefits (but at least does not hurt) when using this auxiliary loss."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024elucidating,\ntitle={Elucidating the Preconditioning in Consistency Distillation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=55pCDKiS8B},\nnote={under review}\n}"
},
"abstract": {
"value": "Consistency distillation is a prevalent way for accelerating diffusion models adopted in consistency (trajectory) models, in which a student model is trained to traverse backward on the probability flow (PF) ordinary differential equation (ODE) trajectory determined by the teacher model. Preconditioning is a vital technique for stabilizing consistency distillation, by linear combining the input data and the network output with pre-defined coefficients as the consistency function. It imposes the boundary condition of consistency functions without restricting the form and expressiveness of the neural network. However, previous preconditionings are hand-crafted and may be suboptimal choices. In this work, we offer the first theoretical insights into the preconditioning in consistency distillation, by elucidating its design criteria and the connection to the teacher ODE trajectory. Based on these analyses, we further propose a principled way dubbed \\textit{Analytic-Precond} to analytically optimize the preconditioning according to the consistency gap (defined as the gap between the teacher denoiser and the optimal student denoiser) on a generalized teacher ODE. We demonstrate that Analytic-Precond can facilitate the learning of trajectory jumpers, enhance the alignment of the student trajectory with the teacher's, and achieve $2\\times$ to $3\\times$ training acceleration of consistency trajectory models in multi-step generation across various datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion Models",
"Distillation",
"Consistency Trajectory Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/727b5f6a454c81074ed5ea2f7f27837d64100ce3.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Elucidating the Preconditioning in Consistency Distillation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
562B7aLi5X | Binary Losses for Density Ratio Estimation | main | Active | density ratio estimation;domain adaptation;composite binary losses;class probability estimation | learning theory | 3;3;6;6;8 | 2;2;3;3;2 | 2;3;3;3;4 | 1;1;3;3;3 | 2;2;2;3;3 | 5.2 | 2.4 | 3 | 2.2 | 2.4 | 0.336861 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could you briefly outline the most significant theoretical contribution of your paper and the challenges involved in achieving it?\n\n2. Could you explain the intuition behind the flatness of your method in Figure 2? If the method aims to estimate the ratio accurately for high values, shouldn’t it follow the top of the curve closely? Furthermore, in the lower row of the figure for $\\alpha=0.01$, I am not sure I understand why one would prefer your estimate over KuLSIF.\n\n3. Since you mention that standard methods prioritize estimating smaller values and you focus on higher values, what happens if one applies standard methods to the inverse ratio, i.e., estimating $dQ/dP$?\n\nTypo: In footnote 2, the next-to-last equation contains probabilities that should be conditioned on $x$: it should be $\\rho(y=1∣x)\\rho(x)$ instead of $\\rho(x,y=1)$."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper appears to be self-contained and introduces all the tools and notation it uses.\n\n2. The experimental results are fairly extensive for a theoretical paper.\n\n3. The theoretical results, especially Lemma 4 in the appendix, seem to rely on non-trivial applications of several previously established results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors characterize the set of loss functions that, when used in density ratio estimation for binary classification, lead to the minimization of a particular Bregman divergence. This approach is motivated by the observation that some commonly used losses (such as exponential or logistic) yield density ratio estimates that minimize a similar Bregman divergence expression. After identifying these losses, they design a new family of losses aimed at accurately estimating large values of the density ratio, in contrast to standard losses that focus on estimating small ratio values. They apply these designed losses to estimate density ratios in Gaussian RKHS and for unsupervised domain adaptation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The presentation of the paper could be significantly improved. The paper uses heavy notation — for example, understanding $B_{-\\underline{L}^\\circ}(\\beta,g\\circ f)$ requires substantial effort. Another clear example is Remark 1, which is completely incomprehensible unless the reader is already familiar with everything it covers.\n\n2. The novelty of this work is unclear. I believe the authors would agree that the main contribution of this paper is theoretical and primarily represented by Theorem 1. However, given Remark 1, it is not evident how substantial this theorem’s contribution really is.\n\n3. It is not clear why one should focus on minimizing equation (1). According to the beginning of Section 2, there are \"many\" density ratio estimation methods that lead to minimizing (1), with four examples provided. What does \"many\" mean in this context, and why should one limit themself to this specific type of minimizers?\n\n4. Some parts of the experimental results, like Section 6.2, contain too much irrelevant information for readers, making it easy to miss the main points. I would consider moving some of this information to the appendices."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Considering major weaknesses 1 and 2 discussed above, could you provide more additional discussions to clarify the novel contributions of your study?\n- Considering major weaknesses 3 and 4 discussed above, could you provide further detailed information to elucidate the effectiveness of your approach discussed in Section 5?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "References to existing research are sufficiently provided."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present theoretical results characterizing strictly proper binary loss functions that lead to minimizers of Bregman divergences in probability density estimation. According to these theoretical results, they propose a novel loss function that prioritizes accurate estimation of large density ratio values over smaller ones. They also empirically validate the effectiveness of their proposed loss function through numerical experiments, demonstrating that the novel loss function can lead to improvements in parameter selection for domain adaptation tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "#### Major Weaknesses:\n1. There are concerns regarding the novelty of the theoretical results presented in this study. Specifically, results such as the necessity of Equation (8) in Theorem 1 appear to be easily derived from findings in prior work referenced by this study ([1], [2], and [3]). A detailed examination of this issue is provided below.\n2. Additionally, the canonical form of the density ratio link, given in Equation (10), does not constitute a new result, as it can be derived from results presented in prior studies ([1]). A detailed examination of this issue is provided below.\n3. The proposed loss function, derived from Equation (11) in Section 5, lacks a clear connection to the theoretical results previously established in Section 4.\n4. Moreover, it is unclear how the proposed loss function specifically addresses shortcomings associated with existing loss functions. The authors are encouraged to include mathematical analysis, such as theorems, to clarify the properties of the proposed loss function.\n\n#### Minor Weakness:\n5. Figures 2 and 3:\n - The axis titles are missing, making it difficult to interpret the graphs.\n - In particular, Figure 3 lacks explanatory labels for each axis, which are needed to understand these experimental results.\n\n\n---\nHereafter, details of the major weaknesses, specifically Weaknesses 1 and 2, are discussed.\n\n#### About major weakness 1:\nEquation (8) can be derived as follows:\n1. From Theorem 4 in [2], we know that $B_{\\phi} = B_{\\phi'}$ for any $\\phi'$ with $\\phi'(y) = \\phi(y) + c_2 y + c_1$, where $c_2$ and $c_1$ are constants. This fact implies that terms such as $\\hat{\\eta} c_2 $ and $c_1$ in the definition of $\\gamma(\\cdot)$ (line 236) are redundant.\n2. From Theorem 4 in [1], we know that $L(\\eta, \\mu) = \\underline{L}(z) + (\\eta - \\mu) \\underline{L}'(\\mu)$.\n3. 
Additionally, we have $\\underline{L}(\\eta) = - \\phi(\\eta)$ because $\\underline{L} = L(\\eta, \\eta) = \\eta l_1(\\eta) + (1 - \\eta) l_2(\\eta) = \\gamma(\\eta) = - \\phi(\\eta)$.\n4. Thus, $L(\\eta, \\Psi^{-1}(y)) = - \\phi(\\Psi^{-1}(y)) - (\\eta - \\Psi^{-1}(y)) \\phi' (\\Psi^{-1}(y))$, where Equation (8) represents the cases for $\\eta = 0$ and $\\eta = 1$ in this equation.\n\n#### About major weakness 2:\nFrom Corollary 3 in [1] and the discussion in Section 6.1 of [1], it follows that $(g^{-1}_{can})' (c) = w(c) = - \\underline{L}''(c) = \\phi''(c)$. There appears to be no significant difference between Equation (10) and this equation.\n\n---\n\n[1] Reid, M. D., & Williamson, R. C. (2010). Composite binary losses. The Journal of Machine Learning Research, 11, 2387-2422.\n\n[2] Reid, M. D., & Williamson, R. C. (2011). Information, Divergence and Risk for Binary Experiments. Journal of Machine Learning Research, 12(3).\n\n[3] Menon, A., & Ong, C. S. (2016, June). Linking losses for density ratio and class-probability estimation. In International Conference on Machine Learning (pp. 304-313). PMLR."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No details of concerns beyond the above."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is the existence of the density ratio always guaranteed? For example, if the two densities have no overlapping support, in which case, the definition seems to fail. Can the proposed method perform a good estimation?\n2. If one distribution has a light tail and the other a heavy tail, how would that impact the estimation?\n3. It seems the author considered only a one-dimensional case in the experiment, how about a multi-dimensional case when $d>0$? This is more common in covariate shift problems.\n4. What are the biggest difficulties and challenges for deriving the the sample complexity of the proposed methods?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Compared to the related literature, this paper introduces a new framework for constructing novel loss functions, prioritizing an accurate estimation of large density ratio values over smaller ones.\n2. It provides a thorough mathematical foundation, characterizing the types of loss functions that align with specific error measures derived from Bregman divergences. The comparison with the related literature is good.\n3. The work shows large practical implementation through empirical data and real application in deep domain adaptation. The simulation work is extensive to demonstrate the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the challenge of estimating the ratio of two probability densities from observations. Typically, this is done using binary classifiers, but the efficacy of the estimators is significantly affected by the choice of the binary loss function used. The authors characterize loss functions that result in statistically favorable density ratio estimations, particularly focusing on achieving low errors in large density ratio values—a departure from classical approaches that perform well on small values. They introduce novel loss functions and demonstrate their application in parameter selection for deep domain adaptation tasks. Numerical experiments and real-world applications illustrate the practical benefits of these loss functions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper does not delve into the sample complexity of the proposed methods, which could be critical for understanding their efficiency in various scenarios.\n2. While it improves estimation for large density values, the impact on performance for smaller values isn't thoroughly explored.\n3. A more detailed introduction of the experiments should be considered."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Theorem 1 extends previous results. What is the main challenge in extending these, and what is the key novelty in the proof?\n2. Table 1 shows that EW consistently performs best for Amazon Reviews under Importance Weighted Aggregation. Is there any intuition behind this outcome?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The theoretical analysis appears solid, and several real datasets are used to demonstrate the proposed loss function. The authors provided detailed comparison between new results and previous analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work examines the estimation of the Radon-Nikodym derivative, which is the ratio of two probability densities. In classical algorithms, an incorrect choice of binary loss function can lead to biased estimates. The author first derived the necessary properties for an appropriate loss function. Based on this analysis, novel loss functions were proposed, demonstrating improved parameter selection in both simulated and real data examples."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the author discussed how their results improve upon previous work, they did not elaborate on how their proof differs from prior analyses. Consequently, the challenges of the proof, as well as the novelty and contributions of the theoretical analysis, remain unclear. Additionally, the writing could be improved; including more high-level explanations of the motivation and results would make it easier to follow."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In the conclusion, you mention that the sample complexity of these tasks is not known. Do you mean to say that the empirical risk minimizer of the loss converges to the minimizer of the Bregman divergence only as the number of samples goes to infinity, but we do not know finite-sample error bounds for the empirical minimizer (similar to how in PAC learning theory, this would correspond to an additional \"complexity of F\"/#samples error term)?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The characterization provided by the authors significantly completes the picture laid out in prior work by Menon and Ong 2016, and work by Reid and Williamson (see Remark 1 for specifics). The motivation for considering the inverted problem is convincing---one can imagine ascribing certain desired properties of an estimator to the minimizer of a Bregman divergence, and thereafter, using the characterization derived in the paper, obtain the correct loss function to minimize on the data that realizes the minimization of this Bregman divergence (and hence has the desried property). The authors specifically consider the property \"small errors on large density ratios\", and obtain strong empirical results for minimizing the loss functions through their characterization. In my view, this is a valuable contribution that enhances our toolbox for estimating density ratios in a principled manner. The writing of the paper is also geneally good, although at some places, it becomes dense with a lot of assumed context."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "A standard technique to estimate the ratio between densities of two probability distributions $P$ and $Q$ from samples from each of the distributions is to train a binary classifier that distinguishes the samples. Namely, one labels samples from $P$ with the label $1$, and samples from $Q$ with the label $-1$, and empirically minimizes a loss function on these samples. Depending on the loss function that is chosen, it is known that the as the number of samples tends to infinity, the empirical minimizer effectively minimizes a certain Bregman divergence between the true density ratio and the estimated density ratio.\n\nHowever, the convex potential of this Bregman divergence depends on the chosen loss function. Namely, the Bregman divergence to the true density ratio that our estimator ends up minimizing depends heavily on the loss function that we chose. As it turns out, most commonly used loss functions (like the logistic loss), correspond to Bregman divergences that do not appropriately penalize discrepancies at large density ratio values, instead penalizing density ratio errors at smaller values---this might lead to suboptimal density ratio estimates in many applications.\n\nThis paper takes an inverted approach: given a convex potential $\\phi$ (and a ``probability link function'' $g$), the paper characterizes a unique loss function $l_{\\phi, g}$, such that upon empirically minimizing $l_{\\phi, g}$, as the number of samples grows, the Bregman divergence with potential $\\phi$ is minimized. The paper furthermore proposes a canonical link function $g$, which induces a convex loss function $l$, and is hence computationally amenable to empirical risk minimization.\n\nOne convenient application of the characterization in this work is that it gives a way for a practitioner to design a loss function that would have the properties they desire. 
For example, as elaborated in Section 5 by the authors, if one cares about penalizing errors in larger density ratios more than errors in smaller density ratios, one can specify a potential $\\phi$ for a Bregman divergence $D_\\phi$ that ensures this, and thereafter using the characterization in the paper, obtain the associated loss function $l_\\phi$. If one now minimizes $l_\\phi$ on the samples, then in the limit, one would be minimizing $D_\\phi$ (which was chosen so as to prioritize accurate estimation of large density ratios). In particular, the authors specifically propose two convex potentials $\\phi$ for this purpose--the Exponential Weight (EW) function and polynomial weight functions, and derive the associated loss functions from their characterization.\n\nFinally, the authors empirically validate minimizing these loss functions as compared to the standard loss functions on a variety of synthetic as well as real-world datasets. They show that minimizing their loss functions leads to better performance on importance weighting tasks on a range of datasets. The experimental evaluation appears quite thorough and extensive."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Up until you mention your first contribution, the reader has only looked at the pseudocode of Algorithm 1, which uses a probability link function $\\Psi$. There has been no mention of $g$ as yet. Correct me if I am wrong, but my understanding is that $g(x)$ in Algorithm 1 is simply $\\Psi^{-1}(x)/1-\\Psi^{-1}(x)$---it would be helpful to at least mention this before introducing \"Density ratio link $g$\" in line 78. Because otherwise, the reader, who has just seen $\\Psi$ in Algorithm 1, is a little confused about where $g$ sprang out of nowhere, and how it is relevant.\n\n---\n\nMinor/typos: \\\nLine 78: I believe the loss function for an arbitrary $g$ is not convex, but only strictly roper composite (the loss function for the canonical $g$, as stated in the next sentence, is convex).\n\nLine 273: I believe in the denominator, there is a typo (should be $g_{can}$ instead of $g$)"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose novel loss functions for classifier-based density ratio estimation, that are characterized by minimizing a prescribed Bregman divergence between the density ratio and the constructed estimator."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024binary,\ntitle={Binary Losses for Density Ratio Estimation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=562B7aLi5X},\nnote={under review}\n}"
},
"abstract": {
"value": "Estimating the ratio of two probability densities from finitely many observations of the densities, is a central problem in machine learning and statistics. A large class of methods constructs estimators from binary classifiers which distinguish observations from the two densities. However, the error of these constructions depends on the choice of the binary loss function, raising the question of which loss function to choose based on desired error properties.\n\nIn this work, we start from prescribed error measures in a class of Bregman divergences and characterize all loss functions that lead to density ratio estimators with a small error. Our characterization provides a simple recipe for constructing loss functions with certain properties, such as loss functions that prioritize an accurate estimation of large values. This contrasts with classical loss functions, such as the logistic loss or boosting loss, which prioritize accurate estimation of small values. We provide numerical illustrations with kernel methods and test their performance in applications of parameter selection for deep domain adaptation."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"density ratio estimation",
"domain adaptation",
"composite binary losses",
"class probability estimation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/14bcf6426bd1b895b8744fd0eac55b7db9b5013c.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/2259455d785f99cca7cbbf7ec9001b791dab4a4b.zip"
},
"title": {
"value": "Binary Losses for Density Ratio Estimation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
56Zn3halhq | Learning Augmentation Policies from A Model Zoo for Time Series Forecasting | main | Withdraw | Time Series Forecasting;Data Augmentation | learning on time series and dynamical systems | Haochen Yuan;Xuelin Li;Yunbo Wang;Xiaokang Yang | ~Haochen_Yuan1;~Xuelin_Li2;~Yunbo_Wang2;~Xiaokang_Yang1 | 3;5;5;5 | 4;4;4;4 | 3;4;2;2 | 2;2;3;2 | 3;4;3;2 | 4.5 | 4 | 2.75 | 2.25 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Presents novel ideas for data augmentation. Rather than treating all data equally for augmentation, introduces the concept of identifying \"marginal samples\" that would benefit most from augmentation.\nA comprehensive empirical validation across multiple datasets and models and a thorough ablation studies examining key components is presented.\nOverall well-structured presentation progressing from motivation to implementation.\nThe design choices are well motivated."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel data augmentation method for time series forecasting. More specifically, the proposed method\nleverages a model zoo of pretrained forecasting models to identify the so called\"marginal samples\" - training instances where models show a high prediction diversity. Focusing augmentation on these marginal samples is more effective than uniform augmentation across all data. To learn the augmentation policy the method uses a variational masked autoencoder (V-MAE) as the base augmentation model. They applies REINFORCE algorithm to optimise the augmentation policy using model zoo prediction variance as feedback. The goal is to generate augmented data that reduces prediction variance across models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My main concern with this paper is that modest gains in results don't seem to justify the expensive and complicated method proposed. The requirement of multiple pre trained models itself is quite expensive. The training of the augmentation policy seems quite compute intensive. The performance gains are marginal and are primarily driven by the base transformer. On modern transformer based forecasting methods such as such as patchtst and itransformer the gains are marginal and it even underperforms in some cases. Overall, I think the performance gains don't justify the computation expenses required."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I am not sure why there is no discussion of outlier and out-of-distribution detection or concepts introduced in the data cleaning literature. I think it is crucial to see the contribution and position of this work in those literatures and provide discussions for the proposed method against the approaches in those literatures."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Normalizing challenging, high-diversity samples rather than removing them is a notable departure from traditional outlier detection and data cleaning practices. The use of reinforcement learning to guide this normalization process adds further novelty, as it enables the model to learn an optimal augmentation policy that reduces prediction variance across a model zoo."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces a novel data augmentation method, AutoTSAug, that transforms high-diversity or marginal samples in time series data to better align with standard patterns, using reinforcement learning to guide the transformations. By leveraging a model zoo, the method identifies challenging samples with high prediction variance and applies a variational masked autoencoder to generate augmented, normalized versions of these samples. This approach aims to reduce prediction error variance and improve model stability by effectively normalizing outliers rather than removing them."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "A key weakness of the paper lies in its lack of engagement with the outlier detection and data cleaning literature, which limits the reader's ability to understand the contribution in context."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) How sensitive is the method to the choice of models in the zoo? What criteria should be used for model selection?\n2) Why not consider augmenting ‘‘hard samples’’ that consistently perform poorly across the model zoo? Perhaps, augmenting these hard samples may help the models uncover their underlying patterns, and improve their performance?\n3) How does the approach ensure that minimizing variance doesn’t lead to uniformly poor samples? For instance, if the variance is very high, and the agent gives more importance to the variance criterion.\n4) What advantages does REINFORCE offer over alternative policy optimization methods like PPO or TRPO?\n5) How does the method compare to approaches with theoretical guarantees like Recursive Time Series Data Augmentation (RIM)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The use of model zoo diversity to identify samples for augmentation.\n- Interesting combination of VAE and RL.\n- Comprehensive empirical validation across multiple datasets.\n- Thorough ablation studies supporting design choices.\n- Practically useful with reasonable computational overhead."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents AutoTSAug, a novel data augmentation method for time series forecasting that uses reinforcement learning to learn optimal augmentation policies. The key innovations are: (1) using a ‘‘model zoo’’ of pretrained forecasting models to identify ‘‘marginal samples’’ that would benefit most from augmentation, and (2) employing a variational masked autoencoder trained with REINFORCE to generate augmented data that reduces prediction variance across the model zoo. The method shows consistent improvements over baselines across multiple datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Limited theoretical justification for focusing exclusively on high-variance samples. Could you provide a more formal theoretical justification of this claim? Or empirically prove that augmenting low variance samples (hard ones) is not beneficial?\n- Potential oversight of valuable transformations for low-variance samples. Could you apply your augmentation framework to augment hard samples, with a different reward function that would improve the forecasting results on these samples?\n- Over-reliance on model zoo’s diversity criterion without stability analysis.\n- Risk of generating uniformly poor samples due to variance-based reward. Are there any safeguards to prevent generating uniformly poor samples (e.g. variance >> generation error)?\n- Limited comparison with state-of-the-art augmentation methods.\n- Missing analysis of hard samples that consistently perform poorly.\n- Insufficient justification for choosing REINFORCE over other RL algorithms."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Are the marginal samples consistant across different training instances of the same model? E.g. if the model zoo is initialized with different parameters or trained with different hyperparameters does it affect which samples are considered marginal?\n- Since the paper proposes an RL based training approach, does the proposed method use a multi-step training approach where the initial recontruction is fed into the encoder as the new \"state\" and policy model would then further augment the sample. Or do the proposed method use a single step approach and if so is that enough to significantly modify the samples towards the reward function?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- The paper proposes a interesting perspective on data augmentation by focusing entirely on high prediction variance samples.\n- The paper is generally well written and the methods and results are presented well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a data augmentation method for time series prediction. The paper focuses on augmenting \"marginal samples\" which are samples that has high prediction variance across a variety of different prediction models. The paper then propose a generative approach using V-MAEs to augment the marginal samples and to train the generative model via a reinforcement learning approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- In the Related Works section, the paper presents other generative based and RL-based methods for time-series data augementation yet only compares their proposed method with the Gaussian noise augmentor. It would be nice to see more baseline comparisons (at least one each from generative based and RL-based methods).\n- It is not super convincing that augmenting only marginal samples results in consistantly significant improvements for the prediction model, as most of the results presented in Table 2 having a <5% improvement with the augmented data, with the biggest improvement coming from the basic Transformer model.\n- Moreover, it is not entirely clear that AutoTSAug is able to consistantly morph the marginal samples into samples that exhibit lower prediction variance in the model zoo."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nyuan2024learning,\ntitle={Learning Augmentation Policies from A Model Zoo for Time Series Forecasting},\nauthor={Haochen Yuan and Xuelin Li and Yunbo Wang and Xiaokang Yang},\nyear={2024},\nurl={https://openreview.net/forum?id=56Zn3halhq}\n}"
},
"abstract": {
"value": "Time series forecasting models typically rely on a fixed-size training set and treat all data uniformly, which may not effectively capture the specific patterns present in more challenging training samples. To address this issue, we introduce AutoTSAug, a learnable data augmentation method based on reinforcement learning. Our approach begins with an empirical analysis to determine which parts of the training data should be augmented. Specifically, we identify the so-called marginal samples by considering the prediction diversity across a set of pretrained forecasting models. Next, we propose using variational masked autoencoders as the augmentation model and applying the REINFORCE algorithm to transform the marginal samples into new data. The goal of this generative model is not only to mimic the distribution of real data but also to reduce the variance of prediction errors across the model zoo. By augmenting the marginal samples with a learnable policy, AutoTSAug substantially improves forecasting performance, advancing the prior art in this field with minimal additional computational cost."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Haochen_Yuan1",
"~Xuelin_Li2",
"~Yunbo_Wang2",
"~Xiaokang_Yang1"
]
},
"authors": {
"value": [
"Haochen Yuan",
"Xuelin Li",
"Yunbo Wang",
"Xiaokang Yang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Time Series Forecasting",
"Data Augmentation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "yuan|learning_augmentation_policies_from_a_model_zoo_for_time_series_forecasting"
},
"pdf": {
"value": "/pdf/9e595c3ddf091f660ff8f59c7f419a54c803a385.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/2d75f7fda7bdaf1512fc9ff89d64c2d85d288fe1.zip"
},
"title": {
"value": "Learning Augmentation Policies from A Model Zoo for Time Series Forecasting"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
56mg1JFd3n | Writing in the Margins: Better Inference Patterns for Long-Context Retrieval | main | Active | chunked prefill;long context inference;interactive inference | generative models | 3;5;6;10 | 2;4;2;4 | 1;2;3;3 | 1;2;3;4 | 2;3;3;4 | 6 | 3 | 2.25 | 2.5 | 3 | 0.588348 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1) One approach the authors could explore would be to use a separate smaller LLM as classifier. Using the base model (which can be very large) adds latency."
},
"rating": {
"value": 10
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper provides a number of thought provoking outcomes.\n1) It showcases how adding a simple strategy of adding notes or summaries in the \"margins\" after each prefilled chunk can assist in improving LLM reasoning and retrieval capabilities. \n2) The notes written by the LLM can potentially be used to improve explainability of the final decoded output. This is dependent on whether the question asked for the margin generation is useful. In the paper the authors ask the LLM whether the context is relevant to the query (and to provide a summary).\n3) The approach is general purpose, it can be applied to any LLM without the need for finetuning which is a big win. \n\nOverall, strong contribution."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a new inference methodology called \"writing in margins\" for long context tasks. The method builds upon the chunked prefill strategy (commonly used while dealing with long contexts to avoid the quadratic growth of memory), dividing long input contexts into manageable segments and generates \"margins\" or intermediate summaries for each chunk. \nThe margins are then classified by the same LLM as useful or not-useful and useful margins are kept as part of the context and used during decoding step. \nThe approach seems to significantly help LLM (especially smaller LLMs) in better accuracy during decoding."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) Latency - while the authors mention that latency is slightly increased, an ablation study for this would be welcome. Since the paper uses 2 steps for each chunk - margin generation and then margin classification, you are effectively doing 2 decoding steps for the model with each chunk. This will add latency, especially if the summaries generated are long.\n\n2) comparison against finetuned models - the paper mentions that this technique the models to perform well on tasks (long context) without the need to finetune the model (similar to rag). It would be good to include a model finetuned for the task and using the standard Long Context LLM decoding approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* The main results in Table 4 present many metric values repeatedly measured using a fixed dataset and multiple algorithms. No statistical significance tests are shown. This severely compromises the integrity of the results. Were these tests conducted—with appropriate corrections for multiple comparisons—but not reported?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "* **Interesting approach to query-specific representation expansion.** Bootstrapping decision-making with model information (e.g., writing the margins) is a compelling way for a model to guide itself toward a better response. \n* **Focus on effectiveness and efficiency.** The authors discuss both the effectiveness of their method and how it can improve the efficiency during decoding. \n* **Extensive experimentation.** Notwithstanding concerns (below), testing the approach on multiple settings and across multiple models is a rigorous way to test a model. The authors could have improved the discussion on how performance varies and what that implies about the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a method for improving the representation of chunked text in a prompt by computing query-specific representations (margin notes) for each chunk. They hypothesize that this expanded and query-specific text allows for more efficient and effective decoding. To test this, the authors apply their method to several baseline models across three tasks: multi-hop reasoning, single-hop retrieval, and aggregation. Post-hoc analysis involves an ablation study."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **No formal statement of hypotheses.** This is perhaps implicit, but given the number of experiments, it is essential to be explicit about the precise hypotheses the experiments test. As best I can tell, one hypothesis is that treatment with margin notes will be better than treatment with other methods (LLM and RAG baselines) across a fixed condition (e.g., length variant, task). There are some allusions to other hypotheses (e.g., comparisons across columns), but that's less clear. This is important because of the next point.\n* **No formal hypothesis tests.** There are a lot of numbers in Table 4+. Results in bold seem to be the max within some context. However, it's not clear if any of these differences are (a) statistically significant and/or (b) if those tests have accounted for multiple comparisons (since these datasets are being reused...a lot). Without this, it's difficult to understand the robustness of these results. In order to address this, you can consult the literature on significance testing (Cohen's \"Empirical Methods for Artificial Intelligence\" is good; tutorials from the RecSys/information retrieval communities are also good) and correcting for multiple comparisons (see those tutorials from the RecSys/information retrieval communities).\n* **Writing falls off at the end.** Starting with the ablation experiments (Section 5), the flow and writing of the paper weaken. Why do these ablation experiments make sense? What are the implications? What is the argument of Section 6? How are all of these things connected to the core hypothesis of the paper?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Larger models seem to profit less from WiM (table 4), and you do not include models larger than 70B. Would models larger than 70B still see improvements with WiM? Can you discuss this in more detail?\n2. RAG is best with SQuAD in many cases, and almost always better than WiM. You argue that with multihop Q&A this is no longer the case (as shown in table 4), but isn't this only true for your RAG implementation / approximation, and more sophisticated RAG systems would improve this score?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- interesting and original idea\n- comparison with several base lines\n- improvements over these baselines"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose and investigate the usage of intermediate information (margins) for improving long-context retrieval. They compare different small and medium-size LLMs as well as a RAG like system and find improvements over these baselines in many cases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- comparison and discussion not complete, as larger models (which show less improvements) and more sophisticated RAG systems are not included"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the \"Weaknesses\"."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The authors introduce a novel inference pattern called \"Writing in the Margins\" (WiM) that leverages the chunked prefill mechanism in large language models to generate intermediate \"margin notes\" that can guide the final prediction. This is a clever way to address the challenges of long-context processing in retrieval-oriented tasks.\n\n* The results show that WiM can significantly boost the performance of off-the-shelf models across a range of long-context benchmarks, including multi-hop reasoning and aggregation. This demonstrates the effectiveness of the proposed approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a new inference pattern called \"Writing in the Margins\" (WiM) that addresses the challenges of processing long input contexts in retrieval-oriented tasks. WiM leverages the chunked prefill mechanism in large language models to generate intermediate \"margin notes\" that summarize relevant information for the given query. These margin notes are then incorporated into the final response, leading to significant performance boosts on benchmarks like HotpotQA and Common Words Extraction compared to vanilla long-context models and retrieval-augmented approaches. The paper also discusses how WiM can enhance the transparency and interactivity of the retrieval process by providing users with real-time insights into the model's reasoning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The experimental setup could be expanded to include more baselines, such as state-of-the-art models specifically designed for long-context processing to better assess the relative performance of WiM.\n\n* While the results are strong, the paper could benefit from a deeper analysis of why WiM works well for some tasks (e.g., multi-hop, aggregation) but not as consistently for others (e.g., single-hop QA). Understanding the underlying mechanisms behind these performance differences would strengthen the contributions."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Writing in the Margins (WiM) is a new inference pattern for long context LLMs that leverages chunked prefill to improve retrieval tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024writing,\ntitle={Writing in the Margins: Better Inference Patterns for Long-Context Retrieval},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=56mg1JFd3n},\nnote={under review}\n}"
},
"abstract": {
"value": "In this paper, we introduce Writing in the Margins (WiM), a new inference pattern for Large Language Models designed to optimize the handling of long input sequences in retrieval-oriented tasks. This approach leverages the chunked prefill of the key-value cache to perform segment-wise inference, which enables efficient processing of extensive contexts along with the generation and classification of intermediate information (\"margins\") that guide the model towards specific tasks. This method increases computational overhead marginally while significantly enhancing the performance of off-the-shelf models without the need for fine-tuning. Specifically, we observe that WiM provides an average enhancement of 7.5% in accuracy for reasoning skills (HotpotQA, MultiHop-RAG) and a 30.0% increase in the F1-score for aggregation tasks (CWE). Additionally, we show how the proposed pattern fits into an interactive retrieval design that provides end-users with ongoing updates about the progress of context processing, and pinpoints the integration of relevant information into the final response. We release our implementation of WiM using Hugging Face Transformers library at <anonymised URL>."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"chunked prefill",
"long context inference",
"interactive inference"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/966c33fbe9610cbe646e09db39428acec9b13757.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Writing in the Margins: Better Inference Patterns for Long-Context Retrieval"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
56vHbnk35S | Graph-Guided Scene Reconstruction from Images with 3D Gaussian Splatting | main | Active | 3D Gaussian Splatting;VR;3D Reconstruction;NeRF;Large-Scale Scene Reconstruction;Graph | applications to computer vision, audio, language, and other modalities | 3;5;6 | 4;3;5 | 2;3;3 | 2;2;3 | 3;3;3 | 4.666667 | 4 | 2.666667 | 2.333333 | 3 | 0.327327 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- See questions in Weaknesses sections. \n- What does \"w/o structure estimation\" mean in Table 4. \n- Which datasets are used for results in Table 4, Table 5, Table 7. \n- Is Dust3r used for only pairwise pose estimation or is the step that yields globally aligned point maps and poses also used?\n- Definition of S_3^i in Eqn 1. is ambiguous, can you clarify what does this set include?\n\nMinor : \n- Abstract mentions that : \"This paper investigates ... reconstructing high-quality, large-scale 3D open scenes from images.\" but the paper deals with scenes with 100 images, 600 images, and two scenes with 1500-2000 images. Typically large-scale in context of SfM and 3DGS refers to city-scale scenes with tens of thousands of images. \n- It should be clarified if I(p) in Eqn 8 means \"intensity or color at pixel p in the image\" refers to GT image or rendered image."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Overall, I like that the authors propose very practical optimizations that bring both quality and run-time improvements. \n\n* Originality: The key original contribution of the paper is: Concentric Nearest Neighbor Pairing (CNNP) and Quadrant Filter (QF) organizing/pruning view-pairs in camera-graph. Other contributions are good practical applications of previously known ideas. \n* Clarity: The paper is written in a clear language, structured well, and shows experimental validation of the proposed ideas. \n* Significance: The paper introduces practical ideas for 3DGS from no-pose image collections."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a collection of practical optimizations to improve quality and efficiency of Gaussian Splatting reconstructions from image collections without poses. The optimizations relate to \n1. Efficient View-Pair Finding for Match-graph Construction for Structure from Motion\n2. Octree initialization of 3D points and Level-of-details based pruning\n3. Multi-view Consistency Loss in 3DGS optimization\n4. Match-graph / Camera-graph Importance based View Sampling for 3DGS optimization\n\nThe author show results indicating, \nOptimization 1 leads to faster SfM (Table 3) and quality improvements in GS (Table 4)\nOptimizations 2, 4 leads to faster GS optimization (Table 5, Table 7)\nOptimizations 3, 4 leads to quality improvements in GS (Table 4, Table 7)\n\nThe results are evaluated on scenes from Waymo, Kitti, and Mill-19 datasets with images in the range of ~600, ~100, and ~2000."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The method proposed in the paper is forming camera-graph using Dust3r to find relative poses between image pairs and pruning pairs using the proposed CNNP and QF steps. The efficiency of structure estimation is estimated w.r.t default COLMAP pipeline (assuming incremental Structure from Motion). This is not a fair comparison and sufficient details are not provided, making it harder to assess the true benefit of the proposed improvements. \n\n- Paper mentions that Dust3r is used to estimate pairwise relative poses in 0.01 seconds. Can you provide a more detailed breakdown of the Dust3r usage, including whether the 0.01 seconds is per pair or total, what specific hardware was used, and how many total pairs were evaluated across the different datasets\n\n- COLMAP exhaustive and COLMAP vocab-tree matching time are provided. However, sufficient details on the experimental setup and compute resources are not provided. For example, for vocab-tree matching, which dataset is used to compute the vocab-tree, how many nearest neighbors are retrieved per image, in total how many pairs are evaluated? What compute resources are used for this matching? \n\nWithout these details, it is difficult to draw conclusions. \n\nAt a more basic level, Dust3r + CNNP + QF contributions are mainly to improve match-graph construction and BA runtime. A fair comparison of the improvements in these runtimes should be with other SoTA efficient SfM methods, not default COLMAP. \n\nDefault incremental SfM implemented with COLMAP is commonly used by radiance field papers to compute poses but by no means this is the most efficient pipeline. There is a vast literature on how to approximate match-graph construction going back a decade. There are well-established alternatives to incremental Structure from Motion with implementations in Open source SfM libraries such as OpenMVG, OpenSfM, Theia, and most recently GLOMAP that offer much better run-time behavior. 
\n\nGiven the authors use prior poses estimated from Dust3r, the comparison of CNNP and QF steps should be done with match-graph pruning methods that already use prior poses. A naive baseline to compare against would be to construct a camera-graph only from view pairs with overlapping frusta. Can you add this comparison to your evaluation, evaluating both quality and run-time?\n\nI like the practicality of proposed ideas but I don't think that they are contextualized and compared correctly. The other ideas such as LOD-based point pruning and view-importance based sampling are nice practical improvements which provide qualitative and runtime gains w.r.t. original 3DGS paper, however 3DGS provides a baseline not SoTA comparison. A number of methods have been proposed since the original paper to improve both, the quality and efficiency of 3DGS (2DGS, RadSplats, . A few relevant to the paper: Hierarchical 3D Gaussian Representation (Kerbl et al SIGGRAPH Asia 2024), Scaffold-GS (Lu et al CVPR 2024), Octree-GS (Ren et al). \n\nThe authors can also provide results on MipNeRF360 dataset which is used more commonly in radiance field literature, this will make it easier to compare their results against contemporary 3DGS methods.\n\nAs is, the paper is an assortment of good practical improvements for a sparse recon + 3DGS reconstruction system, and I am positive that these insights can be valuable for practitioners in the field. However, these small contributions are scattered across the pipeline and none are evaluated as thoroughly as they should be with SoTA methods and good baselines respectively for each, making it difficult to place the value/significance of contributions in context of SoTA."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. If the spatial prior-based structure relies on the initial pose estimation, wouldn’t inaccurate initial results lead to a poorly constructed graph?\n2. In Table 1, why doesn’t the FPS of the proposed method exceed that of the original 3D Gaussian Splatting (3DGS)? Intuitively, the pose estimation should contribute positively. Where is the additional computation time being spent?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. the paper is easy to understand.\n2. the results show the effectiveness of proposed pipeline."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Focus on the large-scale Gaussian-based reconstruction pipeline, the authors propose a graph-guided framework, GraphGS, which leverages spatial priors for scene structure estimation to create a camera graph encoding camera topology. Using graph-guided multi-view consistency and an adaptive sampling strategy, GraphGS enhances the 3D Gaussian Splatting optimization, mitigating overfitting to sparse viewpoints and accelerating reconstruction. Quantitative and qualitative evaluations across multiple datasets demonstrate that GraphGS achieves state-of-the-art performance in 3D reconstruction."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The manuscript lacks a detailed explanation for each term in Equation (1), which would enhance clarity and understanding.\n2. Many of the authors' methods are designed to improve upon COLMAP. It would be beneficial to include experiments comparing the accuracy of initial values in GS, such as pose accuracy, to illustrate the improvements."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What is the minimum quality of poses required by the proposed framework? \nDoes any of the proposed steps account for large errors in the pose estimation? \nWhy do the authors attempt to compete strongly with COLMAP? COLMAP was used as a method to obtain initial poses and the authors used wang et al. 2024 to obtain initial poses. The framework the authors propose can be used with poses computed by either method."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors present an elaborate pre-processing framework to effectively scale Gaussian splitting to large scenes. The ideas presented in the paper are exciting and well-presented for the most part. The concept of exploiting low-cost prior heuristics to allow the network to focus on the underlying task is interesting and the experimental evaluation demonstrates the effectiveness of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a framework for large-scale 3D reconstruction using Gaussian splatting. The authors suggest the construction of a prior graph-guided scene structure. This results in estimating a camera graph that encodes the camera topology. Then based on graph weights the employ an adaptive sampling strategy to the 3D Gaussian splatting optimization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) minor: There are some spelling errors, please proofread the manuscript e.g ( line 135 formed -> form, line 138 initializaition -> initialization )\n\n2) Line 147 - 149: Please specify here which models you use to obtain camera poses. How approximate are these poses? What is the error tolerance? Could you please provide quantitative metrics on the initial pose quality compared to ground truth if available?\n\n 3) For the concentric nn pairing the authors use the symbol S multiple times. It would make sense to use CNNR as a symbol of the overall process output and use more meaningful names than S1 S2 and S3 for the various heuristics. Maybe names related to their role in CNNR computation. \n\n4) In the quadrant filer could you please specify whether the orientation is provided as a normal or any other form (euler angles)?\n\n5) The adaptive sampling section is not clear. \n - line 322 primarily considers two criteria -> Are there more criteria than these two?\n - Line 344 We design node weight wn(i) based on betweenness centrality -> So the node weight does not take into account the degree centrality? \n - How is the view selection probability integrated into the 3DGS optimization? \n\n6) Lines 457-458: Could you please provide more information on how you obtained your initial poses and what is the size of the dataset, the hardware used, or any preprocessing stats that the relative pose estimation is done in 10ms?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024graphguided,\ntitle={Graph-Guided Scene Reconstruction from Images with 3D Gaussian Splatting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=56vHbnk35S},\nnote={under review}\n}"
},
"abstract": {
"value": "This paper investigates an open research challenge of reconstructing high-quality, large-scale 3D open scenes from images. It is observed existing methods have various limitations, such as requiring precise camera poses for input and dense viewpoints for supervision. \nTo perform effective and efficient 3D scene reconstruction, we propose a novel graph-guided 3D scene reconstruction framework, GraphGS. Specifically, given a set of images captured by RGB cameras on a scene, we first design a spatial prior-based scene structure estimation method. This is then used to create a camera graph that includes information about the camera topology. Further, we propose to apply the graph-guided multi-view consistency constraint and adaptive sampling strategy to the 3D Gaussian Splatting optimization process. This greatly alleviates the issue of Gaussian points overfitting to specific sparse viewpoints and expedites the 3D reconstruction process. We demonstrate GraphGS achieves high-fidelity 3D reconstruction from images, which presents state-of-the-art performance through quantitative and qualitative evaluation across multiple datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"3D Gaussian Splatting",
"VR",
"3D Reconstruction",
"NeRF",
"Large-Scale Scene Reconstruction",
"Graph"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3f6158050521ac6d4c00bb0779995d5b744205d6.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Graph-Guided Scene Reconstruction from Images with 3D Gaussian Splatting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
57EjN072hl | COT Flow: Learning Optimal-Transport Image Sampling and Editing by Contrastive Pairs | main | Active | generative models;consistency models;diffusion models;optimal transport | generative models | 3;5;5 | 4;4;3 | 2;3;2 | 2;3;2 | 2;2;2 | 4.333333 | 3.666667 | 2.333333 | 2.333333 | 2 | -0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see above"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is well-written"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper present Contrastive Optimal Transport Flow (COT Flow), a method that achieves fast and high-quality generation with improved zero-shot editing flexibility compared to previous diffusion models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The performance improvement of the paper is not significant.\n\n2.The comparison method is outdated; SDedit is a work from two years ago.\n\nThis paper has neither impressive results nor significant improvements. I did not find any highlights in this paper, leaning towards a rejection."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Does COT Flow truly address the generative learning trilemma? The comparison only includes FID scores, but if the model claims to tackle the trilemma, it should demonstrate comparisons in terms of sampling speed, quality, and diversity against existing methods.\n2. What is the training process for the neural optimal transport model?\n3. Comparison methods are limited. Numerous recent studies on diffusion-based unpaired image-to-image translation tasks should be included, such as [1], [2], and [3]. Additional comparisons with more recent or relevant baselines could strengthen the validity and impact of the findings.\n\n[1] Korotin, A., Selikhanovych, D., & Burnaev, E. Neural Optimal Transport. In The Eleventh International Conference on Learning Representations.\n[2] Su, X., Song, J., Meng, C., & Ermon, S. Dual Diffusion Implicit Bridges for Image-to-Image Translation. In The Eleventh International Conference on Learning Representations.\n[3] Zhao, M., Bao, F., Li, C., & Zhu, J. (2022). Egsde: Unpaired image-to-image translation via energy-guided stochastic differential equations. Advances in Neural Information Processing Systems, 35, 3609-3623."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The proposed COT Flow model effectively enables unpaired image-to-image translation.\n2. The COT Flow model shows potential for various zero-shot image editing scenarios."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes the COT Flow model by integrating neural optimal transport and consistency models. Based on the similarities between contrastive learning and consistency models, the authors introduce a new method for defining positive pairs. Furthermore, they demonstrate that the COT Flow can be applied to zero-shot image editing."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Comparison methods are limited.\n2. Quantitative evaluations are limited, relying solely on FID scores.\n3. Qualitative results lack visual impact."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How to use this flow to open-set text-based image editing? What is the source distribution and target distribution?\n2. What is the role of \\phi_\\omiga in Eq.10 and 11? Eq.10 and 11 confuse me.\n3. In Algorithm 1 COT Training, if the encoder learns to output all zero, the loss function will be zero, how to handle this problem?\n\nI will be happy to raise the rating if the response is good."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This work presents Contrastive Optimal Transport Flow (COT Flow), a new method that achieves fast and high-quality generation with improved zero-shot editing flexibility compared to previous diffusion models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work presents Contrastive Optimal Transport Flow (COT Flow), a new method that achieves fast and high-quality generation with improved zero-shot editing flexibility compared to previous diffusion models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Experiments are not enough only on handbag→shoes (64×64), CelebA male→female (64×64), and outdoor→church (128×128). These tasks are unuseful.\n2. The images are too small and unclear.\n3. The comparison methods are too old. It should compare with at least some of the latest text-based image editing methods in 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Generative flow using optimal transport regularization"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024cot,\ntitle={{COT} Flow: Learning Optimal-Transport Image Sampling and Editing by Contrastive Pairs},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=57EjN072hl},\nnote={under review}\n}"
},
"abstract": {
"value": "Diffusion models have demonstrated strong performance in sampling and editing multi-modal data with high generation quality, yet they suffer from the iterative generation process which is computationally expensive and slow. In addition, most methods are constrained to generate data from Gaussian noise, which limits their sampling and editing flexibility. To overcome both disadvantages, we present Contrastive Optimal Transport Flow (COT Flow), a new method that achieves fast and high-quality generation with improved zero-shot editing flexibility compared to previous diffusion models. Benefiting from optimal transport (OT), our method has no limitation on the prior distribution, enabling unpaired image-to-image (I2I) translation and doubling the editable space (at both the start and end of the trajectory) compared to other zero-shot editing methods. In terms of quality, COT Flow can generate competitive results in merely one step compared to previous state-of-the-art unpaired image-to-image (I2I) translation methods. To highlight the advantages of COT Flow through the introduction of OT, we introduce the COT Editor to perform user-guided editing with excellent flexibility and quality."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"generative models",
"consistency models",
"diffusion models",
"optimal transport"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/db2ab2872849147cae4c83eb71871eae4c11fdd7.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/0be9ecfa9208f698d60196c30a0ac9478180c537.zip"
},
"title": {
"value": "COT Flow: Learning Optimal-Transport Image Sampling and Editing by Contrastive Pairs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
57NfyYxh5f | How to Probe: Simple Yet Effective Techniques for Improving Post-hoc Explanations | main | Active | Interpretability;Explainable AI;Representation Learning | interpretability and explainable AI | 5;5;6;8 | 5;5;4;4 | 2;2;3;4 | 2;3;3;3 | 2;4;4;4 | 6 | 4.5 | 2.75 | 2.75 | 3.5 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tClarity and Organization: The paper is exceptionally well-written and structured, enhancing readability and accessibility of the key finding\n2.\tThe study reveals that training probes using binary cross-entropy (BCE) loss instead of the traditional cross-entropy (CE) loss consistently enhances interpretability metrics. The analysis of the Softmax Shift-Invariance Issue in interesting and insightful. This could have substantial implications for various DNN-based applications.\n3.\tThe improvements in interpretability metrics are shown to be consistent across various training methods for the visual encoder. The robustness of these findings was thoroughly validated using diverse learning paradigms, including supervised, self-supervised and CLIP."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper challenges the tradition notation that model explanations are independent of training methods by demonstrating that the quality of attributions for pre-trained models depends significantly on how the classification head is trained. It shows that using binary cross-entropy (BCE) loss instead of conventional cross-entropy (CE) loss leads to marked improvements in interpretability metrics across several visual pre-training frameworks. Furthermore, it is found that the non-linear B-cos MLP probes boost the class-specific localization ability of attribution methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\t(Major) Limited Model Diversity: The research exclusively utilizes the ResNet50 model backbone, which canot adequately represent the behavior across various architectures. Testing additional backbones, especially Vision Transformers (ViTs), and incorporating explanation methods tailored for these models (referenced as [1][2][3]), would provide a more robust validation of the findings.\n2.\tInclusion of Additional Methods: The paper could be strengthened by including more population perturbation-based methods, such as RISE [4] and Score-CAM [5], to further substantiate the interpretability improvements.\n3.\tSelection of Examples: Concerns arise regarding whether the examples shown in Figures 1 and 6 are cherry-picked, especially since the GridPG Score in Figure 5 suggests that the BCE model does not always perform perfectly. Including a broader range of examples, particularly where the BCE model scores lower on the GridPG, would offer a more comprehensive understanding and enhance the paper's credibility. \n\nI would be happy to improve my rating of the paper if these issues are addressed thoroughly.\n\n[1] Transformer interpretability beyond attention visualization. \n\n[2] Generic Attention-model Explainability for Interpreting Bi-Modal and Encoder-Decoder Transformers.\n\n[3] Vit-cx: Causal explanation of vision transformers.\n\n[4] RISE: Randomized Input Sampling for Explanation of Black-box Models.\n\n[5] Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In the case of backbone freezing, increasing classifier parameters can improve performance. Is the design of B-cos MLP necessary? Is MLP not possible?\n2. Can you provide more loss function results to verify Softmax Shift-Invariance? How about the cross-entropy loss?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper reveals and demonstrates the strong dependence of post-hoc importance attribution methods on the training details of the classification layer in pre-trained models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper discovers and demonstrates the strong dependence of post-hoc importance attribution methods on the training details of the classification layer of the pre-trained model. Based on this findings, the paper also proposes a simple but effective adjustment to the classification layer to significantly improve the quality of model explanations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The experimental method is limited to ResNet50, and the results are not extensive enough. Thus, experimental results are not convincing enough to verify the effectiveness of their methods.\n\n2. The contribution of this article is not enough. The author discovered the impact of training details on post-processing methods, but the evaluation metrics used and the subsequent B-cos model are not the author's innovation.\n\n3. [minor] Figures in this paper have obvious flaws. It will be better that authors carefully revise their figures."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. I am still confused as to why CE produces worse attribution than BCE. Could the authors explain this again?\n 2. Also, why is it that the last output layer is so important? Why is the rest of the model have such little importance?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Overall, it is an interesting read and demonstrates some interesting results. The writing is generally clear, the issue is well defined, and the experiments are impactful. It is difficult for me to say exactly what the authors did well, other than that it is a good read. \n \n1. In-depth motivation section, outlining the issues around generating consistently clear attributions\n2. Plenty of qualitative results\n3. Experiments over a variety of pre-trained models and datasets\n4. The authors clearly show that this is an attribution-invariant issue."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The main motivation behind this work is that two models using the same training regime and ending at the same loss can produce two extremely different attributions for the same image. The authors demonstrate that the training paradigm for the final classification layer of a network is the most important decider in generating more precise attributions, regardless of the attribution method. They specifically show that a binary cross entropy trained output layer produces better attributions than a cross entropy trained output layer. The increase in attribution quality does typically come at the cost of <10% accuracy reduction when using a linear layer, but the accuracy can be improved by using a more complex output layer."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There isn't any discussion of why there is an increase in accuracy and attribution quality with more complex output layers. Is it as simple as the layers being larger, or is there another reason? I assume proper train, test, and validation sets have been used?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Would the authors suggest the development of future classification models take into consideration the information in this paper? \n\nDo the authors think that a training loss could be created to further improve explainability as the minor differences in CE and BCE have a significant effect?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "This paper is very well written and planned. Not only are the approaches and findings very clear but the authors provide extensive support of their findings over numerous models, datasets, attribution methods, and metrics. \n\nThe choice to study multiple pre-training approaches adds significant strength to their arguments and findings. \n\nThe overall findings are simple, but impactful for future considerations of interpretable model design, post-hoc explainability, and improving model interpretation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors find and support an interesting observation that the method of training the classifier layer of a model has a significant impact on the results of post-hoc attribution methods. Because many post-hoc attribution methods assume that model training does not have an impact, they find that this must be reconsidered, and in fact, simply modifying the method of training the last linear layer(s) can improve model accuracy and explainability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are not significant weaknesses to address. There are minor spelling mistakes, but it does not hurt the delivery of the information."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024how,\ntitle={How to Probe: Simple Yet Effective Techniques for Improving Post-hoc Explanations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=57NfyYxh5f},\nnote={under review}\n}"
},
"abstract": {
"value": "Post-hoc importance attribution methods are a popular tool for “explaining” Deep Neural Networks (DNNs) and are inherently based on the assumption that the explanations can be applied independently of how the models were trained. Contrarily, in this work we bring forward empirical evidence that challenges this very notion. Surprisingly, we discover a strong dependency on and demonstrate that the training details of a pre-trained model’s classification layer (<10% of model parameters) play a crucial role, much more than the pre-training scheme itself. This is of high practical relevance: (1) as techniques for pre-training models are becoming increasingly diverse, understanding the interplay between these techniques and attribution methods is critical; (2) it sheds light on an important yet overlooked assumption of post-hoc attribution methods which can drastically impact model explanations and how they are interpreted eventually. With this finding we also present simple yet effective adjustments to the classification layers, that can significantly enhance the quality of model explanations. We validate our findings across several visual pre-training frameworks (fully-supervised, self-supervised, contrastive vision-language training) and analyse how they impact explanations for a wide range of attribution methods on a diverse set of evaluation metrics."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Interpretability",
"Explainable AI",
"Representation Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9d39cae3f1e452e3a034ede0227b6add97082654.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "How to Probe: Simple Yet Effective Techniques for Improving Post-hoc Explanations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
57iQSl2G2Q | Safe Bayesian Optimization for Complex Control Systems via Additive Gaussian Processes | main | Active | Safe Bayesian Optimization;Complex Control Optimization;Additive Gaussian Processes | optimization | 1;3;5;10 | 4;5;2;4 | 2;2;3;4 | 2;2;2;4 | 2;3;3;4 | 4.75 | 3.75 | 2.75 | 2.5 | 3 | -0.154326 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1: Can you include an experiment with a truly high-dimensional system? \n\n2: As mentioned in the weakness section, I think cascade controllers are not a good example. I suggest something like distributed controllers in large water/power networks. Could you test the algorithms on more complex systems?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper addresses an important problem of (safe) BO, which is the limitation to low-dimensional problems. \n- Using additive kernel functions in a safe BO setting to increase its performance in high dimensional problems seems, to the best of my knowledge, a novel idea. The results indicate that this approach can be superior to \"standard\" safe BO approaches for some systems. \n- To the best of my knowledge, the math seems to be sound and the proofs are correct."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose a safe Bayesian optimization framework utilizing Gaussian processes with additive squared exponential functions. The motivation for the new kernel function is the poor performance of the \"standard\" SE kernel for high-dimensional spaces. The authors argue that using BO for controller tuning often requires operating in high-dimensional spaces due to a large number of parameters of the controllers to be considered. It is experimentally evaluated that the proposed algorithm, SafeCtrlBo, can lead to better results than other safe BO algorithms. Furthermore, the authors present theoretical results on the finite-time convergence of the algorithm.\n\nThe main contribution is utilizing additive SE-kernels in a Bayesian optimization framework and proofing the finite-time convergence."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main motivation for introducing the new kernel function is its superior in high dimensions. However, all evaluations in the paper are still pretty low-dimensional, with dimensions up to 10. In fact, in the related work it is mentioned that existing safe BO approaches work with systems where \"three controller [...] each controller having only two parameters\". However, the proposed algorithm is experimentally evaluated on the exact same numbers of parameters.\n\n2. I agree that controller tuning including many parameters can be tricky. However, cascaded controllers are a bad example of that. In practice, these structures can be tuned very efficiently as you start with the inner loop until a specific performance is reached, then the next loop, and so on. All parameters are very meaningful, and the process is quite transparent. Furthermore, \nmotor controllers are a bad example for safe BO. As long as no load is attached to the motor (something you don't do for tuning the controllers), it is almost impossible to destroy the system. Typical amplifiers do have a \"max current\" setting, or it is simply defined in the software. Therefore, I cannot understand the safety concerns that the authors mentioned for the experiment.\n\nLong story short: Although the adapted safe BO method sounds interesting, the motivation with cascade controllers and the experimental evaluation are not a good choices."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The statement \"Srinivas et al. (2010) demonstrated that BO methods can converge to the global optimum of unknown performance functions in fewer steps compared to genetic algorithm.\" is incorrect. Nothing in the paper compares their approach to a genetic algorithm, and there is no discussion on this.\n\nSimilarly, the statement \"However, SAFEOPT uses Gaussian kernels as the covariance function of the Gaussian processes, which is effective mainly for low-dimensional problems (Bengio et al., 2005)\" is inaccurate. Berrkenkamp et al. (2016) use a Matern kernel and there is nothing in Sui et al. (2015) that indicates that a squared-exponential kernel is necessary.\n\nIs equation (1) correct? Shouldn't the last term have t in the subscript instead of k? The same holds for equation 2.\n\nI feel that theorems 4.1 and 4.2 are mostly trivial and should be placed in the appendix.\n\nDoes line 5 in the pseudocode simply mean that the algorithm picks the point with maximal variance in B_n? If so, why not just write sigma insteadl of un-ln?\n\nThe presentation of outermost evaluated safe points is somewhat confusing. Is a_{oes} in the dataset? Would it help to introduce the data set \\mathcal{D}_n and to write a_{oes} \\in \\mathcal{D}_n?\n\nTheorems 5.1 and 5.2 are unclear to me. What do the authors mean by the \"maximum allowable uncertainty for the exploration to converge to an ( \\epsilon )-reachable safe region.\" and \"Maximum allowable uncertainty for the performance function to converge to a ( \\zeta )-optimal function value.\"?\n\nIn the proof of Lemma C.1, the step from line 878 to 880 is incomplete. To show that the posterior variance is indeed increasing, the authors also need to show that [K_t^{-1} k_t(x)]_i is positive, which the authors do not do.\n\nI might have missed something, but I am not sure if the statement in line 894 holds. Take, for example, the points x_sb =1, x_oes = 0 and x_i = 10. 
For \\lambda=0, we have || x(\\lambda) - x_i ||_2 = 10 > || x_sb - x_oes||_2 = 1, yet the inner product (x(\\lambda) - x_i ) (x_sb-x_oes) = (0-10 ) (1-0) <0 is negative."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is well-motivated and written in a very clear form. It also provides an adequate review of related works. The proposed algorithm is an interesting contribution and shows good empirical results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propse a novel algorithm for safe Bayesian optimization that exploits purported properties of the squared-exponential kernel to facilitate the expansion of the safe set. The proposed idea is interesting and the authors report good theoretical results. However, the theoretical exposition is poor and the corresponding proofs are either incomplete or incorrect. Though I recommend rejection for this reason, I am open to changing my score if the authors improve the paper accordingly."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The theoretical results are mostly stated usinng plain text instead of mathematical formulas, which makes them somewhat ambiguous and hard to understand at times.\n\nThe proofs of the theoretical results are incomplete and potentially wrong."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please address the mentioned weaknesses. \n\nAdditional questions on the hardware experiments: \n- When does the exploration phase stop, when does the exploitation phase start?\n- How were the GP hyperparameters chosen? \n- Why and when do safety violations occur?\n- How can $T_0$ be chosen in real-world applications?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "(S1) Clear motivation and objective \n\n(S2) Demonstration and benchmarking of different safe BO algorithms on synthetic function and in a real-world use case. The application of multiple algorithms as baselines makes the results significant for the BO community. \n\n(S3) The provided implementation gives clear insights into the method and the experiments. \n\nIn general, the contribution is reasonable. Focusing on the border of the safe set to reduce computational effort is a logical approach. Additive kernels seem to be able to enhance the performance of safe BO algorithms for high-dimensional systems. However, it remains unclear how to choose kernel hyperparameters in practice."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses safe Bayesian optimization (BO) to optimize the parameters of cascaded control systems. To make safe BO more suitable for this task, additive kernels are used as a model, and a new definition of expander sets is introduced, which makes the BO optimization more compute efficient. The method is benchmarked on synthetic functions and a hardware setup, tuning parameters for a field-oriented control algorithm in a permanent magnet synchronous motor. Although the proposed method outperforms other benchmarks, all tested safe BO methods result in safety constraint violations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I see the following main weaknesses:\n\n(W1) The embedding of related work is insufficient. There are lines of work, which seem closely related, but are not cited appropriately. \n\n(W2) Theoretical results are partially imprecise. Some of the results have been known before and build on previous results, yet these are not cited. \n\n(W3) In the empirical study, information is missing on how to apply the algorithm and how hyperparameters are chosen. The choice of heuristics is not sufficiently documented and discussed.\n\n(W4) Safety violations in hardware experiments are not adequately discussed.\n\nBelow, I provide a more detailed discussion of these weaknesses, ordered by sections.\n\n\n## Related work:\n\n* In related work, Bottero et al. 2022 is missing. It would make sense to compare against this, as this work explicitly focuses on safe BO without expander sets. \n* Furthermore, studies on safe/constrained BO for cascaded control systems, such as Khosravi et al. 2022, seem directly related but are not mentioned. \n\n\n## Theoretical Results: \n\n* Lemma 4.1 is a known result that can be found in e.g., Berlinet and Agnan-Thomas 2004 Theorem 5\n* Theorem 4.2 is imprecise; further information on this can be found in Fiedler 2023 (Section 4) \n* The theoretical results section clearly builds on Chowdhury and Gopalan 2017, Theorem 2, using their results for confidence bounds without referencing this paper. \n* There are more recent results on confidence bounds in GP Regression, which lead to more conservative bounds and do not rely on the information gain. Information on this can be found in Whitehouse et al. 2024\n* Theorem 5.1: The RKHS norm of the safety functions is bounded by $B$ , not the safety functions themselves\n\n## Empirical Study \n\nFor the stage-wise approach it is unclear, when to go to the next stage. This can only be chosen through a heuristic. In the paper it remains unclear, how to choose this in a practical application. 
\n\nIn the synthetic experiments, only a noise free setting is evaluated. The results would be more relevant and would underline the contribution of the method more, if function evaluations would be noisy. In addition, this setting would also better represent real-world settings. \n\nIn the experimental evaluation, the choice of hyperparameters of the GP and the choice of $\\beta$ is unclear. While in the theoretical derivations, the RKHS-norm bound and the information gain are used, in the experiments, $\\beta = 2$ is used as a heuristic. This choice is common in other safe BO works; however, it is not made transparent in the paper and can only be found in the implementation. The use of this heuristic invalidates all proven safety guarantees. A detailed discussion on this can be found in Fiedler et al. 2024.\n\nThe choice and the long lengthscales in the kernel lead to safety violations. Typically, shorter lengthscales compared to the domain size are applied in safe BO applications. This limits performance while exploring but can lead to fewer safety violations. \nIt would be interesting to know when safety violations occurred in the hardware experiments. I assume this is mostly in the first exploration stage.\n\nFor the hardware experiments, it is unclear what $T_0$ is and if there is even a switch in the exploitation stage. \n\n## Discussion\n\nThe safety violations that occur in the empirical results need to be more thoroughly discussed. In light of these, the claim in the conclusion that it \"can be seamlessly integrated into real-world complex control applications.\" is too confident, having observed 39 safety violations in the hardware experiments. \n\n\n## Minor comments: \n\nLine 191 and 194: Definitions of safe set and expander sets in Sui 2015 and Sui 2018 are different\nLine 206-207: This statement is wrong. 
Berkenkamp 2016 uses a Matern kernel; generally, in many SafeBO applications Matern kernels with $\\nu = 3/2$ are used.\nLine 333: Typo: Lipschitz continuous\nLine 317-318: Grammar in Theorem 5.1 and Theorem 5.2 \"with R-sub-Gaussian\" \n\n\n## References (not in the paper)\n\n- Berlinet, A., & Thomas-Agnan, C. (2004). _Reproducing Kernel Hilbert Spaces in Probability and Statistics_. Springer Science & Business Media.\n- Bottero, A., et al. (2022). Information-theoretic safe exploration with Gaussian processes. _Advances in Neural Information Processing Systems_, 35, 30707-30719.\n- Chowdhury, S. R., & Gopalan, A. (2017). On kernelized multi-armed bandits. In _International Conference on Machine Learning_ (PMLR).\n- Fiedler, C. (2023). Lipschitz and Hölder continuity in reproducing kernel Hilbert spaces. _arXiv preprint arXiv:2310.18078_.\n- Fiedler, C., Menn, J., Kreisköther, L., & Trimpe, S. (2024). On safety in safe Bayesian optimization. _Transactions on Machine Learning Research_.\n- Khosravi, M., et al. (2022). Safety-aware cascade controller tuning using constrained Bayesian optimization. _IEEE Transactions on Industrial Electronics_, 70(2), 2128-2138.\n- Whitehouse, J., Ramdas, A., & Wu, S. Z. (2024). On the sublinear regret of GP-UCB. _Advances in Neural Information Processing Systems_, 36."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "What are the consequences of violations of constraints in this test application? Do they make the algorithm unsafe for use in practice? Is there a parameter that can be changed to reduce the number of violations? Does that result in a trade-off between performance and number of constraint violations, and if yes, how can this trade-off be handled?"
},
"rating": {
"value": 10
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The proposed method for using additive kernels in safety-aware BO leads to optimization in fewer trials, which is of great practical significance for physical control systems with safety limitations. The paper is written clearly, references prior work in the field, and explains very well the connections of the proposed method to that work. Although safe BO and additive functions are not novel ideas individually, their combination is original and based on significant technical insight. Properties of the proposed additive kernels are analyzed well theoretically. A novel acquisition function is proposed that is faster to compute than previously proposed ones. \n\nFurthermore, the method is tested on a physical control system set-up that is of real practical use. Because the optimized parameters of the controllers are their proportional and integral gains, the proposed method could potentially find widespread use in industrial practice, where the use of PI controllers is very common and their tuning is known to be notoriously tricky and laborious."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper describes a method for safe Bayesian optimization suitable for control applications where safety concerns limit the set of parameters that can be tried. The main contribution is the use of additive kernels in safe BO, resulting in faster optimization. This contribution is of major significance when applying BO to physical systems, where control trials are slow and costly to perform. A second contribution is the speedup of the computation of the expander set, affecting the computational time of the algorithm. The proposed method has been verified on benchmark functions in simulation, as well as on a challenging control problem involving a physical set-up consisting of nested controllers regulating the velocity of a permanent magnet synchronous motor under field-oriented control."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper is somewhat incremental in the line of research on applying BO to safety-constrained control systems, and combines known ideas. Nevertheless, this combination required a significant technical insight, so it is far from obvious or trivial.\n\nThe experimental results on the physical system resulted in a significant number of constraint violations (39 for the method proposed by the authors). It is not clear what the consequences of these violations are in practice."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "In this paper, we propose SafeCtrlBO to optimize multiple controllers simultaneously and safely."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024safe,\ntitle={Safe Bayesian Optimization for Complex Control Systems via Additive Gaussian Processes},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=57iQSl2G2Q},\nnote={under review}\n}"
},
"abstract": {
"value": "Controller tuning and optimization have been among the most fundamental problems in robotics and mechatronic systems. The traditional methodology is usually model-based, but its performance heavily relies on an accurate mathematical system model. In control applications with complex dynamics, obtaining a precise model is often challenging, leading us towards a data-driven approach. While various researchers have explored the optimization of a single controller, it remains a challenge to obtain the optimal controller parameters safely and efficiently when multiple controllers are involved. In this paper, we propose SafeCtrlBO to optimize multiple controllers simultaneously and safely. We simplify the exploration process in safe Bayesian optimization, reducing computational effort without sacrificing expansion capability. Additionally, we use additive kernels to enhance the efficiency of Gaussian process updates for unknown functions. Hardware experimental results on a permanent magnet synchronous motor (PMSM) demonstrate that compared to existing safe Bayesian optimization algorithms, SafeCtrlBO can obtain optimal parameters more efficiently while ensuring safety."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Safe Bayesian Optimization",
"Complex Control Optimization",
"Additive Gaussian Processes"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/41632b7937b693e00d1d0cef4f8f73b2314b6653.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Safe Bayesian Optimization for Complex Control Systems via Additive Gaussian Processes"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
57xboRTbwI | Bias Analysis in Unconditional Image Generative Models | main | Active | image generative models;bias analysis;distribution shift | generative models | 3;3;3;6 | 5;4;3;4 | 2;2;2;3 | 1;2;2;2 | 2;3;3;3 | 3.75 | 4 | 2.25 | 1.75 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"Yes, Discrimination / bias / fairness concerns"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Do the accuracy rates reported in Figures 4 and 5 of the appendix refer to training set or validation set performance?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Problem Definition: The paper focuses specifically on studying the inductive bias of generative models themselves, avoiding other factors such as dataset bias and prompts, providing a novel perspective for analyzing bias sources.\n\n2. Methodology: Proposes a standardized bias evaluation framework that uses the same classifier for all label predictions, ensuring consistency in evaluation.\n\n3. Writing: The paper is well-structured and explains complex concepts in an understandable way."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates how inductive bias in unconditional generative models affects bias in generated results. The authors define bias shift as the difference between the probability of attribute presence in the training and generated distributions, and train a classifier to categorize attributes to quantify bias shift. Furthermore, attributes are categorized as subjective or non-subjective based on the position of the classifier's decision boundary. The author validates multiple models including diffusion models and GAN on two datasets, CelebA and DeepFusion, revealing related patterns."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Bias Definition: The paper's definition of bias, which only measures differences in attribute occurrence probabilities, may be overly simplistic. Bias typically encompasses more complex dimensions including social prejudices and systemic discrimination.\nOversimplified Metrics: The Average Bias Shift (ABS) metric may be too reductive as it:\n\n2. ABS doesn't consider correlations between attributes and ignores the varying social impact weights of different attributes\n\n3. The 0.01 threshold for subjective/non-subjective classification lacks justification, and there are no ablation studies on threshold selection. The data-driven categorization approach may overlook inherent social and ethical implications of attributes\n\n4. Relying on a single classifier may introduce classifier-specific biases. Figures 4 and 5 show relatively low accuracy (90% or below) for many attributes, questioning the reliability of the pre-trained classifier. It would be better to consider using Large Language Models as supplementary evaluators (Just a suggestion, no need to add experiments).\n\n5. The current model selection appears dated, primarily relying on ADM (2021) and BigGAN (2019) for experiments. They may not reflect the latest advances in generative modeling. The paper would benefit significantly from validating the proposed framework on more recent architectures, such as Stable Diffusion, DiT, PixArt-α for diffusion models, and StyleGAN3 for GANs."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness point 1"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work presents a bias evaluation framework for unconditional image generative models.\n2. The authors proposed two taxonomies for categorizing bias shifts for different attributes.\n3. The authors experimented with different sizes of diffusion models to observe how bias shift is happening."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a framework for bias evaluation of unconditional image generative models. The authors measured the bias shift in the original and synthetic data and tested their framework in publicly available datasets. They found that, bias shift happens in image generative models and proposed two taxonomies to categorize the bias shift for different attributes. The paper is well-written and well-formulated. However, a comparison with the existing bias evaluation framework needs to be made."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. As this paper presents a bias evaluation framework for image dataset, it needs to be compared with other evaluation framework, i.e. compare with [1]. How is the presented framework differ with the [1]?\n\n2. Limitations of this evaluation framework should be discussed in the paper.\n\n#### References:\n\n[1] Wang, Angelina, et al. \"REVISE: A tool for measuring and mitigating bias in visual datasets.\" _International Journal of Computer Vision_ 130.7 (2022): 1790-1810."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "In general, the analysis of the bias shift of different generative models is interesting, and the pipeline's high-level idea seems to be sound. The subjective/non-subjective study is also interesting. The paper includes a vast amount of empirical results for the analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an evaluation pipeline to analyze the bias shift of different generative models. The generative models are trained on the training dataset, and an attribute classifier is also pretrained on the same dataset. The attribute prediction difference between the original dataset and the generated images measures the bias shift. The paper also separates the attribute into two categories, subjective and non-subjective, to further analyze the insight of the bias shift."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some of the discussion in the method section (sec. 3) seems to be redundant or not tied to the paper. For example, what is the purpose of introducing $P^{ideal}$? Although it is canceled in the final equation, I don't think it is necessary to introduce such a term because, intuitively, $|{P^{gen} - P^{val}}|$ itself is sufficient to measure the bias shift. Introducing extra and probably unnecessary assumptions may overcomplicate the method and lead to confusion. Also, Section 3.1 also introduces a definition of \"conditional bias,\" which is not discussed or studied in the rest of the paper. What is the purpose of introducing this definition? \n\n2. The paper claims (L142) that \"pre-trained models introduce their own biases, rendering the predicted labels unreliable for accurate bias evaluation.\" However, I disagree with this argument. I agree that the pre-trained model may be biased, but this reason does not invalidate them for performing attribute classification. Such a classifier serves as the expert in labeling attributes so that the most important criterion, if not the only criterion, should be classification accuracy. If any pre-trained models have outstanding attribute classification performance on the training/val dataset, I don't see why they shouldn't be used. Further, those pre-trained models can be finetuned on the training dataset (which this paper did) for an even better classification performance on specific datasets (e.g., CelebA), which can only benefit the bias shift analysis. \n\n3. Further, the accuracy of the classifier is not sufficient for the analysis. Although the accuracies of the majority of attributes are 90%+, there is still a considerable amount of attributes on which the classifier performs unsatisfying. This fact is critical to the analysis, considering the listed subjective attribute examples are placed in the lower portion of the performance list. 
Lower accuracy may suggest higher analysis noise and larger ABS measuring error. Since the ABS for non-subjective attributes and subjective attributes are ~1% and 3-5%, the classifier with 91.7% (lowest attribute: 68.34%) or 90.5% (lowest attribute: 71.65%) accuracy is not good enough. \n\n4. Further, the classifier is trained on the training set and directly applied to both training, valid, and generation sets. However, unlike training and valid sets are sampled from the same distribution, the generation set may have a different distribution than the original dataset. Thus, the classifier may suffer from distribution shift and/or visual domain generalization challenges, so the classifier may not be reliable on the generation set. This issue can further weaken the paper's analysis and conclusion. \n\n5. Although the attempt to split the attribute into subjective and non-subjective groups is interesting, I am not convinced that the splitting method used in the paper (decision boundary-based) is valid. The decision boundary is closely connected with classifier accuracy, which can be further connected with analysis noise and measuring errors. Thus, those attributes with unclear boundaries are more likely to have higher ABS errors. Additionally, this splitting may not match human's definition of \"subjective.\" Those \"subjective attributes\" to human definition (e.g., wearing glasses) may be easier to be classified so that they may have clearer boundaries. However, there is no guarantee of this, and the paper also does not have a complete list of subjective and non-subjective attributes to verify."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "None"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper analyzes an important topic of bias in generative models. These models have been shown to learn the biases from their datasets, and this paper proposes a new angle towards understanding these biases.\n\nThe paper is generally well written and easy to follow.\n\nThe detailed analysis of logits seems to not have been studied before."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces an analysis of non-conditional generative models, GANs and Diffusions, for image generation. By using a classifier trained on the same dataset, biases are identified as subjective and non-subjective attributes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "As a preliminary, I have reviewed this paper before for the NeurIPS SafeGEN workshop. I have reread this version of the paper, and my opinion has not changed. \n\nHere's my concerns:\n - Calling attributes subject vs non-subjective is very strange. For example, \"Pale skin\" or \"male\" in CelebA being a non-subjective attribute is surprising. I would be convinced if there were a user study to validate these attributes are similarly subjective to humans, but as it stands I'm not convinced.\n - The raw bias shift is strikingly small. The subjective logits on figure 5c look extremely similar between the synthetic versus real data. Especially when comparing 5c to 5e, it's surprising that Male landed in non-subjective and Smiling did not.\n - Using the same dataset for training the generator and classifier is very problematic: it's self-contamination. The bias studied in this paper could come from: the dataset itself, the generator's training/architecture, or the classifier's training/architecture. Given the generator and classifier are mapped to the same data distribution, the inherit biases are muddled between the two. I would have liked to seen dataset splits where half the data is used to train the generator and half the classifier. That would improve the self contamination issue substantially.\n - Finally, the actual take-aways from the paper are fairly limited. Assuming my previous point were address, the fundamental why question is not answered: why are some attributes represented more/less in the synthetic distribution. It is somewhat useful to know that some attributes are, but I would be very interested to know how to predict which attributes would be over/under represented by just training a classfier."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a standardized bias analysis framework to study bias shifts between generation and training data distributions for unconditional image generative models"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024bias,\ntitle={Bias Analysis in Unconditional Image Generative Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=57xboRTbwI},\nnote={under review}\n}"
},
"abstract": {
"value": "The widespread usage of generative AI models raises concerns regarding fairness and potential discriminatory outcomes. In this work, we define the bias of an attribute (e.g., gender or race) as the difference between the probability of its presence in the observed distribution and its expected proportion in an ideal reference distribution. Despite efforts to study social biases in these models, the origin of biases in generation remains unclear. Many components in generative AI models may contribute to biases. This study focuses on the inductive bias of unconditional generative models, one of the core components, in image generation tasks. We propose a standardized bias evaluation framework to study bias shift between training and generated data distributions. We train unconditional image generative models on the training set and generate images unconditionally. To obtain attribute labels for generated images, we train a classifier using ground truth labels. We compare the bias of given attributes between generation and data distribution using classifier-predicted labels. This absolute difference is named bias shift. Our experiments reveal that biases are indeed shifted in image generative models. Different attributes exhibit varying bias shifts' sensitivity towards distribution shifts. We propose a taxonomy categorizing attributes as $\\textit{subjective}$ (high sensitivity) or $\\textit{non-subjective}$ (low sensitivity), based on whether the classifier's decision boundary falls within a high-density region. We demonstrate an inconsistency between conventional image generation metrics and observed bias shifts. Finally, we compare diffusion models of different sizes with Generative Adversarial Networks (GANs), highlighting the superiority of diffusion models in terms of reduced bias shifts."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"image generative models",
"bias analysis",
"distribution shift"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/59b87a7fce88a12e371563311e3805d6acdb54f6.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bias Analysis in Unconditional Image Generative Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
57yOS3nIVm | Divide and Conform: Unleashing Spatial Filter Atoms for Unsupervised Target Transferability | main | Active | Filter Decomposition;Domain Transferability;Efficiency | transfer learning, meta learning, and lifelong learning | 5;5;5 | 4;2;4 | 2;4;2 | 2;3;2 | 2;4;2 | 5 | 3.333333 | 2.666667 | 2.333333 | 2.666667 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.In this manuscript, the authors conduct a detailed comparison of the proposed method’s performance with SimCLR and LORA-style methods under a cross-domain few-shot learning setting. Would the task benefit from stronger parameter-tuning performance under the setups of SimCLR and LORA-style methods?\n\n2.Does the proposed method still demonstrate an advantage on more challenging benchmarks?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Exploring new efficient fine-tuning methods for transferring diverse pre-trained models is meaningful. \n\n2. The paper provides a clear and logical description and definition of the proposed method.\n\n3. The approach achieves comparable results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Exploring the transfer of pre-trained model knowledge in cross-domain few-shot learning settings is valuable. This manuscript introduces \"Divide and Conform,\" a method designed to enhance the transferability of pre-trained convolutional neural networks (ConvNets) without relying on base data. The approach involves fine-tuning only the decomposed spatial filter atoms while keeping the atom-coefficients frozen, facilitating cross-domain transfer. Evaluated on multiple benchmark datasets, the proposed method demonstrates efficient knowledge transfer with minimal parameter adjustment."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The manuscript elaborates in detail on research progress in cross-domain few-shot learning and spatial filtering decomposition in the second section (Related Works). However, discussions of the latest related works are lacking. Additionally, the connections and distinctions between this manuscript and existing works in the field should be carefully explained.\n\n2. The arrangement of tables and figures should align with the textual content to facilitate reader comprehension and comparison.\n\n3. In the experimental section, the manuscript should include comparisons with the latest cross-domain few-shot learning methods. Additionally, SimCLR is a straightforward framework for contrastive learning of visual representations. The authors should compare their approach with more mainstream and efficient parameter-tuning methods, such as vision prompt tuning.\n\n4. The experimental results indicate that the proposed method does not outperform all baselines comprehensively. The authors should provide a detailed explanation of this."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please follow weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The paper is well written and easy to follow. \nDetailed anaylysis has been done on several datasets. \nSeveral quantitative results are presented."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Authors introduce Divide and Conform, aimed at augmenting the transferability of pre-trained\nconvolutional neural networks (ConvNets), in the absence of base data. \nIt is a two step process , spatial only convolutions and channel combination.\n\nAuthors claim that their approach is designed to enhance the adaptability of pre-trained models to specific\ntarget tasks, while assuming only a limited amount of unlabeled data is available for the target task and no access\nto the extensive base dataset, achieving all this in a parameter-efficient manner."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I take from the results on EuroSAT dataset message that the proposed method found it hard to learn discriminative features \nas compared to other methods.\nI would be great to see some qualitative results. \nI am new to this direction of research, for me, I am trying to see sparsity and your method together, how are they different or similar_"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How is LoDC implemented on ResNet18?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "+:In the context of parameter-efficient tuning, this paper decomposes convolutional kernels into two components, and focuses on fine-tuning spatial filter atoms while retaining existing knowledge to effectively transfer to target tasks. This approach provides a different perspective on fine-tuning tasks in convolutional layers\n\n+: The method is straightforward and easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper argues that directly fine-tuning pre-trained models carries the risk of insufficiently leveraging the foundational knowledge accumulated during pre-training, which may adversely affect performance on target tasks. To address this issue, this paper decomposes the convolution kernel into two components: spatial filter atoms and atom-coefficients. During the fine-tuning phase, this paper only fine-tunes the spatial filter atoms, thereby achieving fine-tuning with fewer parameters."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-: The motivation of this paper is not very clear. As stated in line 66-71, this paper points out three challenges: (1) lack of the base dataset, (2) heavy computational cost of full finetuning, and (3) scarcity of labeled target data. They are common issues and have been well studied in previous parameter-efficient tuning works. Therefore, what are advantages of this work over previous parameter-efficient tuning works?\n\n-: The technical contribution of this work could be further clarified. Particularly, compared to previous parameter-efficient tuning works, this paper relies on convolutional kernel decomposition techniques. Therefore, the authors would better discuss the advantages of the introduced convolutional kernel decomposition over existing works.\n\nAdditionally, dictionary learning for kernel decomposition involved in this work is complex and will bring significant computational overhead. More importantly, how about its scalability to large-scale convolutional neural works and vision transformers. Particularly, vision transformers are most widely used as backbones of pre-trained models.\n\nFor the experiment part, it seems to lack sufficient analysis on why only fine-tuning of the spatial filter atoms yields effective results. Furthermore, it also lacks experimental support on how freezing of the atom-coefficients preserves the knowledge of the pre-trained network. Additionally, the authors would better conduct more experiments with larger convolutional kernels and the larger model sizes to show the generalizability of the proposed methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "The paper presents Divide and Conform, a method parameter-efficient and interpretable framework for knowledge transferability of pre-trained ConvNets by selectively fine-tuning the spatial filter atoms."
},
"_bibtex": {
"value": "@misc{\npatel2024divide,\ntitle={Divide and Conform: Unleashing Spatial Filter Atoms for Unsupervised Target Transferability},\nauthor={Gaurav Patel and Qiang Qiu},\nyear={2024},\nurl={https://openreview.net/forum?id=57yOS3nIVm}\n}"
},
"abstract": {
"value": "The straightforward fine-tuning of the pre-trained model for the target task, bears the risk of under-utilizing the foundational knowledge accrued by the pre-trained model, resulting in the sub-optimal utilization of transferable knowledge, consequently impeding peak performance on the target task. To address this, we introduce $\\textit{Divide and Conform}$, aimed at augmenting the transferability of pre-trained convolutional neural networks (ConvNets), $\\textit{in the absence of base data}$. This strategy exploits the mathematical equivalence of the convolution operation, conceptualizing it as a two-step process involving spatial-only convolution and channel combination. To achieve this, we decompose ($\\textit{Divide}$) the filters of pre-trained ConvNets into spatial filter atoms (responsible for spatial-only convolution) and their corresponding atom-coefficients (responsible for channel combination). Our observations reveal that solely fine-tuning ($\\textit{Conform}$-ing) the spatial filter atoms, comprising of only a few hundred parameters, renders the transferability of the model efficient, without compromising on the predictive performance. Simultaneously, the static atom-coefficients serve to retain the base (foundational) knowledge from the pre-trained model. We rigorously assess this dual-faceted approach within the demanding and practical framework of cross-domain few-shot learning, showcasing the approach's substantial capability of transferring the knowledge in a parameter-efficient manner."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Gaurav_Patel2",
"~Qiang_Qiu1"
]
},
"authors": {
"value": [
"Gaurav Patel",
"Qiang Qiu"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Filter Decomposition",
"Domain Transferability",
"Efficiency"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "patel|divide_and_conform_unleashing_spatial_filter_atoms_for_unsupervised_target_transferability"
},
"pdf": {
"value": "/pdf/9a88de4c6fcc7ba70d7835a9c2999c13a6b5188a.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Divide and Conform: Unleashing Spatial Filter Atoms for Unsupervised Target Transferability"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
58AhfT4Zz1 | Causal-aware Graph Neural Architecture Search under Distribution Shifts | main | Active | Graph Neural Architecture Search;Out-of-Distribution Generalization;Causal Learning | learning on graphs and other geometries & topologies | 3;5;6;6 | 5;4;3;3 | 1;3;3;2 | 2;2;3;2 | 3;3;2;3 | 5 | 3.75 | 2.25 | 2.25 | 2.75 | -0.984732 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In Line 113, $N_{tr}$ and $N_{te}$ are not explicitly defined, though their meanings seem clear from the context. Please clarify these terms in the final manuscript.\n\n2. How are the subgraphs in Eq. 6 represented (e.g., soft edge mask or hard crop)? Where and how are they used in subsequent steps?\n\n3. To enhance the clarity of your approach, it would be helpful to visualize the causal and non-causal subgraphs for each dataset used in the case studies."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Well-structured modular approach: The CARNAS framework is thoughtfully organized, with each component clearly contributing to improved generalization under distribution shifts.\n\n2. Robust experimentation: The paper includes extensive experiments across synthetic and real-world datasets, highlighting the robustness of the proposed method.\n\n3. Component-level contribution clarity: Each module’s individual contribution is demonstrated, providing transparency and supporting the effectiveness of the approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel method, Causal-aware Graph Neural Architecture Search (CARNAS), to enhance the generalizability of Graph Neural Network (GNN) architectures under distribution shifts. By discovering stable causal relationships between graph structures and GNN architectures, CARNAS aims to mitigate issues with spurious correlations that often degrade performance across varying distributions. CARNAS introduces three core modules: Disentangled Causal Subgraph Identification, Graph Embedding Intervention, and Invariant Architecture Customization. Experimental results on both synthetic and real-world datasets show significant performance gains, especially in out-of-distribution generalization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Clarity in Section 3.3: Given I’m having limited familiarity with Graph NAS, the dynamic graph neural network architecture production and optimization process described in Section 3.3 remains somewhat unclear for me. A visual representation and a more detailed explanation would significantly improve the paper's readability.\n\n2. Causal-Aware Solution's Justification: While the paper presents a causal-aware solution for handling distribution shifts, some aspects require stronger theoretical support to underscore the novelty and significance of the approach:\n\n2.1. Limited Theoretical Support: The causal-aware Graph NAS solution leans heavily on implementation specifics, which limits the theoretical grounding of the method and may impact the perceived novelty.\n\n2.2. Reliability of Causal Subgraph in Latent Space: The representation of causal subgraphs in latent space is an interesting approach; however, it is not entirely clear if the model reliably learns the true causal components or overfits to the training set to optimize the objective.\n\n2.3. Overlap with Prior Work: Section 3.1 closely mirrors aspects of PGExplainer [26], which limits the novelty of this part of the approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In the graph embedding intervention module, you use $\\mu$ to control intervention intensity in Eq.(10): $H_{v_j} = (1-μ)·H_c + μ·H_{s_j}$. Have you considered using adaptive intervention strategies where $\\mu$ varies based on the structural properties of $G_c$ and $G_s$? This could potentially better handle graphs with varying degrees of spurious correlations.\n2. The overall objective function (Eq.(17)) uses a linearly growing $\\sigma_p$ corresponding to epoch number. Could you elaborate on why linear growth was chosen over other schedules (e.g., exponential, step-wise)? How does the schedule of $\\sigma_p$ affect the trade-off between causal structure learning and architecture optimization?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is the first to study Graph NAS under distribution shifts using causality. The problem is well-motivated with clear real-world relevance and applications. \n2. The authors present comprehensive experiments on both synthetic and real-world datasets that demonstrate clear performance improvements over existing baselines. The thorough ablation studies effectively validate each component of the proposed method, and the analysis provides valuable insights into the model's behavior.\n3. The paper is well-structured. The experimental analysis is clearly presented, making the work reproducible."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the challenge of graph neural architecture search (Graph NAS) under distribution shifts. The authors observe that existing Graph NAS methods fail to generalize well when there are distribution shifts between training and testing data, since they may exploit spurious correlations that don't hold across distributions. To tackle this, they propose CARNAS (Causal-aware Graph Neural Architecture Search), a novel approach that discovers and leverages causal relationships between graphs and architectures. \n\nThe main contributions of this work is 1) it is the first work to study Graph NAS under distribution shifts from a causal perspective; 2) they propose a novel framework with a disentangled Causal Subgraph Identification to find stable predictive subgraphs, a Graph Embedding Intervention component to validate causality in latent space and Invariant Architecture Customization to handle distribution shifts. Comprehensive experiments on synthetic and real-world datasets showing superior out-of-distribution generalization compared to existing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the paper shows good performance on the tested datasets, it lacks a detailed analysis of computational complexity and memory requirements. Specifically, the time complexity of $O(|E|(d_0 + d_1 + |O|d_s) + |V|(d_0^2 + d_1^2 + |O|d_s^2) + |O|^2d_1)$ could become prohibitive for very large graphs. The authors should discuss how their method performs on graphs with millions of nodes and edges, which are common in real-world applications like social networks.\n2. The method requires careful tuning of four critical hyperparameters ($t$, $\\mu$, $\\theta_1$, $\\theta_2$), which may significantly impact performance. In particular, the edge importance threshold t in Eq.(6) and the intervention intensity $\\mu$ in Eq.(10) show high sensitivity in experiments. While the authors provide some sensitivity analysis on BACE dataset, they don't fully explain how to effectively tune these parameters for new datasets or application domains. \n3. The paper lacks formal theoretical guarantees for the causal discovery process. While the empirical results are strong, the authors should clarify under what conditions their method is guaranteed to identify true causal relationships and provide bounds on the probability of discovering spurious correlations. Additionally, the relationship between the intervention loss and causal invariance could be more rigorously established."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could you explain how NAS can guide GNNs to model specific data causal relationships?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. This paper innovatively proposes using NAS to address the problem of causal information identification in graph data. \n2. The paper conducts extensive experiments to validate the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The study proposes a novel method called Causal-aware Graph Neural Architecture Search (CARNAS) to address the challenges posed by distribution shifts in the process of Graph Neural Architecture Search (Graph NAS). Existing methods face limitations when handling distribution shifts in real-world scenarios, as the correlations they exploit between graphs and architectures are often spurious and subject to variation across different distributions. CARNAS aims to discover and leverage the causal relationship between graphs and architectures to search for optimal architectures capable of maintaining generalization under distribution shifts."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I believe the paper does not clearly explain why NAS can help adjust GNNs to identify causal information, which I consider the main issue of the paper. In my view, NAS optimizes the structure of GNNs, enhancing their efficiency or expressiveness, but it does not inherently enable GNNs to determine what type of data to model. At the very least, the authors did not provide a clear explanation of this point in the paper. \n2. The paper lacks theoretical justification for the regulatory capability of NAS. \n3. The survey and introduction of related work are insufficient."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could you conduct an ablation study focusing on the neural network search details provided in the added Appendix C.1? \nSpecifically, I'm interested in understanding:\n- The effectiveness of different backbones to OOD distribution shifts, possibly illustrated through weight distributions.\n- How about time and memory requirements during the search process?\n- Are all of the backbones crucial for effective OOD distribution handling? \n\n2. How does your method behave in the large graph compared to the previous fixed-network methods since you may search in a large network space? Will it be out of memory or the time computation will exponentially grow?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The application of neural architecture search to address graph out-of-distribution (OOD) problems represents a significant innovation compared to traditional fixed-backbone approaches, as demonstrated in Appendix G.3. This shift from static to adaptive architectures opens new possibilities for graph OOD generalization.\n2. The method's effectiveness is convincingly demonstrated through comprehensive experiments on SPMotif and OGBG-Mol* datasets, where it consistently outperforms existing approaches in handling distribution shifts.\n3. The paper stands out for its clear and organized presentation, featuring well-designed figures that effectively illustrate complex concepts, complemented by rigorous mathematical formulations that provide a solid theoretical foundation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents CARNAS (Causal-aware Graph Neural Architecture Search), a novel approach that addresses the challenge of distribution shifts in Graph Neural Architecture Search. \nUnlike existing methods that rely on potentially spurious correlations between graphs and architectures, CARNAS focuses on discovering and leveraging causal relationships to achieve better generalization.\n\nThe solution consists of three main components:\n\n1. Disentangled Causal Subgraph Identification: Discovers subgraphs with stable predictive capabilities across different distributions\n2. Graph Embedding Intervention: Works in latent space to preserve essential predictive features while removing non-causal elements\n3. Invariant Architecture Customization: Reinforces causal invariance and uses it to design generalized architectures\n\nThe approach's effectiveness is validated through experiments on both synthetic and real-world datasets, demonstrating superior out-of-distribution generalization compared to existing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Computational Efficiency Concerns: Section 3.3 reveals a potential limitation. The method searches through an extensive space of graph neural networks across all layers, which could be computationally more intensive than existing baselines. This increased computational and memory overhead might present challenges when applied to large-scale graphs.\n2. Limited Experimental Validation: While DIR is included as a baseline, other important causal subgraph-based methods from recent works (such as [1],[2]) are not considered. \nAdditionally, the evaluation datasets - SPMotif (synthetic) and OGBG-Mol* (relatively small graph sizes) - leave questions about scalability. \nIt would be valuable to see performance on larger-scale datasets like DrugOOD or GOOD to address concerns about memory consumption and computational time.\n3. Novelty Discussion:\nWhile the neural architecture search component is innovative, the underlying methodology shares significant similarities with existing graph OOD approaches like DIR and related works [1-3]. The core mechanisms - using weighted top-k for causal subgraph identification and random combination for spurious subgraph intervention - closely parallel previous methods. This raises questions about the method's novelty beyond the architecture search component.\n[1] Learning causally invariant representations for out-of-distribution generalization on graphs. \n[2]Learning invariant graph representations for out-of-distribution generalization.\n[3] Improving subgraph recognition with variational graph information bottleneck"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024causalaware,\ntitle={Causal-aware Graph Neural Architecture Search under Distribution Shifts},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=58AhfT4Zz1},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph neural architecture search (Graph NAS) has emerged as a promising approach for autonomously designing graph neural network architectures by leveraging the correlations between graphs and architectures. However, the existing methods fail to generalize under distribution shifts that are ubiquitous in real-world graph scenarios, mainly because the graph-architecture correlations they exploit might be spurious and varying across distributions. In this paper, we propose to handle the distribution shifts in the graph architecture search process by discovering and exploiting the causal relationship between graphs and architectures to search for the optimal architectures that can generalize under distribution shifts. The problem remains unexplored with the following critical challenges: 1) how to discover the causal graph-architecture relationship that has stable predictive abilities across distributions, 2) how to handle distribution shifts with the discovered causal graph-architecture relationship to search the generalized graph architectures. To address these challenges, we propose a novel approach, Causal-aware Graph Neural Architecture Search (CARNAS), which is able to capture the causal graph-architecture relationship during the architecture search process and discover the generalized graph architecture under distribution shifts. Specifically, we propose Disentangled Causal Subgraph Identification to capture the causal subgraphs that have stable prediction abilities across distributions. Then, we propose Graph Embedding Intervention to intervene on causal subgraphs within the latent space, ensuring that these subgraphs encapsulate essential features for prediction while excluding non-causal elements. Additionally, we propose Invariant Architecture Customization to reinforce the causal invariant nature of the causal subgraphs, which are utilized to tailor generalized graph architectures. 
Extensive experiments on synthetic and real-world datasets demonstrate that our proposed CARNAS achieves advanced out-of-distribution generalization ability by discovering the causal relationship between graphs and architectures during the search process."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Architecture Search",
"Out-of-Distribution Generalization",
"Causal Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0f2d22be6f71768e43e5dd5666b1c58a3f9b517a.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/abbed6c190e47b42a29b1e9829cef8c57f72df2b.pdf"
},
"title": {
"value": "Causal-aware Graph Neural Architecture Search under Distribution Shifts"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
58KF6ne6d4 | Kinematics-Informed Reinforcement Learning for Trajectory Optimization in CNC Machining | main | Withdraw | Trajectory Optimization;Reinforcement Learning;CNC Machining | applications to robotics, autonomy, planning | Jin Zhang;Mingyang Zhao;XIN JIANG;Dong-ming Yan | ~Jin_Zhang15;~Mingyang_Zhao1;~XIN_JIANG12;~Dong-ming_Yan1 | 1;3;3;5 | 5;3;4;4 | 2;3;2;2 | 1;2;2;2 | 3;3;3;2 | 3 | 4 | 2.25 | 1.75 | 2.75 | -0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The time component of the optimization is removed when we move from Equation 5 to Equation 7, is it intentional and why?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Interesting idea of using RL to solve the CNC routing problem, which is traditionally solved by hand-crafted algorithms"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an RL approach to improve smoothness and efficiency in following CNC machining toolpath. In CNC machining, the toolpath is defined as a G01 code which is a series of points. Paths between intermediate points are straight line segments connecting them. The junctions introduce discontinuity in velocity and acceleration. Traditional approaches first smooth the trajectory, then adjust the tool path velocity to accommodate maximum velocity, acceleration and jerk constraints. This de-coupling can introduce inefficiencies. Hence, the authors propose a coupled optimization approach leveraging RL."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Interesting study but not enough evidence to show usefulness. Given the topic of the paper, it either demands real world results (e.g. higher quality CNC output, faster toolpath, etc) or very strong evidence in simulated results.\n2. No real world results. Simulated results are also not very strong. E.g. authors mention in Line 43-45\"..decoupled approach often yields suboptimal results..limiting the achievable feedrate\". However, in 2/4 toolpaths, the proposed method generates slower toolpath than existing methods.\n3. Given the additional time and complexity with RL based optimization, one would use it only if there is a strong reason, which is missing in the current paper.\n\nMinor:\n1. 140-141: udden->sudden\n2. 157-158: I believe there is a missing bracket"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why is the kinematic state prediction problem an MDP? Why does it have the Markov property? What is the state space of this MDP?\n2. Why is the duration of each segment optimized separately? Is it possible to jointly optimize the sum of durations of all segments? Is the reward function still well-defined in this case?\n3. How do existing integrated methods solve toolpath smoothing and feedrate planning? Why is RL superior to these methods? How does the proposed method perform compared to these integrated methods?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The problem this paper aims to solve, i.e., integrated toolpath smoothing and feedrate planning, is rooted in real-world manufacturing. It is important for improving machining accuracy, efficiency, and tool life. The proposed method of using RL for trajectory optimization is original, and its performance is better than traditional decoupled approaches. The writing of the paper is clear."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a reinforcement learning (RL) method called KIRL for integrated joint toolpath smoothing and feedrate planning in Computer Numerical Control (CNC) machining. The tool trajectories are divided into segments by a series of boundary points and quintic polynomial functions between them. The RL agent is trained for predicting kinematic states at the boundary points, which are then used for polynomial interpolation. The duration of each segment is separately optimized by maximizing the reward function. Experimental results demonstrate that KIRL can generate smoother trajectories and optimize machining time compared to traditional decoupled methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper does not convince me why the kinematic state prediction problem is an MDP. In particular, why does it have the Markov property? The authors defined an observation space, but what they need to define is a state space that makes the problem an MDP. From my understanding, some elements in the current observation space already violates the Markov property. For example, the segment length and turning angle in the next step cannot be determined by observation or action at the current step. The authors are suggested to explicitly justify how their formulation satisfies the Markov property, and to clarify the distinction between their observation space and the underlying state space of the MDP.\n\n2. The duration of each segment is computed by minimizing the reward function of that segment. This is not optimal because the objective is to minimize the total machining time, which should be a joint minimization on the sum of durations of all segments. From my understanding, the authors do separate optimization because solving future time duration requires future kinematic states, which are not available at the current step. If this is true, it reinforces the doubt whether the problem is an MDP because now the reward function also depends on future states. In addition, the authors are suggested to discuss the trade-offs of their approach versus joint optimization, and to clarify how their method approximates or relates to the global optimum.\n\n3. The authors mentioned that there are some recent studies formulating the integration of toolpath smoothing and federate planning as a holistic problem, but they did not explain how these methods solve the problem, nor did they compare the proposed method with them in the experiments. It is unclear whether and why the proposed method is superior to existing integrated methods. 
The authors are suggested to include a brief overview of how existing integrated methods work, and a comparative evaluation against at least one state-of-the-art integrated approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the simulations, sometimes the KIRL-PPO shows better results, and sometimes the KIRL-SAC. Are there any criteria to decide when to use PPO or SAC?\n\n2. Does the method guarantee that there is no constraint violation?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Applying RL-based methods for improvement of trajectory tracking in CNC machining is novel for this application domain. The approach is original and clearly explained in the paper. I appreciate the provided algorithm and the paper's presentation in general."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a reinforcement-learning-based method to solve the integrated toolpath smoothing\nand feedrate planning problem in CNC machining. PPO and SAC are used to train RL agents to predict intermediate kinematic states. To generate the trajectory, the target is to perform an integrated optimization to find a trajectory that minimizes a weighted sum of both the trajectory jerk (related to smoothing of the path) and the machining time, taking into account kinematic constraints. Then RL finds intermediate kinematic states at the path segment boundaries. The method is evaluated on four tool paths and shows generally better performance than the benchmark methods used in the evaluation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The authors solve the simultaneous problem of smoothing a path given by linear segments and optimizing the feed rate (velocity) of the CNC machine. In their literature study, they omit existing work on this problem (see references). The problem can be seen in two different ways: 1) improving the performance of the system, by improving both the time duration and the position error; and 2) solving mathematically the smoothing problem together with feed rate optimization, which is the preferred one in this paper. For both problems, there is relevant literature which is omitted. I provide a couple of references, addressing both. It would be useful to include that in the literature review.\n\nZhang, Y., Wang, T., Peng, P., Dong, J., Cao, L. and Tian, C., 2021. Feedrate blending method for five-axis linear tool path under geometric and kinematic constraints. International Journal of Mechanical Sciences, 195, p.106262.\n\nLiu, B., Xu, M., Fang, J. and Shi, Y., 2020. A feedrate optimization method for CNC machining based on chord error revaluation and contour error reduction. The International Journal of Advanced Manufacturing Technology, 111, pp.3437-3452.\n\nKim, H. and Okwudire, C.E., 2020. Simultaneous servo error pre-compensation and feedrate optimization with tolerance constraints using linear programming. The International Journal of Advanced Manufacturing Technology, 109, pp.809-821.\n\nA. Rupenyan, M. Khosravi and J. Lygeros, \"Performance-based Trajectory Optimization for Path Following Control Using Bayesian Optimization,\" 2021 60th IEEE Conference on Decision and Control (CDC), Austin, TX, USA, 2021, pp. 2116-2121, doi: 10.1109/CDC45484.2021.9683482.\n\n\n2) Even when the preferred way of addressing the performance problem is somoothing+feed rate optimization, some quantification of the tracking error is missing. 
It would be useful to see what is the effect of the approach on the positioning performance.\n\n3) While there is a comparison with some approaches treating the feed rate and the path generation separately, there is no comparison with approaches treating the problems jointly (see references above). If such a comparison is added, I would be willing to increase my rating of the paper.\n\n4) There is no quantification or discussion of the computational performance of the method. Is it intended to be used offline?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- A motivation is missing why this is treated as a sequential decision making problem rather than a global optimization problem. I'd assume better global performance can be achieved when optimizing over the whole trajectory rather than considering the past as fixed and only considering the next straight segment.\n- So more generally: Why is the optimization done per segment? In the initial parts with the optimization problem that can lead to arbitrarily bad performance on subsequent segments, in the RL formulation this is accounted for as it optimizes for the cumulative reward.\n- Throughout the paper costs/constraints/optimization objectives are mixed in terms of \"global\" (whole trajectory) and \"local\" per segment, e.g. the ones in Eq (3) vs (4). That makes it rather hard to follow.\n- The constraints and objectives change throughout the paper - which makes it rather hard to follow. In Sect. 2.2 we have constraints on velocity, acceleration and jerk, in Sect. 3.1 that becomes a minimization of the jerk (while complying to the constraint on chord error, which intuitively would mean the jerk gets minimized at the cost of reaching the chord error limit), and then in Sect. 3.3 for the reward function we get a weighted combination of minimizing the chord error and violations of the jerk limit.\n- How are constraints ensured when learning? In the reward function constraint violations just seem to be modeled as costs, so there is no guarantee that they don't get violated, i.e., soft constraints rather than hard constraints.\n- Sect. 3.1 and 3.2: While quintic polynomial functions can indeed solve the problem for the constraints as defined by the authors, it remains very vague why that is the best idea. As far as I understood with this formulation, perfectly straight lines (which would be desirable for many mechanical parts) are not possible, and the optimized path can be quite far of. 
Wouldn't a higher order representation (or a different kind of spline, joining straight lines with transition 'pieces' at the corners, etc.) allow to follow the desired path more accurately? More generally: what are the implications/consequences of the design choices?\n- Sect. 3.2 / Fig. 3: The result that the slower the CNC moves the more distorted paths we get is highly counterintuitive. If the objective is to avoid the limits/constraints then simply moving slower should reduce all three velocity, acceleration, and jerk. This seems to be more an artifact of keeping the boundary conditions fixed. And I also don't believe the trade-off (longer time = more traj distortion but greater velocity 'smoothness') is general - already in your plot we see that when going from T=4 to T=5 the trajectory gets more distorted, but also the velocity peak on the right (t = 3.2 and 3.7) becomes worse (i.e., the trajectory needs to accelerate drastically towards the end to achieve the boundary constraints), which seems to invalidate the claim in the paper. My feeling is that the effects will depend quite a bit on the combination of boundary conditions that the range of T you consider, and that drawing the conclusion about the trade-off based on a single example/figure isn't warranted. Simple example: Boundary conditions and T are chosen in a way that the path is a simple straight line with constant velocity (and zero acceleration and zero jerk) so zero chord error, then both increasing and decreasing T will require some non-zero acceleration and potentially require path deviations.\n- Eq (11): What is the purpose of having N-i as part of the state, but no indication of which segment we are currently in?\n- Eq (12): p^L and p^R are shown in Fig 2 but don't seem to be explained in the text. Nor does it become clear how far away they can be from p. \\delta_max? But then how do you ensure that the curved segment in between doesn't protrude outside that limit?\n- l. 
295: I assume the absolute value of the jerk j_i(t) should be used in r_i^jerk\n- Algo 1: only shows policy execution, I think it would be interesting to also show the training procedure\n- Sect. 4.1: \"due to unavailability of baseline implementations\" for PPO and SAC there are quite a few implementations available (e.g. Stable Baselines), or do you mean something else?\n- Sect. 4.2: Why is the chord error not evaluated/reported? I think this would be crucial to see at which accuracy cost the improved other metrics come. E.g. in the Fig 5 inset, the path error seems to have increased quite a lot - if we want to have accurate points.\n- I'm not from the CNC field, but are these decorative shapes really representative for many tasks? I'd assume for more technical applications accurate straight lines and sharp corners are actually crucial.\n- Table 1: It is unclear what we see here. Single RL run? For RL papers it is crucial to report statistics over several runs (mean + std).\n- Sect. 4.3: I really would have liked to see some ablations on your method (rather than only 2 different RL algorithms) and sensitivity analysis on the various parameters there are in the approach (e.g. reward weights).\n- Sect. 4.4: The state and action space was designed with generalization in mind, now figuring out that it needs to be retrained for all paths after all is disappointing. Related to this, it would have been nice to see some results on the performance without retraining.\n- A few typos, e.g. l. 140 \"udden\""
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is generally well written and does a good job at explaining the problem. The paper tackles an important, practical problem in CNC. The novel method is presented clearly. The method seems correct and sound. The experiments show good results compared to the baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes to apply reinforcement learning to jointly solve the feedrate planning and path smoothing problem for CNC machines.\nThe paper proposes a formulation for the problem, including reward function, state space, and action space/policy parametrization. The proposed approach is compared in simulation on four 2D trajectories against non-RL baselines, and shown to outperform those."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the paper tackles an important practical problem, I did not see any methodological novelty. It is an application of very standard RL algorithms to a novel problem. The specific formulation of the optimization objective/reward function, state space, and action space/policy representation seem novel, but I could not extract any more general lessons learned from them. There is nothing per se wrong with the design choices made and the resulting overall system seems to work. However, there are quite a few choices where alternatives would be possible and insights into why certain choices were made are lacking - I would have expected at least some ablations in the experiments. As the paper itself points out, in its current form the results are very far from practical applicability and rather a proof of concept.\nTo sum up, overall there is neither an algorithmic contribution, nor sufficient general insights for a systems paper. The experimental results also do not comply with basic standards for RL papers."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Kinematics-Informed Reinforcement Learning (KIRL) optimizes CNC machining by integrating toolpath smoothing and feedrate planning for improved accuracy and efficiency."
},
"_bibtex": {
"value": "@misc{\nzhang2024kinematicsinformed,\ntitle={Kinematics-Informed Reinforcement Learning for Trajectory Optimization in {CNC} Machining},\nauthor={Jin Zhang and Mingyang Zhao and XIN JIANG and Dong-ming Yan},\nyear={2024},\nurl={https://openreview.net/forum?id=58KF6ne6d4}\n}"
},
"abstract": {
"value": "Toolpath smoothing and feedrate planning are key techniques in Computer Numerical Control (CNC) machining, and play a significant role in machining accuracy, efficiency, and tool life.\nTraditional methods typically decouple path smoothing from feedrate planning, without considering the kinematic constraints during the smoothing process.\nAs a result, the subsequent feedrate planning process is subject to more stringent kinematic limitations, which hinders the achievement of optimal speed execution.\nHowever, the integration of these two processes presents a significant challenge due to severe complexity and nonlinearity of the problem. Here, we propose a novel Reinforcement Learning (RL) based method, termed KIRL, to address the integrated optimization problem.\nExperimental results demonstrate that KIRL can generate smoother trajectories and optimize machining time compared to traditional decoupled methods.\nTo our best knowledge, KIRL is the first RL-based method for solving the integrated toolpath smoothing and feedrate planning optimization problem in CNC machining."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Jin_Zhang15",
"~Mingyang_Zhao1",
"~XIN_JIANG12",
"~Dong-ming_Yan1"
]
},
"authors": {
"value": [
"Jin Zhang",
"Mingyang Zhao",
"XIN JIANG",
"Dong-ming Yan"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Trajectory Optimization",
"Reinforcement Learning",
"CNC Machining"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "zhang|kinematicsinformed_reinforcement_learning_for_trajectory_optimization_in_cnc_machining"
},
"pdf": {
"value": "/pdf/ea8eb8bc327766724662b5832f05eca9199f1c34.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Kinematics-Informed Reinforcement Learning for Trajectory Optimization in CNC Machining"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
58T7xcTxJD | Dual-level Affinity Induced Embedding-free Multi-view Clustering with Joint-alignment | main | Active | Mulit-view Clustering;Large-scale Clustering;Anchor Clustering | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;5;6 | 5;4;5;3 | 3;3;3;3 | 2;2;2;3 | 2;3;2;3 | 4.25 | 4.25 | 3 | 2.25 | 2.5 | -0.522233 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.\tWhat is the difference between the anchor alignment module with those of existing works?\n2.\tWhy do some compared methods exhibit extremely poor performance on some datasets?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThe paper is well-structured, and the authors conduct a relatively comprehensive review on existing literatures.\n2.\tThe experimental results demonstrate the effectiveness of the work"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, a multi-view clustering method with joint anchor alignment was developed, which introduces dual-level affinity and achieves embedding-free clustering. The work is designed to address several problems due to the anchor misalignment issues. Therefore, the authors introduce a permutation mechanism for each view to jointly adjust the anchors. Besides, the method is free of learning the embedding by constructing the cluster labels directly from original samples. A self-expression learning structure is utilized on the anchors, which utilizes topology learning strategy to feed captured anchor-anchor features into anchor-sample graph. Extensive experiments validate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tA core idea of the work is to introduce an anchor permutation matrix, while this idea has been widely adopted by previous works. Hence, the novelty of the paper might not be sufficient to be published.\n2.\tThe comparison methods lack some latest works. Since the work is an anchor alignment based method, more related works with anchor alignment should be compared. For example, the reference Liu 2024 (in line 581) was discussed in this paper, which includes anchor alignment mechanism, but it is not compared with the proposed work.\n3.\tIn Table 1, several compared methods exhibit extremely poor performance on some datasets (e.g., PMSC on Cora, AMGL on DeRMATO). It might be better if the authors could explain the possible reasons.\n4.\tTable 5 does not include all the symbols. The Methodology section might be too brief, which should be introduced with more details by explaining the reasons for the design of each component."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Same as weaknesses section"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The model’s dual-level affinity mechanism effectively captures both anchor-sample and anchor-anchor relationships, enhancing clustering accuracy by leveraging a fuller view of the data structure.\n2. The flexible joint-alignment method addresses anchor misalignment issues without requiring a fixed baseline view, making the model versatile for clustering data from different sources.\n3. The model's effectiveness is demonstrated through comprehensive evaluation on multiple datasets, highlighting its adaptability and strong performance across different data types and views."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents the DLA-EF-JA model, a multi-view clustering technique that leverages dual-level affinity to capture both anchor-sample and anchor-anchor relationships within data. The model introduces a joint-alignment mechanism to address the anchor misalignment problem across views, which eliminates the need for a baseline view. Unlike traditional embedding methods, DLA-EF-JA generates cluster labels directly, reducing variance and improving clustering stability. Extensive experiments across diverse datasets demonstrate that the proposed model achieves competitive performance compared to existing multi-view clustering methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Learning of Cross-View Complementarity:** While the model integrates anchor relations, it lacks complex constraints like the Schatten p-norm that could help capture deeper cross-view complementarities. This may limit the model’s ability to fully leverage unique, complementary information in views with highly distinct features or dimensions. How does the model handle scenarios where the quality of anchors varies significantly across different views?\n\n2. **Necessity of Anchor Alignment:** The reliance on anchor alignment to maintain cross-view consistency introduces additional computational steps. Although this approach appears beneficial, some recent multi-view clustering methods successfully avoid alignment through feature space fusion or shared representations. It would be useful for the authors to elaborate on the essential role of anchor alignment in this model and under what conditions it might be adapted or simplified. Are there specific conditions or datasets where the necessity of anchor alignment might be relaxed or modified?\n\n3. **Complexity of the Model:** The model is somewhat complex, introducing more variables and mathematical processes. A more detailed explanation of the transition from Equation 2 to Equation 3 would enhance reader understanding of the methodology.\n\n4. **Hyperparameter Tuning Requirement:** The model’s performance is sensitive to carefully tuned hyperparameters, such as λ and β. Can the authors provide further insights into the potential effects of anchor noise and how it could be mitigated to improve robustness?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The authors should check the data since some methods, such as OrthNTF and GSC, since the performance of these new methods is very poor."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) The proposed method considers the affinity relationship between anchors.\n(2) The proposed method devises a joint-alignment mechanism that not only eliminates the need for selecting the baseline view but also coordinates well with the generation of anchors.\n(3) The proposed method has linear complexity for the loss function."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to address these problems: (1) they existing methods focus only on the affinity relationship between anchors and samples, while overlooking that between anchors; (2) the cluster order is inconsistent across views and accordingly anchors encounter misalignment issue due to the lack of data labels. The proposed method explicitly exploits the geometric properties between anchors via self-expression learning skill, and utilizes topology learning strategy to feed captured anchor-anchor features into anchor-sample graph so as to explore the manifold structure hidden within samples more adequately. Experiments on multiple publicly available datasets confirm the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The novelty of this work is limited since the involved components have been widely used for anchor learning and spectral clustering. The authors only perform these components on the anchor data.\n(2) The authors do not compare the proposed method with theses popular deep learning ones."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThis paper is easy to read.\n2.\tExtensive experiments are conducted to show the effectiveness of the method as well as efficiency."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a dual-level affinity induced embedding-free multi-view clustering method with joint alignment, called DLA-EF-JA. Based on previous anchor based multi-view clustering, it further considers the relations among anchors by learning an affinity matrix that are used to guide the anchor matrix learning with graph Laplacian. The multi-view anchors are adaptively aligned. The discrete cluster indicator is also jointly learned."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe novelty of this paper is incremental. The authors consider the relations among samples by self-expression affinity learning, and add a graph based Laplacian for anchor matrix regularization. However, the self-expression affinity learning and graph based Laplacian are widely used in existing subspace clustering works. It also remains unclear why the anchor self-expression enhances the quality of anchors. \n\n2.\tWhy learn an anchor affinity matrix $S_p$ for each view separately? It seems to overlook inter-view interactions. Why not directly learn a consensus anchor affinity matrix? Will it improve the performance?\n\n3.\tHow do you set the number of anchors $k$? What is the influence of it?\n\n4.\tThe experimental results are not convincing. For instance, OrthNTF achieves 69.4% Acc and 68.6% NMI values on the Reuters dataset, while this paper only reports 28.67% Acc and 3.07% NMI."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024duallevel,\ntitle={Dual-level Affinity Induced Embedding-free Multi-view Clustering with Joint-alignment},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=58T7xcTxJD},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite remarkable progress, there still exist several limitations in current multi-view clustering (MVC) techniques. Specially, they generally focus only on the affinity relationship between anchors and samples, while overlooking that between anchors. Moreover, due to the lack of data labels, the cluster order is inconsistent across views and accordingly anchors encounter misalignment issue, which will confuse the graph structure and disorganize cluster representation. Even worse, it typically brings variance during forming embedding, degenerating the stability of clustering results. In response to these concerns, in the paper we propose a MVC approach named DLA-EF-JA. Concretely, we explicitly exploit the geometric properties between anchors via self-expression learning skill, and utilize topology learning strategy to feed captured anchor-anchor features into anchor-sample graph so as to explore the manifold structure hidden within samples more adequately. To reduce the misalignment risk, we introduce a permutation mechanism for each view to jointly rearrange anchors according to respective view characteristics. Besides not involving selecting the baseline view, it also can coordinate with anchors in the unified framework and thereby facilitate the learning of anchors. Further, rather than forming embedding and then performing spectral partitioning, based on the criterion that samples and clusters should be hard assignment, we manage to construct the cluster labels directly from original samples using the binary strategy, not only preserving the data diversity but avoiding variance. Experiments on multiple publicly available datasets confirm the effectiveness of our DLA-EF-JA."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Mulit-view Clustering",
"Large-scale Clustering",
"Anchor Clustering"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/29bb4bef8056309c3b6d660ebe85bbe78a9157eb.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Dual-level Affinity Induced Embedding-free Multi-view Clustering with Joint-alignment"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
58lbAsXCoZ | Neural Fluid Simulation on Geometric Surfaces | main | Active | Fluid simulation;Implicit Neural Representation;Exterior Calculus | applications to computer vision, audio, language, and other modalities | 1;6;8;10 | 5;3;5;3 | 1;3;4;4 | 1;3;3;4 | 1;2;4;4 | 6.25 | 4 | 3 | 2.75 | 2.75 | -0.523205 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "### Questions\n1. **Use of DEC Language**: The paper’s use of Discrete Exterior Calculus (DEC) is rigorous and suits the formal approach taken. However, many in the ML and physics communities might be more accustomed to traditional differential or vector calculus, so DEC may require more adjustment for those readers. Adding intuitive explanations alongside the DEC formalism could enhance accessibility, although this may vary depending on the preferences of other reviewers.\n2. **Handling Narrow Geometric Features in CPM**: The reliance on ambient space in CPM may lead to ambiguities when processing narrow or thin features. Clarifying whether this dependency impacts stability or accuracy for such geometries would enhance the framework’s applicability and inform potential adaptations to handle such cases.\n---\n### Suggestions\n1. **Missing Citations**\n 1. For by construction divergence-free field with neural network, maybe also cite [Deep Fluids](https://onlinelibrary.wiley.com/doi/10.1111/cgf.13619).\n2. **Clarifying Performance Gains Over INSR**:Intuitive explanation of why your method is > INSR > PINN when constrained by storage size. Intuitively, INSR is superior to PINN because it doesn’t record time in the neural field, so, given the same storage budget, INSR should and must outperform PINN. However, your method doesn’t gain from saving less information in the neural field to achieve higher accuracy (i.e., it doesn’t concentrate model expressiveness on specific features to achieve this). So, what is the intuitive reason behind your method’s improved results over INSR? Is it due to the CPM formulation or the Helmholtz decomposition? An “ablation” would be helpful here."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "### CPM Formulation\nThe math formulation is clean and concise. It is quite apparent that the authors are coming from a graphics background and I love this clean DDG writing style.\n- The Closest Point Method (CPM) is relatively new in visual computing, yet its integration with neural fields here aligns with my belief in CPM’s potential for solving PDEs on surfaces. Compared to surface sampling techniques (as seen in Geometry Processing with Neural Fields [Yang et al., 2021] and similar studies), CPM offers a structured way to define differential operators in volumetric data by rigorously establishing value transfer in the ambient space embedding the surface.\n- A persistent challenge in neural implicit representations is that, while data is represented volumetrically (e.g., through neural SDFs), the actual solutions are constrained to the 0-level isosurface. Sampling on this isosurface can be inefficient, but CPM provides an effective alternative by leveraging the ambient space, enhancing both efficiency and rigor.\n\nOverall, I would love to see this line of work being continued and the math formulation should be shared and seen within the ML community.\n\n---\nSome misc comments:\n- The related works section is thoughtfully composed, with necessary references cited and no excess, reflecting high-quality citation practices.\n- The choice of ground truth in this paper is well-justified and suitable for the presented comparisons."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel framework for simulating incompressible Eulerian fluid flow on 3D surfaces using neural implicit representations. This method leverages the Closest Point Method (CPM) and exterior calculus to parameterize the fluid’s velocity and vorticity fields directly on the surface without relying on discretization, which reduces memory costs and bypasses the need for conventional spatial discretization. The framework introduces a covariant-derivative-based advection process, which integrates surface flow dynamics while minimizing energy dissipation. Notably, this work is among the first to simulate incompressible fluid dynamics on neural surfaces, achieving enhanced accuracy and energy preservation across various geometric representations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have two concerns, regarding the claimed first and third contributions.\n\n---\n\n### Performance vs. Storage vs. Accuracy\nThe storage and accuracy benefits presented as a core contribution appear somewhat overstated since these gains stem from the inherent compact representation of neural fields, as noted in prior works like INSR-PDE (Chen et al., 2023). The neural network, here largely a standard MLP, serves as a model reduction tool or compressed parameter space. However, the substantial cost is slower simulation speeds, particularly noticeable in evolving the PDE on a neural representation, and this tradeoff is well-documented in the field, tracing back to foundational work like *Geometry Processing with Neural Fields* (Yang et al., 2021). Additionally, working with surface PDEs inherently mitigates spatial complexity compared to volumetric Eulerian approaches, further diluting the impact of memory savings in this context. Unless optimized network designs or implementation techniques were used, this contribution may feel more like a tradeoff typical of neural fields than a novel improvement.\n\n**TL;DR:** Without unique implementation optimizations, this tradeoff doesn’t stand out as an independent contribution, as neural networks naturally offer compact representations at the expense of computational speed.\n\n---\n\n### First to Simulate on Neural Implicit Surface Representation\nThe claim of being the first to simulate incompressible fluid flow on neural implicit surfaces is somewhat uncertain, as prior work using sampling techniques, like *Geometry Processing with Neural Fields* (Yang et al., 2021) or INSR-PDE, could also solve surface PDE like Laplace Equation by sampling on the surface. 
While it’s conceivable that these methods struggle with incompressibility when applied to Navier-Stokes, demonstrating their limitations would highlight the advantages of the Closest Point Method (CPM) for ensuring divergence-free constraints on neural surfaces. Including such comparative results, even as failure cases, could effectively underscore this paper’s unique approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How do you do the interpolation of the pulled forms? In the CP-EC poster the authors recommended the Cubic Lagrangian.\nHow does this interpolation affect the divergence-free property of the velocity field? Were there any numerical problems?\nIs it possible to do an ablation study on this point?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper shows that the recently introduced Closest Point Exterior Calculus (CP-EC) is very well suited to simulate fluid simulation on neural implicitly defined surfaces in 3D. The CP-EC allows to automatically guarantee the divergence-free properties of the vector field. The method achieves up to 15 times higher accuracy than previously used discretization methods on the surface with the same memory requirements, which is confirmed by extensive numerical simulations of different applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper builds on the recently introduced Closest Point Exterior Calculus (CP-EC) to propose a novel method for preserving the divergence-free property of vector fields on surfaces. By leveraging the closest point map, this approach seamlessly extends computations from the surface to the surrounding Euclidean space. At the core of the paper, Theorem 3.1 presents a specific construction for generating a divergence-free vector field on a surface using the CP-EC framework. This framework enables the calculation of gradient, divergence, and curl in a way that respects the intrinsic geometry of the surface, ensuring that the velocity field remains divergence-free when constrained to the surface. A key advantage of this method is its flexibility, as it supports simulations on various surface representations, including analytic surfaces, explicitly defined mesh surfaces, and, notably, neural implicit surfaces. The paper introduces a complementary advection process based on covariant derivatives for fluid dynamics, designed to minimize energy dissipation. Numerical studies confirm the framework’s accuracy, energy preservation, memory efficiency, and adaptability to geometry. Results show it achieves about 15 times higher accuracy than other methods with similar storage, offers 5 times memory savings over classic methods, and effectively models fluid dynamics. Additionally, the simulator's robustness is demonstrated through an end-to-end generation task and a real-world velocity field decomposition."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The English in the current version of the paper needs to be improved. Numerous articles are missing and sometimes the wrong words are used (subtle instead of subleties, divergence free instead of divergence free property, etc).\n\nCompared to the actual straigth forward application of the CP-EC to the case of flow simulation on surfaces, the paper seems cumbersomely long and is also not as clear to read as the recent papers on the topic referenced in the paper, whose presentation is clearer and more concise. Maybe the authors can try to improve on that."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "(1) Why introducing $f$ in equation (8) instead of using $\\sigma$ directly?\n\n(2) I think a $t$ subscript is missing in equation (8) ($\\Phi_t$)."
},
"rating": {
"value": 10
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "To be honest, I had a lot of fun reviewing this paper. Unless other reviewers flag major flaws that I could not find, I think it is ready for acceptance.\n\n* It is a very good example of when a simple and elegant core idea based on strong guarantees enables a lot of very interesting questions and consequences. The core idea of using the nilpotent property of the external derivative and the self inverse property of the Hodge star operator to force a divergence free vector field on a surface is elegant and foment all the paper discussion.\n\n* The loss formulation is as expected, very intuitive.\n\n* As far as I know, It is the first method that converges in implicit surfaces.\n\n* The method does not rely on training data.\n\n* Evaluation is robust. The idea of starting with analytic examples where ground truth is easier to evaluate is good.\n\n* Related work section cites every paper I could think of. The care with the citation of classic papers (even for datasets) is notable.\n\n* Mathematical notation is very clean. It easy to see that there is a lot of effort with notation polishing.\n\n* The paper makes use of very good references for background Math."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a framework for fluid simulation on surfaces that is divergence-free by construction. This is done by using exterior calculus tools, in special the definition of the divergence based on the Hodge star operator and the exterior derivative, the property that the Hodge star is (up to a sign) its own inverse and the nilpotent property of the exterior derivative. The Closest Point Method is used to apply those tools on generalized surfaces, making a natural link with Riemannian geometry, and enabling the evaluation in the tangent space around samples in the surface. With those tools it is possible to transit between the surface and $R^3$ as needed, in special for advection which can be done considering the Riemannian metric of the manifold."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I will point some minor weaknesses that could be fixed to improve the paper.\n\n(1) An image depicting equation (4) and another showing the advection process would greatly improve the friendliness of the paper since both processes are very geometric. That would make the paper be appreciated by a broader audience.\n\n* In the image for equation (4) it is sufficient to show the neighborhood of the surface, the mapping $j$, the mapping $cp*$, the vector resulting from the gradient and the tangential vector acquired from cross product of the gradient with the normal.\n\n* For the advection image it is sufficient to depict the push forward (pull back) function in action and the inner product using the Riemannian metric.\n\n(2) The presentation could be more friendly by giving some intuition along the text. I will point some places I think this kind of intuition would be beneficial.\n\n* Line 199: could say that the even though the divergence may be expressed using different k-forms, the definition of div(v) is the 0-form version resulting in a scalar function.\n\n* Line 299 (equation 4): could say that $cp^*\\sigma$ is a notation abuse because $cp^*$ expect a k-form but $\\sigma$ is a 0-form. Also that the composition with $j(x)$ is to restrict the computation to the surface, the gradient is to acquire a vector field and the cross product is to acquire a tangent vector field. A reference to the proposed image would also be good here.\n\n* Line 234 (equation 5): could say that that vorticity expression considers the rotation axis equals to the normal because it is evaluated on the surface. Then the vorticity may be represented as a scalar field.\n\n* Line 257: could say that the expression is a neighborhood extension of the surface along the normal field.\n\n* Line 320 (equation 13): could say that the $<. , .>_p$ notation is an inner product considering the Riemannian metric of the manifold of the tangent space at point p. 
A reference to the proposed image would be good here.\n\n* Line 327 (equation 15): could say that the inner products are the first-order approximation of the push forward function.\n\n* Line 344: that paragraph could say that the harmonic components do not contribute to the vorticity and that is the reason why the additional harmonic network is needed. Could also say that it is constant along the simulation because it is associated with the topological structure of the surface, which does not change over time.\n\n(3) This paper deserves an acronym so it may be more easily referenced in the future by other researchers. I advise the authors to think about changing the title to include a creative acronym."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- Did the authors explore alternative network designs for representing the implicit neural fields (such as \"Instant Neural Graphics Primitives with a Multiresolution Hash Encoding”) ?\n- How does the method fares in simulations where regions of turbulence are highly concentrated? Is the proposed adaptivity property working as expected?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "Proposing alternative representations to the standard discretizations (e.g., grid/meshes) for solving PDEs is a very interesting and challenging topic of research. The authors propose a method that considers specific intricacies of the PDE solution when employing neural representations, along with desired properties that can potentially be satisfied in a continuous fashion (e.g., the divergence-free condition)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an implicit neural representation to improve solvers that simulate flows on geometric surfaces through geometric adaptivity. The authors propose a neural physical simulation framework to construct a parameterized vector field on surfaces using exterior calculus formalism. Through a Closest Point Method, it is proposed an implicit neural network representation that is able to maintain a divergence-free property intrinsically. Divergence-free is an important property of Navier-Stokes solvers, and strictly enforcing them is a challenging task. Furthermore, the authors claim that the proposed approach is able to accurately preserve the energy of the flow as time advances."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Unfortunately this paper is clearly below the ICLR quality acceptance bar. My major concerns are as follows:\n- Poor exposition along with several typos which make the paper hard to understand. For example. the structure of Section 3.1 is composed of fragmented phrases forming very short paragraphs, making it hard to follow. Several typos and confusing phrasal structures (L11: “Incompressible Euler fluid on the surface”, L20: “We contribute a neural physical simulation framework on the surface with the implicit neural”, L240: “In the meanwhile\" to name a few) greatly compromise the quality of the paper. \n- The main idea of the paper is based on wrong assumptions. The poster “Closest Point Exterior Calculus”, in which the paper is heavily based, already offers a solution that is independent of the mesh quality. This invalidates one of the main motivations of this submission that previous approaches are dependent on mesh quality, and thus an implicit neural representation is required. Moreover, the assumption that storage is a limiting factor on solvers is also incorrect, since a solver usually has to store a single time-step of the represented variables for advancing the simulation state. The presented results also show very modest resolutions. \n- There are missing references and/or previous methods are not thoroughly considered, leading to an outdated methodology proposition. Recent approaches (“Covector Fluids”, “Impulse Particle In Cell”, “Fluid Simulation on Neural Flow Maps”, “Eulerian-Lagrangian Fluid Simulation on Particle Flow Maps” and “Lagrangian Covector Fluid with Free Surface” to name a few) adopt structure preserving integrators by considering the deformation of the flow map during advection. This is ignored by the proposed advection method, which has a rather lengthy description in the paper. 
Lastly, Elcott et al, 2007b does not suffer from instabilities as it is mentioned in the manuscript and modern structure preserving solvers (\"Impulse Particle In Cell”) are able to accurately advect velocities without major stability issues. \n- The paper partially focuses on showing mathematical proofs that are known by the exterior calculus community (divergence free vector fields on surfaces), which make the described theory not so relevant as new theoretical contributions. The authors could just reference relevant discrete exterior calculus material or move the lengthy mathematical descriptions to the Appendix. \n- The paper should have been focused on more relevant aspects of the implicit neural representation, such as network structure, how to properly tackle high-frequencies of the implicit neural field, how to make the training/evaluation process efficient (e.g., check “Instant Neural Graphics Primitives with a Multiresolution Hash Encoding”), etc.\n- The authors mention that pressure projection (usually the most expensive part of a fluid solver) is not required by their approach. However, they solve a non-linear optimization problem iteratively with a simple ADAM gradient descent approach. This approach is way less efficient than traditional operator splitting, as evidenced by the timings shown in Table 1 (16h for 80k vertices is a very inefficient timing for the considered resolution). Lastly, there seems to be some high-frequency “ringing” artifacts generated by the proposed method in Figure 3 which are not present in ground truth or in the HOLA-7 results. \n\nThese are some of the reasons that justify my low score for this paper. I suggest the authors to rethink their approach before resubmitting the manuscript."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "An implicit neural representation based physical simulator for fluid dynamics on surfaces"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024neural,\ntitle={Neural Fluid Simulation on Geometric Surfaces},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=58lbAsXCoZ},\nnote={under review}\n}"
},
"abstract": {
"value": "Incompressible Euler fluid on the surface is an interesting research area in the fluid simulation, which is the fundamental building block in visual effects, design of liquid crystal films, scientific analyses of atmospheric and oceanic phenomena, etc. The task brings two key challenges: the extension of the physical laws on 3D surfaces and the preservation of the energy and volume. Traditional methods rely on grids or meshes for spatial discretization, which leads to high memory consumption and a lack of robustness and adaptivity for various mesh qualities and representations. Many implicit representations based simulators like INSR are proposed for the storage efficiency and continuity, but they face challenges in the surface simulation and the energy dissipation. We contribute a neural physical simulation framework on the surface with the implicit neural representation. Our method constructs a parameterized vector field with the exterior calculus and Closest Point Method on the surfaces, which guarantees the divergence-free property and enables the simulation on different surface representations (e.g. implicit neural represented surfaces). We further adopt a corresponding covariant derivative based advection process for surface flow dynamics and energy preservation. Our method shows higher accuracy, flexibility and memory-efficiency in the simulations of various surfaces with low energy dissipation. Numerical studies also highlight the potential of our framework across different practical applications such as vorticity shape generation and vector field Helmholtz decomposition."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Fluid simulation",
"Implicit Neural Representation",
"Exterior Calculus"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/cffe916f67661f482a28a84af0345b65197b0ba3.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/59ec4282a46eb0f1c7a432000630ee89d7b220fe.zip"
},
"title": {
"value": "Neural Fluid Simulation on Geometric Surfaces"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
590yfqz1LE | Measuring Non-Adversarial Reproduction of Training Data in Large Language Models | main | Active | large language models;memorization;data extraction;originality;privacy | alignment, fairness, safety, privacy, and societal considerations | 3;5;5;6;6;6;8;8 | 4;3;4;3;4;5;4;4 | 2;2;3;3;3;3;3;3 | 2;2;3;3;3;3;3;4 | 2;3;2;4;4;4;3;4 | 5.875 | 3.875 | 2.75 | 2.875 | 3.25 | 0.118781 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please refer to sections above and answer the questions"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Overall, this paper was a joy to read. I found it to be very thoughtfully written. I routinely ended up in situations where I had a particular thought experiment in mind, and the next section showed just that set of ablations or experiments.\n\n- I liked Figure 4(b) which shows how reproduction strongly depends on the task. This consolidates an important finding/hypothesis. At a high level, while the paper reports that 8-15% of LLM-generated text overlaps with existing online snippets, it goes further to analyze the types of overlaps. This also highlights the complexity of defining \"problematic\" reproduction.\n- I found the experiment on extracting Quotations quite interesting, in particular, because it shows incorrect attribution of the quote.\n- Distribution of overlap lengths: A small percentage of outputs contain very long reproduced sequences. This long-tail phenomenon suggests that even with low average reproduction rates, LLMs can still pose risks in specific cases."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work discusses the case of unintentional reproduction of training data by large language models. While most of the literature discusses an adversarial nature of prompting to extract training data, this work tries to quantify how frequently this influence happens in a non-adversarial situation. One of the findings of the work is that non-adversarial reproduction is much higher in expository tasks than in creative ones, and even prompting techniques, while they can reduce the average reproduction rate, are not sufficient to prevent longer sequences from appearing. One of the highlight results of this work is that about 15% of the text output by popular conversation language models overlaps with short snippets of text on the internet, much higher than baseline rates by humans on the same task."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Frequency of Reproduced Sequences: The paper could benefit from clarifying how often the reproduced sequences appear within their training data proxy, AUXDATASET. Understanding whether these snippets are rare or commonly encountered would help contextualize the reproduction risks.\n\n- Justification of 50-Character Threshold: The choice of a 50-character threshold to define reproduced sequences is not fully justified. In particular, this is quite different from past work. While some examples in the Appendix suggest that 50 characters is a meaningful number, I believe most examples highlight that such sequence lengths can be so common in the natural language distribution that their reproduction does not matter. Further explanation would help readers assess whether this threshold adequately captures the difference between common phrases and more problematic reproductions.\n\n- Data in Figure 2(b): Figure 2(b) appears to have only partial bar plots for some models (Llama and GPT), making the comparison across models less robust. Or am I missing something here?\n\nOverall, I am constantly battling between thinking that 50 characters is too less, and then seeing the argument that these reproduction rates are much higher than humans. This makes me wonder if humans are the right baseline here. Would a human with a passage (RAG style reading comprehension) be a better baseline? There is a qualitative dichotomy here: the 50 characters do not feel meaningful when visualized, yet stay higher than what a human would reproduce."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. (Line 162 - Question about human-written baseline) Even with this measure of choosing content after LLM cut-off date and content which is not part of AUXDATASET, how is it confirmed that the content taken from Reddit is not LLM generated? Is it not possible that an LLM might have been used to generate it?\n\n2. (Line 321) The paper mentions, “We find that LLMs reproduce more existing data than humans, except when humans do blatant plagiarism.” I might have missed it in the text, but it would be great to have some clarification regarding how this is controlled? For example, given a prompt like “Write a tutorial about setting up an Nginx server.”, humans might be prone to copy data verbatim from top-ranked articles on a search engine. There is a discussion in Line 404 about IMDb reviews, but what measures were taken for Reddit content?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is an extensive analysis of situations where LLMs generate text from training data verbatim, even when not explicitly prompted to reveal such information. The later case has been seen in adversarial attacks against LLMs in recent research. So, the results from this study can be used to inform us about scenarios where data leakage happens without explicit adversarial effort."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper tackles the problem of mitigating non-adversarial training data reproduction (natural and benign prompts from users revealing training data verbatim) in LLMs. The experiments find that in some cases as much as 15% of text output by LLMs overlaps with moderate snippets (40-60 characters) of the Internet. A comparative analysis finds that human-written text has far less overlap compared to LLM generated text. The paper proposes prompting strategies to close this human and LLM gap. Though these strategies close the gap between LLMs and humans on average, the paper suggests that worst-case reproduction might need stronger adversarial defenses.\n\nThe classes of tasks chosen for LLM text generation in this paper can be broadly classified into *creative writing*, *expository writing*, and *argumentative writing*. Since training data information is not available for certain models, the training data is approximated by collecting a large dataset of Web content (AUXDATASET).\n\nThe primary metric used is overlap rate (the percentage of characters in each generation that belong to a substring of at least 50-consecutive characters found exactly in AUXDATASET)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The current text is ordered as a set of subsections with a verbatim description of experimental steps. The presentation lacks focus on the main contributions of this research. For example, if this is the case, it should probably be highlighted that LLM data leakage studies for benign prompts haven't been looked at. Furthermore, instead of presenting all the results (as in Section 3) as small headings and text, it would help to have an additional small section which highlights the most important contributions which readers can take away from the paper.\n\nI have some concerns about the collection of human data regarding plagiarism and contamination. Please refer to the Questions section."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Can the authors reason why the 50-character limit beyond simply the argument that non-adversarial prompts reproduce less training data? The qualitative analysis of those 50-character snippets is appreciated, but as the authors showed, many of them are common phrases that might not constitute problematic behaviour from LLMs.\n- Can the authors provide more details on how their manual prompts were created? Were they crowdsourced, or written by the authors themselves? Were they sourced from how authors themselves commonly use LLMs, or were they thought up in bulk at once? Were there efforts made to categorize them into a variety of prompts (beyond the three broad categories used in the paper), or maybe efforts made to check this variety after the prompts were created? No answers are bad answers here, even if the prompts were written by the authors in bulk in one sitting to capture the broad categories defined, that's a good start. But in any case, details are needed."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Incredibly relevant work. While the pessimist in me believes that LLM developers will always find new excuses to argue why LLMs regurgitating sensitive or proprietary data is not their responsibility, it is important to try and keep holding them accountable. In the context of adversarial reproduction of training data not being \"typical or allowed user activity\", this work plays an important role in highlighting how even everyday use of LLMs can reproduce training data.\n- Wide range of experiments, both in terms of different models, as well as verifying various hypotheses. Lots of interesting insights.\n- Qualitative analysis and handpicked examples. I was happy to see some qualitative analysis by the authors, especially of the long tail."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper shows how language models can reproduce training data even with 'non-adversarial' prompts. While LLMs have been previously shown to reproduce training data, these experiments were conducted with adversarial assumptions, and the prompts used can be considered a violation of user policy by many LLM developers. The authors argue that even under the assumption of non-adversarial prompts, i.e., everyday use prompts that are not targeted at extracting training data, one can see LLMs regurgitating their training data. The authors provide a wide range of experiments on many different SOTA conversational LLMs and with many different categories of prompts to support their hypothesis."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The use of a 50-character limit for overlap rate. I'm not convinced that the 50-character limit is strong enough to cause issues for LLMs reproducing training data. I'm not familiar with legal precedence on reproducing text without attribution; but at least when quoting from other sources, the limits are usually looser - even the strictest being around 25-50 words and usually, it is a few hundred words (https://ogc.harvard.edu/pages/copyright-and-fair-use, https://stevelaube.com/how-much-can-i-quote-from-another-source-without-permission/). Although, it should be mentioned that the authors are very open about their overall results and also discuss the long-tailed part of the reproduction, which highlights some actual issues. But despite this, their main results and trends are focused on an overlap rate defined with a 50-character limit.\n- Lack of details on additional prompts used in the experiments. The authors have created some manual human-written prompts, which are used alongside data scraped from Reddit, in their experiments. I understand that releasing all these prompts during the reviewing phase might not be practical, and I appreciate the authors mentioning that they will release them in a later version of the paper, but I would like to see some details in the paper to perform a proper review of their work. More details on this in the questions below."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1) Are strings of length 40-60 (Line 45) or 50 (Line 129) considered?\n2) Do the reproductions occur in similar contexts to the originals?\n3) Lines 162-164 - what is the filtering procedure for the human-written text for it to not appear on the internet? If it is filtered, how did plagiarism appear in the human-written IMDb reviews?\n\nNitpicks:\n1) Figure 1 could be improved by adjusting the colour scheme and ensuring the readability of the small text.\n2) The use of “aligned” in section 5 could be more precise. While Christiano et al., 2017 and Ouyang et al., 2022 describe alignment as a continuous objective of RLHF fine-tuning, Nasr et al. (2023) simply uses “aligned” to describe models that have undergone RLHF. To avoid this ambiguity, more specific terms like “RLHF-tuned” or “alignment fine-tuned” could be used to describe these models."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1) The paper is well-written and accessible. The figures effectively convey the key findings.\n2) The empirical results are extensive and presented in a way that is easy to interpret. The authors test the most relevant models (including GPT, Claude, and Llama) and use varied datasets including real conversations.\n3) The experiments are well-designed, the knowledge cutoffs and dataset limitations are addressed in the text."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper explores to what degree LMs reproduce their training data in natural circumstances, i.e. settings where there is no adversarial pressure to reproduce the text. Human-written text is used to compare the extent to which the completions have exact matches in AuxDataset. The results show that unintended reproduction occurs more if the text is generated by models instead of humans, and if the task is expository, e.g. tutorials. Two system prompts are investigated for mitigating unintended reproduction, yielding moderate success."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) Only exact matches are considered, excluding reproduction of close matches with a low hamming-distance or reproduction of semantics.\n2) The results are harder to interpret due to the possibility that a large number of reproductions by both humans and models is not captured by using AuxDataset. Perhaps the extent of the problem could be estimated by running the tests on a dataset that is expanded with additional sources and comparing the resulting numbers to the current ones.\n3) The selection of 50 character length seemed insufficiently motivated. Especially since it is different from the prior work and results in both memorised and unoriginal phrases being included."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. How fast can you check whether a given generation is in AuxDataset? Curious if we can reduce the probability of regurgitating very long text by doing a check against an internet proxy dataset at inference time.\n\n2. It's very interesting that the system prompt you used below reduces reproduction length. Why do you think this works? Did you try any other system prompts outside of this and the Claude prompt? Is it because the model can internally model a circuit to determine the probability of text being in its training data? If this is the case, it's even more interesting that this works nearly just as well for the smaller Llama 8B model as the larger 405B model, especially since the 8B model is expected to have weaker internal circuits.\n\nPrompt: ```Your primary goal is to produce content that is completely original and free from any existing text on the Internet. This means that if someone were to take a few words from your explanation and search for them online, they can not find any matching results.```"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Significance: This paper is interesting because it quantifies the known phenomenon of LLMs reproducing text from their training data. In contrast to prior work, it attempts to evaluate a natural distribution of reproduced text lengths. The topic is important as LLMs are commonly used as assistants: there is a quantified risk to using LLMs for writing and code generation, as training data reproduction could result in unintentional plagiarism from the end user. This is exemplified by one of the most interesting findings of this paper is that benign prompting can result in reproduced text of 100 characters (2.5% of the time) and 1000 characters (0.01% of the time).\n\nQuality: The authors conduct extensive analysis to break down the reproduction by text category and also demonstrate that reproduction rate is higher than human written text, including qualitative analysis and a simple mitigation strategy, with a clear presentation of their findings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper measures character-level verbatim text produced by LLMs (GPT, Claude, and Llama) on benign prompts and conversations, in contrast to adversarial membership inference attacks. The authors find that LLMs indeed reproduce internet text up to 15% of the time (50+ characters), in the worst case regurgitating over 1000 characters. The authors provide a breakdown of severity by task and compare to human baselines. Finally, the authors find that setting a system prompt can reduce this form of text reproduction for shorter sequences."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. 50 character cutoff may overestimate regurgitation: The authors acknowledge this limitation, but it is difficult to differentiate based on character length alone whether the data is truly regurgitated off the internet or just due to it being a common phrase, especially when the length is around 50 characters. Additional analysis to estimate a numerical proportional breakdown between these two categories would make the paper more rigorous. There is far less doubt about text reproduction vs. common phrases past the 100-150 character point.\n\n2. AI contaminated human baselines: Since the human baselines were scraped off the internet after the training cutoff of these models, they could already contain AI generated text from a prior generation of LLMs rather than represent a natural distribution of human writing. I would find it interesting if you can also evaluate the reproduction length distribution of human data known to be mostly free from AI contamination, i.e. before the widespread release of LLM assistants."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why do the authors not consider open-source LLMs where they can know which datasets are used for training? For example, in the Membership Inference Attack area of LLMs, researchers usually use Pythia and the Pile dataset.\n\n2. Why do the authors collect their own dataset instead of WildChat and LMSYS-Chat-1M? What is the unique advantage of the new dataset?\n\n3. Why do the authors consider substrings of 50 words? How will the results change if changing the threshold?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper studies a very interesting question about quantifying non-adversarial reproduction in LLMs, which is practical in using LLMs.\n\n2. The analysis of the question is comprehensive, containing the conclusion for different tasks. The study provides a good understanding of how and when LLMs are more likely to reproduce training data.\n\n3. The exploration of prompting as a mitigation strategy gives good insights, showing both its potential and limitations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates non-adversarial reproduction in Large Language Models, where models unintentionally reproduce strings of their training data in response to benign prompts. By analyzing various tasks including creative writing and expository writing, the authors found that 10%-15% of LLM-generated content overlaps with internet text, significantly more than human-generated text. The study shows that expository tasks are especially prone to this phenomenon. Although specific prompting strategies can reduce the frequency of reproduced content, they do not fully prevent long sequences from appearing, indicating a need for stronger measures to mitigate unintended data leakage."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The evaluation results might be biased because the authors cannot access the real training dataset of evaluated LLMs. \n\n2. Some points in the paper are not very clear. For example, for the prompt mitigating part, the authors do not demonstrate which dataset are they using. And since they can use WildChat or LMSYS-Chat-1M, what is the motivation for collecting a new dataset?\n\n3. The length of substrings that are used to calculate the overlap rate is strange. This paper considers a substring of 50 words, which is 'shorter than the 50 token (150–200 characters) threshold used in previous studies on adversarial extraction'. However, the authors do not provide a decent reason for using 50 words. The authors also mention that a substring of 50 words could be both common or unique sentences. However, I do not think a common sentence or phrase should be considered as a leakage of training data. Using such a standard could make the evaluation results further biased."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. I suggest that the authors consider using some training data detection methods (e.g., [1]) to assist in identifying training corpus when exploring reproduction of training data.\n\n[1] Detecting Pretraining Data from Large Language Models. (ICLR-24)"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Non-adversarial reproduction is valuable for protecting LLM outputs from risks such as infringement and privacy violations.\n \n2. The authors validate the existence of non-adversarial reproduction risks across several mainstream models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors investigate the issue of non-adversarial reproduction, which refers to the overlap between LLM outputs in response to natural prompts (mainly writing prompts) and internet text. The authors argue that LLM-generated text carries a higher risk of overlapping with internet text than human-written text. Additionally, the authors explore the preliminary use of prompt design to reduce overlap."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors’ conclusion seems somewhat obvious, as LLMs are explicitly fit on internet text. Intuitively, LLMs are more likely to produce text resembling their training corpus than humans. The authors should better articulate the value of their findings.\n\n2. Building on the first point, the authors propose using prompt design to mitigate overlap. However, the method and its underlying principles lack significant innovation.\n\n3. The authors appear to conflate reproduction of internet text and training data. These are not equivalent, as the training data depends on the model's degree of fit. Especially when using a simulated dataset, this discrepancy may be amplified.\n\n4. The task is limited to writing. I suggest the authors consider extending it to other tasks. Generally, open-ended writing tasks are more likely to lead LLMs to recite memorized training data."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In the **human-written text dataset**, how do you make sure that the human-written texts are not actually generated by an LLM? Cause human may use LLM to generate these texts."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The motivation of this paper is clear and specific. This paper propose a new threat \"non-adversarial reproduction of training data\".\n- This paper provides solid experiments across a large range of LLMs.\n- This paper creates a new task dataset, and propose a method to evaluate the human reproduction baseline, to evaluate the human reproduction baseline, they collect a human benchmark dataset.\n- This papaer is well written, easy to follow and understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the problem of \"non-adversarial production of training data\", which is the problem of \"how often do LLM copy the training data on normal user prompts?\". The authors craft some tasks and used two existing user prompts datasets, and use the datasets to check for overlap between the LLM outptus and their training dataset. Since their actual training dataset is unknown, they instead of a webscale corpus: AuxDataset, as a proxy. Their result shows that 5%-15% of LLM outputs are reproduction of training data. They have tested with using prompts to mitigate this problem, and they show that prompts can reduce the reproduction rate, but can not prevent long-tail leakage of training data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- In section 6, you mentioned that it's hard to distinguish reproduction of common idioms from problematic memorization. Is it possible to estimate how much of the overlap is problematic? Cause sometimes citing a known source is not a problem, so that may not be considered a problematic reproduction.\n- You have tested on temperature of 0 and 0.7, both are low temperature. Can you add experiments on temperature higher than 1 to see the reproduction rate under high temperature?\n- You have tested two system prompts in section 4, can you test with more prompts?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We measure the frequency at which LLMs reproduce training data when not prompted to do so adversarially, and find that it can happen frequently even on accident."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024measuring,\ntitle={Measuring Non-Adversarial Reproduction of Training Data in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=590yfqz1LE},\nnote={under review}\n}"
},
"abstract": {
"value": "Large language models frequently memorize parts of their training data.\nThis behavior led to a large body of research on data extraction attacks,\nwhere adversaries coerce a model to output memorized examples.\nHowever, most LLM users are not malicious;\nthey only want an LLM to perform some desired task.\nIn this work, we investigate non-adversarial reproduction,\nwhere the outputs of a large language model overlap with existing public text\nwhen responding to natural and benign prompts.\nFor a variety of innocuous prompt categories\n(e.g., writing a letter or a tutorial),\nwe show that up to 15% of the text output by\npopular conversational language models overlaps with moderate snippets (40–60 characters) of the Internet.\nFor the same tasks, we find that human-written text\nhas far less overlap with existing Internet data.\nWe further study whether prompting strategies can close this reproduction gap\nbetween models and humans.\nHowever, while appropriate prompting can reduce non-adversarial reproduction on average,\nwe find that mitigating worst-case reproduction of training data\nrequires stronger defenses—even for benign interactions."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large language models",
"memorization",
"data extraction",
"originality",
"privacy"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8af34883b695042089289edb6cea12e6bbf9bdfc.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Measuring Non-Adversarial Reproduction of Training Data in Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
599F4CZ0HB | Bench-O-Matic: Automating Benchmark Curation from Crowdsourced Data | main | Active | LLM;Evaluation | datasets and benchmarks | 5;5;8 | 3;2;3 | 2;2;3 | 2;2;4 | 3;3;3 | 6 | 2.666667 | 2.333333 | 2.666667 | 3 | 0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Is there any estimation on the error/quality of the data generated? Or using some metrics to evaluate the similarity of the generated data with the real-world data?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Bench-O-Matic efficiently creates high-quality benchmarks from crowd-sourced data without human input, the work addressed the scalability issue in benchmark curation.\n- The work Introduces novel metrics like Separability with Confidence and Pair Rank Brier Score, enhancing the robustness and reliability of benchmark assessments.\n- Eval-O-Matic achieves strong performance alignment with human preferences for only $20 per evaluation, and provides a cost-effective alternative to static benchmarks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work proposes Bench-O-Matic, a system for automatically curating high-quality, open-ended LLM benchmarks by using large-scale, crowd-sourced data. This tool addresses the need for evolving benchmarks that adapt to the rapid development of LLMs without requiring human intervention."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Quality insurance. The seven quality criteria may not fully encompass the diversity of user tasks, potentially favoring specific types of prompts over others.\n- The synthesis of data is reliant on on LLMs as Judges. The LLM-as-a-Judge framework may introduce stylistic or self-bias, even with adjustments, which could influence benchmark objectivity in certain cases."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Previous studies have shown that fine-tuning the LLM-as-a-Judge can significantly improve evaluation robustness. Has this been considered in the current work? This could help improve the quality of the judges, the main limitation of this benchmark.\n- In Section 4.2, it states, \"We also ensure the final dataset is free from personally identifiable information or offensive content.\" Could the authors elaborate on how this is achieved? Was this done manually or automatically with the help of an LLM?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The problem statement is clearly defined.\n- The paper addresses a significant challenge highly relevant to the current state of AI and places well in the current literature.\n- The pipeline is flexible and open-ended, allowing for continuous improvements over time.\n- The experiments are comprehensive, demonstrating that the pipeline effectively creates benchmarks based on the metrics defined in the paper, with multiple LLMs evaluated on Eval-O-Matic.\n- The paper presents new ideas to evaluate benchmarks to overcome previous issues."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an automated pipeline, Bench-O-Matic, designed to curate prompts and create benchmarks for evaluating large language models (LLMs). The authors propose new metrics to assess benchmark quality, ensuring a clear separability of confidence scores and alignment with human preferences. The prompts are organized into topic clusters to ensure diversity, and an \"LLM-as-a-Judge\" approach is used to evaluate responses from various LLMs, fully automating the evaluation process. Additionally, the paper presents two novel benchmarks generated using this pipeline: Eval-O-Matic, based on Chatbot Arena, and Wild-O-Matic, derived from WildChat-1M."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Using an LLM to evaluate other LLMs’ responses may limit the complexity of the benchmark prompts. While employing an ensemble of judges partially mitigates this issue, there is still an inherent limitation. However, the advantages of an automated pipeline outweigh this concern, and the authors have implemented techniques to reduce evaluation biases.\n\nI have a hard time finding weaknesses for the paper. It is a well-executed and solid paper, though not necessarily groundbreaking.\n\n**Minor Comments** \n- On line 80, \"achieve 98.6% correlation\" should be \"achieve**s** 98.6% correlation\".\n- On line 82, \"Our work**s** makes\" should be \"Our work makes\".\n- On lines 206 and 352, \"Section C\" should probably be changed for \"Appendix C\" for clarity.\n- On line 464, \"an regression based approach\" should be corrected to \"**a** regression-based approach.\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Is the approach really adaptable/configurable ? Restate the claims if not. \n- Can the approach work irrespective of humans in the loop ? i.e., crowd-sourcer providing initial prompts. \n- human Annotation study; \n How many human annotators were involved?\n What was the inter-annotator agreement?\n How were discrepancies between annotators resolved?\n Were the human annotators experts in any particular domains?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The promise of the paper is excellent if delivered -- Reconfigurable automated benchmarks without humans in the loop and via crowd sourced data. With a series of prompting techniques in the pipeline, the approach is fair and well studied. Key innovations are in the design of metrics to separate various models and crux of thesis on generating evaluation data that is of high quality and can be separable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes approaches to automate the benchmark generation process via the prompting of LLMs. The proposals for different characteristics to establish the baselines are fair and the contributions are around the different scoring mechanisms to 1) evaluate the quality of prompts 2) LLM-based judging of prompt outputs to generate 1-5 score instead of binary preferences and 3) combining them with statistical aggregators to differentiate end evaluate different LLM outputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The key weaknesses around this paper are the claims that the proposed approach is human-free and easily configurable as shown in the Table comparing the multiple methods. Given that the approach leverages use of ChatBotArena supplied queries and even though the quality filter will remove the specific poor quality queries, it is not free from the input i.e., humans prompting the different LLMs on the arena and easily being configured to a use case that end users may have in mind. Discussing results on adapting the evaluation framework to beyond what is available in Chat bot Arena would be needed to support the claims of the paper. Also it would be good to discuss potential biases introduced by using ChatBotArena queries as a starting point. The paper could be strengthened by providing concrete examples or experiments showing how their approach could be adapted to different domains or use cases beyond ChatBot Arena data\n\n\n\nAn additional area of concern is that almost every step of the pipeline involves a prompt engineering exercise including scoring the final models on a scale of 1-5. This is standard but the question emerges on the fidelity of the LLMs themselves and when they hallucinate themselves. As evidenced by the score sorted by topic cluster, the data does show that for exact answer situations like Python game coding versus loose open ended questions the LLM-judges are not very good. To strengthen the paper - discuss potential failure modes or biases introduced by relying heavily on LLMs, provide more detailed analysis of how performance varies across different types of questions or topics and suggest ways to mitigate or detect potential hallucinations or errors introduced by LLMs in the pipeline\n\n\nThe details of human annotation were very unclear. See questions below."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Scalable curation of high-quality, automated benchmarks from extensive data without human in the loop."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024benchomatic,\ntitle={Bench-O-Matic: Automating Benchmark Curation from Crowdsourced Data},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=599F4CZ0HB},\nnote={under review}\n}"
},
"abstract": {
"value": "The rapid evolution of Large Language Models (LLMs) has outpaced the development of model evaluation, highlighting the need for continuous curation of new,\nchallenging benchmarks. However, manual curation of high-quality, human-aligned\nbenchmarks is expensive and time-consuming. To address this, we introduce Bench-O-Matic, an automated pipeline that leverages LLMs to curate high-quality, open-\nended prompts from large, crowd-sourced datasets, enabling continuous benchmark\nupdates without human in the loop. We apply Bench-O-Matic to datasets such as\nChatbot Arena and WildChat-1M, extracting challenging prompts and utilizing\nLLM-as-a-Judge for automatic model evaluation. To validate benchmark quality,\nwe propose new metrics to measure a benchmark’s alignment with human preferences and ability to separate models. We release Eval-O-Matic, a benchmark\nconsisting 500 challenging prompts curated by Bench-O-Matic. Eval-O-Matic\nprovides 3x higher separation of model performances compared to MT-Bench and\nachieves 98.6% correlation with human preference rankings, all at a cost of $20.\nOur work sets a new framework for the scalable curation of automated benchmarks\nfrom extensive data."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM",
"Evaluation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/49edb452c360143cb7da6d2a5ca85004241a37e3.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bench-O-Matic: Automating Benchmark Curation from Crowdsourced Data"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
59r0ntInvF | Haste Makes Waste: Teaching Image Restoration to Learn Distributions from Pixels to Patterns | main | Active | Image Restoration;Low-level Vision;Training Strategy | applications to computer vision, audio, language, and other modalities | 3;5;6 | 4;5;3 | 2;3;3 | 1;3;3 | 3;2;3 | 4.666667 | 4 | 2.666667 | 2.333333 | 2.666667 | -0.327327 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "None"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper proposes a novel training approach that addresses the complexity of learning pattern distributions in IR by breaking it down into simpler stages, which is a creative solution to a known challenge in the field.\n\n2. The method is evaluated extensively on benchmark datasets, demonstrating consistent improvements across different models and tasks, which speaks to the robustness of the approach.\n\n3. TRAPS can be integrated into existing supervised IR methods without additional burden, making it a versatile tool that can potentially benefit a wide range of IR models.\n\n4. The paper provides a theoretical justification for the training strategy by modeling the IR task as an optimization problem involving distribution mapping, which adds depth to the understanding of IR processes."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new training strategy for image restoration (IR) tasks. The strategy, named TRAPS (InTRA-patch Pixel-Shuffle), addresses the IR problem by modeling it as a distribution mapping challenge from two perspectives: intra-pixel regression and inter-pixel interaction. The method starts by teaching the model to learn a simpler pixel-by-pixel distribution, which serves as a prior and inductive bias, and then transitions to learning cross-pixel pattern distributions. The proposed approach aims to improve the model's ability to learn complex pattern mappings between degraded and clean images by breaking down the learning process into more manageable stages."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper does not discuss the potential for overfitting, especially since the model is learning from a shuffled pixel distribution, which could lead to different characteristics compared to natural image statistics.\n\n2. Although the method shows good prospects in IR tasks, it is not clear how well it can be generalized to other low-level vision tasks. Because the method proposed by the author is very simple to implement and the theory is simple, sufficient experiments are the premise to prove its effectiveness. At present, only two tasks do not seem to be enough to prove its effectiveness and scalability. Other common restoration tasks are also necessary, including: image super-resolution, image dehazing, image deraining, low-light enhancement, etc.\n\n3. The method proposed in the article is simple and effective. But what is its computational cost for the network? If the repair network is larger, will the computational cost of this method also increase? This part should be further analyzed.\n\n4. Are there other visualizations and more detailed theoretical justifications that could further support the proposed optimization directions?\n\nSince the article is quite interesting and has a new perspective, I will make corresponding changes based on the author's rebuttal."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See Weaknesses"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**Enhanced Performance**: The proposed training strategy introduces an inductive bias that can significantly boost the performance of existing supervised infrared (IR) methods, leading to improved accuracy in pixel-to-pixel regression tasks.\n\n**Seamless Integration**: This paper's strategy can be easily incorporated into current IR frameworks without requiring major modifications, allowing researchers to adopt the method with minimal effort and disruption.\n\n**Versatile Application**: By functioning as both a free data augmentation technique and a training warm-up approach, the proposed strategy provides flexible options for enhancing model training, making it applicable in various IR contexts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper revisits the infrared task by identifying its fundamental pixel-to-pixel regression nature and modeling it as an optimization problem from both intra-pixel and inter-pixel perspectives. It proposes a novel training strategy tailored to these observations, serving as a free data augmentation method or a warm-up approach for training. This paper’s strategy can be seamlessly integrated into existing supervised IR methods without additional burden, effectively introducing an inductive bias that enhances model performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Limited Early-Stage Effectiveness**: The TRAPS strategy relies on a gradual transition from intra-pixel to inter-pixel optimization, which may delay capturing complex content distributions early in training, potentially leading to slower initial convergence.\n\n**Dependency on Pre-Generated Indices**: The need to shuffle pixels according to pre-defined indices might introduce constraints, as it requires careful setup and may limit flexibility in adapting to varying IR tasks or datasets.\n\n**Potential for Overhead in Warm-Up Phase**: Although designed to streamline optimization, the warm-up phase might add computational overhead, as the network initially focuses on simplified pixel mappings, which could lengthen the overall training duration in some cases."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Train the network using the same settings as the comparison method to see if the performance can surpass the comparison method."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposed approach enhances the network's performance without requiring additional training data or time."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new training strategy for image restoration tasks by modeling from both intra-pixel and inter-pixel perspectives. This approach enhances the network's performance without requiring additional training data or time."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The method lacks sufficient innovation and resembles more of a trick, which is not enough to support a paper at ICLR.\n2. The authors retrained all comparison methods during their experiments, yet the reported results for these methods are significantly lower than those in the original papers. This raises uncertainty as to whether the proposed method is only effective under the specific training conditions used by the authors. Even under these conditions, the performance improvement is minimal.\n3. The authors claim their method is designed for image restoration tasks, but the experiments only include image denoising and deblurring. There is a lack of experiments on other image restoration tasks, such as image super-resolution and deraining."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024haste,\ntitle={Haste Makes Waste: Teaching Image Restoration to Learn Distributions from Pixels to Patterns},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=59r0ntInvF},\nnote={under review}\n}"
},
"abstract": {
"value": "In this paper, we revisit the image restoration (IR) task and propose a new training strategy that models the IR problem as a distribution mapping challenge from two perspectives, i.e., (1) the intra-pixel regression and (2) the inter-pixel interaction. At the beginning of optimization, due to the pattern distribution involving a group of pixels within a neighborhood, it is not very easy for the model to capture such multi-pixel distribution mapping. A more optimal solution would be firstly teaching the model to learn a relatively simple yet important distribution w.r.t the pixel-by-pixel mapping between the degraded/clean pixels, as warming up. By doing so, the learned distribution is served as a prior, regarded as an injection of a kind of inductive bias into the model's whole optimization procedure. Subsequently, as conventional, the model is shifted to focus on the mapping distribution of the cross-pixel patterns, which ensures the consistency and fidelity of the image patterns. The final learned mapping is a joint distribution, which transfers the knowledge from the pixel distributions to the pattern ones. Experimental results indicate that under the compact and elegant training paradigm, the newly learned joint distribution is closer to the ideal one and yields a stronger representation ability, to circumvent the dilemma of the difficulty for existing methods to learn the patterns mapping distribution between degraded/clean images right off the bat."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Image Restoration",
"Low-level Vision",
"Training Strategy"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9ab9bf067e29f1d580f788b8887f5c8252e9323f.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Haste Makes Waste: Teaching Image Restoration to Learn Distributions from Pixels to Patterns"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5AB33izFxP | Simultaneous Online System Identification and Control using Composite Adaptive Lyapunov-Based Deep Neural Networks | main | Active | Adaptive control;Online Learning;Control Theory;Robotics | applications to robotics, autonomy, planning | 3;6;6;6 | 4;4;2;3 | 2;2;3;3 | 2;3;3;3 | 2;2;3;3 | 5.25 | 3.25 | 2.5 | 2.75 | 2.5 | -0.522233 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Shouldn’t there be an assumption for the activation functions to be convex? How do you deal with the nonconvex dependence of the underlying loss function on weights of hidden layers? Is it playing a role?\n\n2. How can input and state constraints be integrated in the proposed approach?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is on a very timely topic, DNN-based control. With the emergence of AI-based approaches, a principled treatment of a learning-based controller is an impactful contribution. Introducing a controller which allows for updates in all layers, in contrast to the state of the art where only thelast layers can be updated, and where system dynamics is not considered explicitely, is a big contribution. \nGiven the complexity of the paper, it is very clearly presented. The simulation examples are relevant and not just datasets, but dynamical systems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is introducing an adaptive DNN-based controller. Standard adaptive DNN-based controllers are based on Lyapunov-based analysis and allow updates on the last layers of the NNs only. They only use the tracking error as a metric to indicate when adaptation is needed, and only provide guarantees on the tracking error convergence. This paper introduces a dual (composite) method, for continuous system identification combined with trajectory tracking, and guarantees that the tracking error, state-derivative estimation error, and DNN weight estimation errors are uniformly ultimately bounded. The last two reflect identifying the dynamics of the system.\nThe system identification is performed via a dynamic state-derivative estimator and under the assumption of persitence of excitatiton. The controller is evaluated in simulation on a two-link manipulator system and an unmanned underwater vehicle system with intermittent loss of state feedback, and shows improvement compared to baseline methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In the simulations, there is only comparison to DNN-based controllers. \nIt would highly interesting to see how is the peroformance, compared to more standard controllers used in robotics (for example, MPC-based, or RL-based controllers). I am willing to raise my rating if this is added."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Could you relax Assumptions 1 and 2? In particular, as commented in Weakness, Assumption 1 is mathematically (and practically) severe. The authors comment that the developed methods can be extended to underactuated systems. However the details of the extension are scarcely explained, and no theoretical analysis is provided. The reviewer believes that the extension to underactuated cases and its convergence analysis should be the main contribution of this paper."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The simultaneous approach to online system identification and adaptation in the control logic, addressed in this paper, is well-motivated and justified with attractive numerical experiments. In addition, as claimed by the authors, the convergence analysis for the identification error and control error is novel."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses a methodology for simultaneously performing online system identification for the plant system and adaptation in the feedback control logic. Under some technical assumptions, stability conditions are presented, more specifically, the asymptotic stability of the equilibrium point of the entire feedback control system is ensured."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Assumptions 1 and 2 are mathematically severe. While this paper claims its contribution lies in simultaneous system identification and control, Assumption 1 implies that performing system identification can achieve arbitrary control performance (arbitrary system dynamics is realized by u= g^+(r - f(x,\\dot{x})). In this sense, the problem addressed in this paper is not essentially simultaneous. \n\nFurthermore, there are too many technical assumptions on the modeling accuracy, meaning the existence of \\bar{vareplison}, \\bar{\\theta}, etc."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1)\tThe existence of measurement noise should be considered, which is common in practical engineering; Please provide the control inputs of two simulation tests;\n2)\tI wonder if this method is effective for highly dynamic systems like quadcopters?\n3)\tProvide more experimental details, such as control inputs, weights update, and the selected control parameters."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This is first application of the Jacobian of the DNN to develop simultaneous online system identification and control. The theoretical content of the paper is good. The literature research is sufficient."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides the first result on simultaneous online system identification and trajectory tracking control of nonlinear systems using adaptive updates for all layers of the DNN. The Lyapunov-based stability analysis is provided, which guarantees that the tracking error, state-derivative estimation error, and DNN weight estimation errors are uniformly ultimately bounded."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The practical application of this method requires high computing resources and is not suitable for personal computers. The presence of measurement noise does not seem to be considered in the two simulation tests, which is unreasonable. Control inputs of two simulations should also be presented. Moreover, the nonlinear dynamics of the selected simulation system is weak."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. If the proposed framework could be combined with offline learning? It seems the proposed update strategy only relies on current measurements and has no historical data-mining procedure. It is a kind of traditional adaptive control, instead of modern data-based learning. Maybe a control journal is more applicable to this paper.\n\n2. If the considered uncertainty $f({x, \\dot{x}})$ can be extended to the composited disturbance, like $f({x, \\dot{x}, d})$, where $d$ denotes the external disturbance. It will be valuable in real applications."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The main contribution is that a Lpapunov-based adaptive framework is proposed to update **all layers** of DNN.\n2. Rigid convergence analysis.\n3. Two applied examples, despite only numerical simulations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a kind of Lpapunov-based adaptive framework, that can update all layers of DNN. The proposed method can handle nonlinear-in-parameters uncertainties. Moreover, a dynamic state-derivative estimator is utilized to obtain the state-derivative information. Overall, some novel theoretical results are developed in this paper with rigid proofs. The presentation is also clear. However, some drawbacks exist and many improvements can be further considered. There are some inappropriate statements and comparisons. The simulation tests are not enough to show its efficiency. Please refer to below for more details."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The Abstract is too long, preventing the reader from capturing key points quickly. It is recommended to only highlight the main contributions in the Abstract and technique details can be removed.\n\n2. It is claimed that the tracking, state-derivative, and weight estimation errors can be guaranteed to converge to bounded sets. **The factors that determine the upper bounds** of convergence sets should be provided in the Abstract.\n\n3. The previous work (OConnell, 2022) is compared in the Intro. It is claimed the limitation of the composite adaptive approach used by OConnell, 2022, is the inner-layer weight cannot be online updated. However, the considered case of OConnell, 2022 is different from the one of this paper. OConnell mainly focuses on a composited disturbance, which comes from external disturbance and internal state-related uncertainties. The last layer of DNN is updated online to handle external disturbances, while the inner layers correspond to internal state-related uncertainties, which would not change in application. However, the internal state-related uncertainty is mainly considered in this paper, i.e., $f({x, \\dot{x}})$. **The direct comparison with (OConnell, 2022) is inappropriate**.\n\n4. One important problem is only simulation examples are demonstrated in this paper, and no noises exist in the measured states, despite the theorems that seem to be relatively complete. A small upper bound of the convergence set depends on a large gain. However, the gain may enlarge the noises in a real system. Thus, **the effect of the real application is questionable**.\n\n5. In the simulation of two link manipulators, it is recommended to cover the ESO comparison and the composite adaptive method developed in (Slotine and Li, 1989). The gain selection strategy for all comparison methods should be provided to ensure fairness."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A composite adaptive deep neural network controller is developed with adaptation law comprising of the tracking error and prediction error to update the weights of all layers of the DNN."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024simultaneous,\ntitle={Simultaneous Online System Identification and Control using Composite Adaptive Lyapunov-Based Deep Neural Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5AB33izFxP},\nnote={under review}\n}"
},
"abstract": {
"value": "Although deep neural network (DNN)-based controllers are popularly used to control uncertain nonlinear dynamic systems, most results use DNNs that are pretrained offline and the corresponding controller is implemented post-training. Recent advancements in adaptive control have developed controllers with Lyapunov-based update laws (i.e., control and update laws derived from a Lyapunov-based stability analysis) for updating the DNN weights online to ensure the system states track a desired trajectory. However, the update laws are based on the tracking error, and offer guarantees on only the tracking error convergence, without providing any guarantees on system identification. This paper provides the first result on composite adaptive Lyapunov-based update law, involving a combination of the tracking error and a prediction error of the system dynamics, to adjust the weights of all layers of a DNN-based controller. As a result, the DNN can rapidly adapt to simultaneously achieve the goals of tracking a desired trajectory and identifying the system dynamics online. Since evaluating the DNN's prediction error typically requires state-derivative information, a dynamic state-derivative estimator is developed and interlaced with the weight update law. A combined Lyapunov-based stability analysis is provided, which guarantees that the tracking error, state-derivative estimation error, and DNN weight estimation errors are uniformly ultimately bounded. Additionally, a persistence of excitation (PE) condition is developed for the DNN. When the PE condition is satisfied, significantly tighter bounds are obtained on the tracking and weight estimation errors, thus achieving system identification and enhanced trajectory tracking performance. As an outcome of the system identification, the DNN model can be propagated forward to predict and compensate for the uncertainty in dynamics under intermittent loss of state feedback. 
Comparative simulation results are provided on a two-link manipulator system and an unmanned underwater vehicle system with intermittent loss of state feedback, where the developed method yields significant performance improvement compared to baseline methods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Adaptive control",
"Online Learning",
"Control Theory",
"Robotics"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/cc275bb217aed56a97413df094393185f491e801.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/1d455e5df379b8d5f710c21f8216c452ec750f76.zip"
},
"title": {
"value": "Simultaneous Online System Identification and Control using Composite Adaptive Lyapunov-Based Deep Neural Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5AJ8R4z5g0 | Potential Outcomes Estimation Under Hidden Confounders | main | Active | Confounders;Causal Inference;Treatment Effects | causal reasoning | 1;3;3;6 | 4;4;4;3 | 2;2;2;3 | 2;1;2;2 | 2;2;3;3 | 3.25 | 3.75 | 2.25 | 1.75 | 2.5 | -0.889297 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Dear reviewer aUnR,\n\nI am the student author, and I am writing to sincerely apologize for the oversight in copying author names from the NIH website, specifically from: https://pubmed.ncbi.nlm.nih.gov/38641741/\n\nWhile translating the reference from the NIH website into bibtex format, I inadvertently made errors in transcribing the full names. The original reference was provided in the following style:\n\nFeuerriegel S, Frauen D, Melnychuk V, Schweisthal J, Hess K, Curth A, Bauer S, Kilbertus N, Kohane IS, van der Schaar M. Causal machine learning for predicting treatment outcomes. Nat Med. 2024 Apr;30(4):958-968. doi: 10.1038/s41591-024-02902-1. Epub 2024 Apr 19. PMID: 38641741. \n\nIn the process of converting this reference to a bibtex format, I mistakenly altered some of the names. I cannot pinpoint precisely how this happened, but I want to clarify that the reference was not generated from scratch by a language model (LLM). I used tools like Writefull and Grammarly at the sentence level (as disclosed in the submission), and these may have unintentionally modified the names.\nNote that all other references are correct as I just double checked them and there were no errors in the other names. \n\nI want to assure you that no part of the paper was generated by an LLM, aside from sentence-level edits as disclosed. \n\nBest,\nStudent Author"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "My bad--his title is Senior Program Chair. Somehow there are too many different titles, and I thought it was the same."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "My bad--his title is Senior Program Chair"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "I had a discussion on email with Carl Vondrick which the ICLR webpage calls him a Senior Area Chair"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "I did have a discussion with Carl Vondrick<[email protected]>"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Hi Senior Author, \n\nI am your Senior Area Chair, and you did not have a discussion with me. Please refrain from claiming that you have had interactions that you have not. \n\nBest,\nYour Senior Area Chair"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Untrue Claim By Senior Author"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "I just had an email discussion with the senior program chair.\n\nAgain, you made some very serious allegations, and it was so serious that made me extremely upset. I do not get upset by a paper being rejected (as I said we all know about the quality of the reviews in ML conferences), but these kinds of allegations are too much.\n\nI have foreign students who are not familiar with other national names, and one of them had a couple of typos in entering a first name or two in one reference (while working very late). I am sorry if you may be one of the authors of that paper. \n\nJust so that everyone knows, I always re-write the entire text of the paper as students' English is not perfect. Granted that I missed a couple of typos in one of the references but the allegation that we produced this paper with LLMs is ridiculous.\n\nNow you say:\n\n\"Directly at the beginning of the reviewing period, I wrote a comment to the program committee about potential ethical concerns. I thought that this comment would become public during rebuttal time. Therefore, I referred to the \"comment above\" in my review. As this is not the case, I will repost my comment below.\"\n\nWhat makes you think a couple of typos make a paper LLM generated while your own review is so incoherent?\n\nIn anyways, my comment that your review was LLM generated was out of being provoked by your allegations. I am sorry about that. However, I maintain that your review is incoherent\\ and has many issues. That said, I appreciate the time that you put into it. I do not claim to be a better reviewer than you are\n\nBest wishes\n\nSenior Author"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "I just had a discussion with Senior Program Chair"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "You made very serious allegations. Now you say there were some typos.\nSince you do not apologize and do not seem to retract your statements/reviews, I will have to write to the TPC chairs and demand an investigation.\n\nYour review is non-sense, but as I said, I am used to nonsense. Your comments are preposterous.\n\nSenior author"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Is this an apology?"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Dear authors,\n\ndear PC, SAC, Ac,\n\nIf there is reason to believe my review is machine-generated or in some way uncoherent, I am happy to explain my comments in more detail. Directly at the beginning of the reviewing period, I wrote a comment to the program committee about potential ethical concerns. I thought that this comment would become public during rebuttal time. Therefore, I referred to the \"comment above\" in my review. As this is not the case, I will repost my comment below. \n\nOf note, the reviewers make **the same mistake** in their official answer to my review, accusing me of providing unsubstantiated, misleading, and unethical reviews. The next time the authors make such a strong statement I would highly encourage them to check on the correctness of their answer.\n\n*Dear AC, SAC, PC,*\n\n*While reviewing the paper, some ethical concerns regarding truthful scientific practice and the misuse of large language models arose. I would like to ask your opinion on this matter and the potential consequences.*\n\n*Specifically, the concerns arose when reading this version of a reference stated in the submitted paper:*\n\n*Stefan Feuerriegel, **Daniel** Frauen, **Viktoria** Melnychuk, **Julian** Schweisthal, **Katharina** Hess, Alicia Curth, **Sebastian** Bauer, Niki Kilbertus, Isaac S Kohane, and Mihaela van der Schaar. Causal machine learning for predicting treatment outcomes. Nature Medicine, 30(4):958–968, 2024.*\n\n*The correct reference should read:*\n\n*Stefan Feuerriegel, **Dennis** Frauen, **Valentyn** Melnychuk, **Jonas** Schweisthal, **Konstantin** Hess, Alicia Curth, **Stefan** Bauer, Niki Kilbertus, Isaac S Kohane, and Mihaela van der Schaar. Causal machine learning for predicting treatment outcomes. Nature Medicine, 30(4):958–968, 2024.*\n\n*I can only explain this uncommon mistake through the use of LLMs. I am aware that the stated issue is not extremely severe. 
However, it raises questions regarding the misuse of LLMs in other parts of the work.*\n\n*Best regards,*\n\n*Reviewer aUnR*"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Answer to unjustified accusation"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Dear TPC Chairs, Senior Area Chairs, Area Chairs\n\nI am the senior author of this submission. I am writing to report a potential violation of ethics codes by reviewer aUnR.\nAs you can see the reviewer makes the allegation that the citations of our paper are non-existent, fake and machine generated and that the paper in part may be LLM generated. \n\nIn the response that I just posted, we gave every single citation of the paper and provided links to every citation. This demonstrates the falseness of allegations made by the reviewer.\n\nWhile we value constructive criticism, the reviewer makes several baseless claims, vague and non-sensial critiques, and the review has many frequent inaccuracies. We believe that these issues hinder the academic integrity of the review process. \n\nNote that I am a seasoned researcher and I and my students are used to low quality reviews and materially wrong reviews at ML conferences, but I find the reviewer aUnR baseless allegations very outrageous.\n\nI am writing to you to demand an ethics investigation into reviewer aUnR actions in this matter. From reading his/her/their review, I suspect that this review may be machine generated in part (as it is very non-coherent).\n\nI will write to you directly (outside of the Openreview platform) to formally demand an investigation unless the reviewer aUnR retract his/her/their review.\n\nBest regards, \nSenior Author"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Demanding Investigation of Reviewer aUnR for Ethical Violation"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "As shown, there is no evidence to support your claim of false references. Furthermore, suggesting that more of our manuscript may be generated by an LLM, based solely on this incorrect assertion, is speculative and lacks any concrete basis.\n\nIncorrect Line References:\n\nSeveral of the reviewer’s comments are linked to irrelevant line numbers, making them difficult to address:\nLine 58: You reference this line to be a paragraph headline, yet it points to Figure 1.\nLine 70: Your comment about missing references is directed at a figure caption, where no references are applicable.\nLine 35: There is no discussion of consistency at this line, contrary to your claim.\nThese errors suggest a significant lack of attention to the manuscript or confusion about its content.\n\nVague and Unhelpful Critiques:\nYou state that our presentation \"contains errors, hindering easy understanding of the line of thought,\" without specifying any instance of such errors. This type of feedback is too vague to be actionable, and we ask for specific examples to improve our work meaningfully.\n\nTechnical Misunderstandings:\nYou question the definition of $g$ in Proposition 3.2, although it is explicitly defined within the proposition. This indicates either a lack of familiarity with standard mathematical notations or insufficient attention to the manuscript. \n\n\n\nConclusion:\n\nWhile we value constructive criticism, your review includes several baseless claims, vague critiques, and frequent inaccuracies. We believe that these issues hinder the academic integrity of the review process. We are used to low quality reviews and materially wrong reviews at ML conferences, but your baseless allegations are outrageous. 
\n\nWe request that either you immediately show some remorse, and enter a serious apology or else we will write to technical committee with the screenshot of your dishonest comments included and request an ethics investigation in this matter.\n\n Best regards,\nThe Authors."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Continuation Response to your wrong, unethical, careless and probably machine generated allegations/review (2)"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "[15] Tobias Hatt, Daniel Tschernutter, and Stefan Feuerriegel. Generalizing off-policy learning under sample selection bias. In Uncertainty in Artificial Intelligence, pp. 769–779. PMLR, 2022b.\nLink: https://proceedings.mlr.press/v180/hatt22a.html\n\n[16] Miguel A Hernán and James M Robins. Causal Inference: What If. Chapman & Hall/CRC, BocaRaton, 2020.\nLink: https://www.hsph.harvard.edu/miguel-hernan/wp-content/uploads/sites/1268/2024/01/hernanrobins_WhatIf_2jan24.pdf\n\n[17] Jennifer L Hill. Bayesian nonparametric modeling for causal inference. Journal of Computational and Graphical Statistics, 20(1):217–240, 2011.\nLink: https://www.tandfonline.com/doi/abs/10.1198/jcgs.2010.08162\n\n[18] Guido W Imbens. Causal inference in the social sciences. Annual Review of Statistics and Its Application, 11, 2024.\nLink: https://www.annualreviews.org/content/journals/10.1146/annurev-statistics-033121-114601\n\n[19] Guido W Imbens and Donald B Rubin. Causal inference in statistics, social, and biomedical sciences. Cambridge University Press, 2015.\nLink:https://www.cambridge.org/core/books/causal-inference-for-statistics-social-and-biomedical-sciences/71126BE90C58F1A431FE9B2DD07938AB\n\n[20] Nathan Kallus and Angela Zhou. Confounding-robust policy improvement. Advances in neural information processing systems, 31, 2018.\nLink: https://papers.nips.cc/paper_files/paper/2018/hash/3a09a524440d44d7f19870070a5ad42f-Abstract.html\n\n[21] Nathan Kallus, Aahlad Manas Puli, and Uri Shalit. Removing hidden confounding by experimental grounding. Advances in neural information processing systems, 31, 2018\nLink: https://papers.nips.cc/paper_files/paper/2018/hash/566f0ea4f6c2e947f36795c8f58ba901-Abstract.html\n\n[22] Nathan Kallus, Xiaojie Mao, and Angela Zhou. Interval estimation of individual-level causal effects under unobserved confounding. In The 22nd international conference on artificial intelligence and statistics, pp. 2281–2290. 
PMLR, 2019.\nLink: https://proceedings.mlr.press/v89/kallus19a.html\n\n[23] Robert J LaLonde. Evaluating the econometric evaluations of training programs with experimental data. The American economic review, pp. 604–620, 1986.\nLink: https://www.jstor.org/stable/1806062\n\n[24] Haoxuan Li, Kunhan Wu, Chunyuan Zheng, Yanghao Xiao, Hao Wang, Zhi Geng, Fuli Feng, Xiangnan He, and Peng Wu. Removing hidden confounding in recommendation: a unified multi-task learning approach. Advances in Neural Information Processing Systems, 36, 2024.\nLink: https://openreview.net/forum?id=4IWJZjbRFj\n\n[25] Valentyn Melnychuk, Dennis Frauen, and Stefan Feuerriegel. Bounds on representation-induced confounding bias for treatment effect estimation. In The Twelfth International Conference on Learning Representations, 2024\nLink: https://openreview.net/forum?id=d3xKPQVjSc.\n\n[26] Karl Popper. The logic of scientific discovery. Routledge, 2005.\nLink: https://www.taylorfrancis.com/books/mono/10.4324/9780203994627/logic-scientific-discovery-karl-popper-karl-popper\n\n[27] Jonathan Richens and Tom Everitt. Robust agents learn causal world models. In The Twelfth International Conference on Learning Representations, 2024.\nLink: https://openreview.net/forum?id=pOoKI3ouv1\n\n[28] Paul R. Rosenbaum. Observational Studies. Springer, New York, 2nd edition, 2002.\nLink: https://link.springer.com/book/10.1007/978-1-4757-3692-2\n\n[29] Paul R Rosenbaum and Donald B Rubin. The central role of the propensity score in observational studies for causal effects. Biometrika, 70(1):41–55, 1983.\nLink: https://academic.oup.com/biomet/article/70/1/41/240879\n\n[30] Jonas Schweisthal, Dennis Frauen, Mihaela Van Der Schaar, and Stefan Feuerriegel. Meta-learners for partially-identified treatment effects across multiple environments. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 43967–43985. 
PMLR, 21–27 Jul 2024.\nLink: https://proceedings.mlr.press/v235/schweisthal24a.html\n\n[31] Uri Shalit, Fredrik D Johansson, and David Sontag. Estimating individual treatment effect: generalization bounds and algorithms. In International Conference on Machine Learning, pp. 3076–3085. PMLR, 2017.\nLink: https://proceedings.mlr.press/v70/shalit17a.html\n\n[32] Claudia Shi, David Blei, and Victor Veitch. Adapting neural networks for the estimation of treatment effects. Advances in neural information processing systems, 32, 2019.\nLink: https://papers.nips.cc/paper_files/paper/2019/hash/8fb5f8be2aa9d6c64a04e3ab9f63feee-Abstract.html\n\n[33] Jeffrey A Smith and Petra E Todd. Does matching overcome lalonde’s critique of nonexperimental estimators? Journal of econometrics, 125(1-2):305–353, 2005.\nLink: https://www.sciencedirect.com/science/article/pii/S030440760400082X\n\n[34] Stefan Wager and Susan Athey. Estimation and inference of heterogeneous treatment effects using random forests. Journal of the American Statistical Association, 113(523):1228–1242, 2018.\n\nLink: https://www.tandfonline.com/doi/full/10.1080/01621459.2017.1319839"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Continuation Response to your wrong, unethical, careless and probably machine generated allegations/review (1)"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5Aem9XFZ0t | Zero-shot Concept Bottleneck Models via Sparse Regression of Retrieved Concepts | main | Active | concept bottleneck models;interpretability;retrieving;sparse linear regression;vision-language models | interpretability and explainable AI | 1;5;5;5;6;6 | 4;2;5;3;5;5 | 4;2;3;3;3;3 | 1;3;2;2;3;3 | 3;3;3;3;3;3 | 4.666667 | 4 | 3 | 2.333333 | 3 | 0.169842 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Why was linear regression used instead of lasso in L426-427?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-written and easy-to-follow.\n- The main idea is straightforward and intuitive.\n- Target task performance is competitive. Table 1 shows that the proposed method even outperforms the performance of the original CLIP, and Table 2 shows that a simple trainable variant of the proposed method outperforms the previous method in the same setting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses zero-shot scenario for concept bottleneck models (CBMs). Previous methods successfully eliminate a dependency of manually annotated concept labels via large language models (LLMs) and vision-language models (VLMs). However, they still require training the models on target dataset and not applicable to the zero-shot scenarios. Zero-shot CBMs (Z-CBMs) constructs large-scale concept bank using caption datasets and noun parser, retrievals concept candidates following input images, and predicts final labels using the retrieved concepts. The experiments demonstrate the effectiveness of the proposed method on target task performance and interpretability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The reason for performance improvement compared to the original CLIP is unclear. The paper argues that it is due to a reduced modality gap in the concept-to-label mapping. However, this claim is not fair since the modality gap still exists in the input-to-concept mapping. Furthermore, since CLIP is trained on image-to-text matching, the claim that performance improves due to a reduced modality gap in text-to-text matching also requires sufficient references.\n\n- I'm not entirely clear on the advantages of this approach over the most basic interpretable approach based on CLIP. Specifically, one could retain the standard CLIP classification process and simply retrieve concepts from a concept bank using visual features for interpretability. While this baseline is hard to address concept intervention, it doesn't seem to offer significant differences in terms of interpretability.\n\n- The performance difference between linear regression and lasso in Table 6 is unclear. Linear regression should estimate the original visual features ($f_V(x)$) more accurately, so why does linear regression perform so poorly here?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "-"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Can you provide more detailed comparisons to prior work by directly using their concept sets? I am a bit lost in understanding how much the comparisons to prior work are directly one-to-one comparable and what elements make a (positive/negative) difference?\n- How does the hyper-parameters like lambda for linear regression and lasso affect the results in terms of Table 1, 2, 3 results?\n- In regards to the performance gains over CLIP image embedding: how well the method performs if you were to use a random matrix as $F_{C_x}$ in Eq 3 & 4, instead of true concept embeddings, for various K values?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper’s work is an interesting addition to the research on CBMs. It addresses the missing supervision problem that seems to remain unaddressed in prior CBM work and tackles it in a relatively meaningful manner.\n- The XAI-performance results in Table 3 look impressively good.\n- The method is simple and easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a “zero-shot” concept bottleneck model (CBM). The original idea in CBM is to first represent a given image in the concept space, ie. as a weighted combination of existing concepts and then classifying the image using these concept-weights based image representation. The original works rely supervised training to build image→concept and concept→class predicts. More recent works reduce labelling efforts by leveraging the prior knowledge available in vision-language models (VLMs) and LLMs, e.g. “Label-Free Concept Bottleneck” and others.\n\nThe recent works on avoiding supervised concept predictor training however still require a training dataset to train concept→class predictor. This paper basically aims to avoid supervision altogether. The proposed method semi-automatically builds a large concept vocabulary (over existing image caption datasets) , selects the most relevant concepts specifically for a given image according to the image-text representations of a pretrained VLM (eg CLIP) and learns an image-specific concept-text-embeddings to image-embeddings mapping based on reconstruction loss. The resulting concept-embedding based approximation to image’s representation is used to classify the image based on cosine similarity to the textual embeddings of target class labels."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper’s results in Table 1 are not impressive , and this is very much expected as the learned CBM-based representation is afterall an image-specific approximation to the image’s visual representation. It is “normal” that it performs very similar to the CLIP baseline, and I am not sure if the improved performance implies any significant achievement, as the paper lacks any substantial analysis on it (despite commenting that it might be thanks to the reduced representation gap).\n- It seems to me that the paper’s results in Table 2 (CLIP-Score) can be misleading because it seems to be measuring the average correlation between the image’s CLIP-image representation and the obtained CBM representation. As the CBM representation of this paper is a direct reconstruction of CLIP-image representation, it seems again “normal” (not interesting) to observe high scores. (Please correct me if I’m missing something here.)\n- How were the hyper-parameters like lambda tuned? Is lambda (and other hyper-parameters, if exists) all the same across all experiments and all datasets? What methodology was used? ie. if one wants to reproduce the exact results from scratch, how should he/she tune the hyper-parameter(s) to reach the same value(s).\n- The paper seems to be missing one relevant paper from the zero-shot learning domain: “Attributes2Classname: A discriminative model for attribute-based unsupervised zero-shot learning”. Similar to the proposed work, this paper learns to represent images in terms of a linear transformation of relevant concepts’ (predicted attributes’) textual embeddings, with and without labelled image dataset. 
It seems to share many motivations like reducing modality gap via representing images in terms of a combination of concept (attribute) textual embeddings and avoiding image supervision, and therefore can/should be discussed within the paper.\n- The method heavily relies on the prior knowledge of pre-trained VLM (CLIP), and therefore, cannot be used in incompatible domains; unlike (more) supervised CBMs. In that sense, as this paper already relies on a huge training set that the VLM pre-training requires, it is not clear if any real achievement is made in terms of building human-understandable concept-based image representations with reduced supervision, from a philosophical point of view."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "None."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) The method is simple.\n\n(2) The results are good. The authors show that their ZS-CBM achieves SoTA accuracy among prior CBMs. They also demonstrate the quality (relevance) of the selected concepts using CLIP-score results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a variant of concept bottleneck models (CBM) which uses sparse linear regression on a databank of concepts to approximate the visual feature of each image. The resulting CBM can achieve reasonable zero-shot accuracy and CLIP-score without additional training. The proposed framework does not require additional data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I can't find anything wrong with this paper except perhaps the lack of technical innovation. There is abundant literature on concept bottleneck models. Sparse regression on concept features is very widely used. Using retrieval to find relevant concepts is not technically interesting. In my opinion, this work does not add much value to the existing CBM literature."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. This article compares various methods related to the concept bank. However, I may have overlooked the testing results for a specific approach: constructing a concept bank using a question-and-answer method similar to the label-free CBM [1]. This involves designing a smaller concept bank tailored to the problem domain.\n\n2. In this paper, it is mentioned that the regular term in sparse regression can help reduce conceptual redundancy. Could you please provide some specific visual results to illustrate this effect? Additionally, I’m curious about the advantages of sparse regression compared to using distance or other metrics in feature space to determine weights. If there are any experimental results that demonstrate this comparison, it would certainly enhance the persuasiveness of the method presented in your paper. \n\n3. I noticed the inference time presented in Figure 6. Could the authors clarify whether this represents the total time for the entire zero-shot inference process? As the scale of the concept bank expands, it is important to understand how embedding and concept retrieval times may increase. I would appreciate it if the authors could provide a breakdown of the reported times, detailing the components of the inference process (e.g., embedding, concept retrieval, regression) and how these times are affected as the concept bank size increases."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. To the best of my knowledge, this paper is the first to propose a zero-shot Concept Bottleneck Model (CBM), marking a significant contribution to the field of CBMs. Furthermore, the proposed zero-shot CBM method exhibits predictive capabilities comparable to those of CLIP, while its architecture enhances the model's interpretability.\n\n2. The experiments presented in this paper are comprehensive and well-executed, encompassing 12 datasets. Despite the absence of suitable benchmarks, the authors have effectively compared their method with zero-shot CLIP and other training head approaches.\n\n3. This paper introduces the concept of a \"concept bank\" and employs an efficient concept retrieval method for label prediction based on this foundation. The concept bank is constructed through the analysis of extensive datasets. In Section 4.6.2 and Table 1, the authors provide a detailed comparison of zero-shot performance across different sizes of concept banks, demonstrating that expanding the concept bank enhances the expressive capacity of the CBM, thereby improving its zero-shot performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper utilizes a large-scale concept bank and a dynamic concept retrieval method to make high-accuracy predictions without requiring additional training on a target dataset. By employing sparse regression to identify and weigh the importance of relevant concepts from the bank, Z-CBMs achieve effective concept-to-label mappings while ensuring model interpretability, addressing the limitations of previous concept bottleneck models that relied on extensive manual data collection and training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While this article provides a valuable comparison of various methods related to the concept bank, it appears that the testing results for a specific approach—constructing a concept bank using a question-and-answer method similar to the label-free CBM [1]—are not included. Including this method, particularly in the context of designing a smaller, domain-specific concept bank, could enhance the comprehensiveness of the analysis. I encourage the authors to include a comparison with a concept bank generated using the question-and-answer approach from the label-free CBM, as this would provide a deeper understanding of the different concept bank construction approaches.\n\n2. The paper mentions that the regular term in sparse regression can help reduce conceptual redundancy; however, it lacks specific visual results to illustrate this effect. Additionally, the advantages of using sparse regression in comparison to other distance metrics in feature space for weight determination are not clearly established. To strengthen the paper, I suggest that the authors provide visual examples comparing the concepts selected by sparse regression versus other methods, demonstrating how redundancy is reduced. Furthermore, including a quantitative comparison of sparse regression against other weighting methods would enhance the clarity and convincing nature of the proposed method.\n\n\nReference:\n[1]. Oikarinen, Tuomas, et al. \"Label-free concept bottleneck models.\" arXiv preprint arXiv:2304.06129 (2023)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The main question is how is this work different from [**ConSe 2014**] (and other similar works), and then beyond that they use an ImageNet classifier to transform images to text, and here a CLIP space has been used. So, please clarify what novel contributions the method makes beyond using a CLIP embedding space instead of Word2Vec + ImageNet classes? \n\n2. Please clarify: (a) the used CLIP similarity function, (b) Fcx W being normalized, (c) the influence of lambda.\n\n3. Please discuss the open directions (taken from previous research): weighing based on target classes, restricting W to positive weights only, using negative concepts in a proper manner, using similarity value.\n\n3. From Figure 3 it becomes clear that some concepts are negated, for example `NOT macro rope` (bottom row, right). How is this defined? Is the `not` a part of the concept, and hence used encoded in `f_T(concept)` vector, or is the `not` a result of the linear regression, for these concepts with a negative weight in W? Please elaborate whether it is conceptually desired that concepts in the top K most related concepts for an image could be negative weighed for the image-text embedding."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The ideas presented in this paper make a lot of sense, and the manuscript is clearly written. The relevance becomes clear from the amount of related research in this direction of ‘attribute-based’ or ‘concept-based’ zero-shot classification, which dates back at least to 2010s. However, this is also the largest weak point of the paper, the novelty compared to papers and ideas presented back then is not clearly stated, nor is the paper compared to any other zero-shot method besides retrieving in the CLIP space. Of course some techniques / methods did not exist back then (eg the CLIP embedding space), that does not make this paper substantially different."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper zero-shot concept bottleneck models are discussed as a means of obtaining explainable (in terms of concepts) zero-shot classifiers (as in classes without explicit training data). The idea is to use a concept bottleneck model to translate an image into a set of visual concepts, and then use these concepts to classify the image into one of the target classes of the benchmark dataset. Since the train/test data of the benchmark data is not explicitly used, this is a form of zero-shot classification. In the proposed method, the concept bank consists of about 5M concepts obtained from image captioning datasets (including e.g. Flickr-30K and the YFCC-15M dataset). The image and all the concepts are encoded in the CLIP embedding space, and then the top K most similar ones are used. From this set of concepts a sparsely weighted CLIP feature vector is constructed, which is then used to find the nearest target class y. This model is evaluated on 12 classification tasks and performs similar to zero-shot CLIP."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "# Major weakness\nThe major weakness of this submission is the novelty with respect to previous work (likely of a previous generation, before deep learning took off). The idea of zero-shot classification in a visual-semantical space based on a joint embedding is not novel. A good example is [**ConSe 2014**], where imagenet classifiers are used together with a Word2Vec space to compose a Word2Vec embedding for an image (based on the classifier outputs and the word embeddings of the class names], which is then used for zero-shot classification in text space. This is extremely similar to the posed idea, except that now a CLIP space is used. Also the idea of using a (sparse) regression of the concepts has been explored before [**Write 2013, Costa 2014, Objects2Actions 2015**]. None of these papers uses an explicit attribute/concept-to-class mapping as the seminal work of Lampert et al. [**AwA 2013**], they all used a discovered attribute-to-class mapping based on an embedding space [**ConSe 2014, Objects2Actions 2015**] or based on co-occurrence statistics or web search [**Costa 2014**], including co-occurences from the YFCC dataset, also used in this work.\n\nThe *only* difference I see with respect to these works, is that the concept bank used in this paper is much larger and that a CLIP embedding is used. Based on the previous works, the following questions are interesting, but not explored in this submission:\n- The weighting of concepts is now based on the input image, it could also be done based on the target classes (ie, each class selects the top-K concepts which are most similar, or find the most co-occurring concepts in the captioning datasets)\n- The weights of a concept in the linear regression model can be negative, this is unlikely to be beneficial given that the used concepts are the top-K most relevant for this particular image. Would it make sense to restrict W to be positive? \n- What is the influence of lambda on the performance? 
And on the sparsity? Is the optimal lambda dataset specific? It seems that the current value (1x10-5) is extremely small, compared to the size of W (which has K weights, with K ~1000). \n- Using proper negative concepts for a class is likely to be beneficial, given that knowing what is not related to the target class is a strong signal, could that be explored as well?\n- What similarity function is used in the clip space? Is it cosine similarity? Is Fcx W normalized?\n- The similarity between a concept and the image is now an indicator function only (concept in top-K concepts for this image). While, the similarity value might contain a strong signal of relevance. It could make sense to use the similarity value between the image and the concepts also in constructing the concept clip embedding of the image.\n\n# Secondary weaknesses / suggestions\n1. The second step, the final label prediction (Eq 4) is a purely textual reasoning problem. In the light of the enormous reasoning power of the LLMs, it could be explored if LLMs would be able to reason about the final class provided the top-K concepts from the previous stage.\n\n2. A suggestion for an additional exploration. In this submission, the CLIP space is searched in a cross-modal setting, from an input image to a target/output text. While in [**ReCo 2024**] it has been shown that uni-modal search works much better (image-image) and then use cross-modal fusion (use the textual description of that image). This could be exploited (e.g.) by using (image, caption) pairs from the image datasets. It would be interesting to study if different search strategies improve the zero-shot classification performance.\n\n## Minor/Nitpicks\n- In table 1: the bold facing of performance should include the zero-shot/linear-probe CLIP.\n- It is unclear why the zero-shot CLIP model should be considered as the upper bound of the proposed method. 
The proposed method uses the (implicit) knowledge of millions of additional (image, text) pairs. \n\n# References\n\n- [**AwA 2013**]: Attribute-based classification for zero-shot visual object categorization, TPAMI 2013.\n- [**ConSe 2014**]: Zero-Shot Learning by Convex Combination of Semantic Embeddings, ICLR 2014.\n- [**Costa 2014**]: COSTA: Co-Occurrence Statistics for Zero-Shot Classification, CVPR 2014.\n- [**Objects2Actions 2015**]: Objects2action: Classifying and localizing actions without any video example, ICCV 2015.\n- [**Write 2013**]: Write a Classifier: Zero-Shot Learning Using Purely Textual Descriptions, ICCV 2013.\n- [**ReCo 2024**]: Retrieval Enhanced Contrastive Vision-Text Models, ICLR 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The authors may consider comparing the inference speed between the proposed approach and existing CBMs.\n\nI’m wondering whether the visual and textual features are in the same space as shown in Fig 2 (a) for fitting image features with textual features of candidate concepts, considering that they are from two modalities and in the pre-training stage, the text features and visual features are aligned by cross-entropy loss rather than strictly calibrated by L2 Loss. The authors may consider showing a t-SNE figure to clarify this.\n\nThe authors may consider evaluating the interpretability of the candidate concepts. In my opinion, concepts such as “Not maltese dog terrier” cannot provide interpretable information for identifying categories."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper provides a novel interpretable zero-shot image classification method.\n\nCompared with existing Concept Bottleneck Models, the proposed method eliminates the requirement of labeled training data.\n\nThis paper provides a tool for researchers to understand the semantics of CLIP-extracted visual features."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a novel approach for achieving zero-shot image classification based on explainable Concept Bottlenecks. Compared with existing Concept Bottleneck Models, the authors proposed approach gets rid of the requirement of labelled training data for learning the mapping network from concepts to categories by fitting the image representation with concept features. The experimental results verified the effectiveness of this approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The inference cost is significantly increased due to the extremely large concept bank and the test-time learning process.\n\nThis paper lacks discussion of other training-free concept bottleneck approaches, e.g., “Visual Classification via Description from Large Language Models”."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose an interpretability model family called zero-shot concept bottleneck models, which can provide concept-based explanations for its prediction in fully zero-shot manner."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024zeroshot,\ntitle={Zero-shot Concept Bottleneck Models via Sparse Regression of Retrieved Concepts},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5Aem9XFZ0t},\nnote={under review}\n}"
},
"abstract": {
"value": "Concept bottleneck models (CBMs) are inherently interpretable neural network models, which explain their final label prediction by high-level semantic \\textit{concepts} predicted in the intermediate layers. Previous works of CBMs have succeeded in achieving high-accuracy concept/label predictions without manually collected concept labels by incorporating large language models (LLMs) and vision-language models (VLMs). However, they still require training on the target dataset to learn input-to-concept and concept-to-label correspondences, incurring target dataset collections and training resource requirements. In this paper, we present \\textit{zero-shot concept bottleneck models} (Z-CBMs), which are interpretable models predicting labels and concepts in a fully zero-shot manner without training neural networks. Z-CBMs utilize a large-scale concept bank, which is composed of millions of noun phrases extracted from caption datasets, to describe arbitrary input in various domains. To infer the input-to-concept correspondence, we introduce \\textit{concept retrieval}, which dynamically searches input-related concepts from the concept bank on the multi-modal feature space of pre-trained VLMs. This enables Z-CBMs to handle the millions of concepts and extract appropriate concepts for each input image. In the concept-to-label inference stage, we apply \\textit{concept regression} to select important concepts from the retrieved concept candidates containing noisy concepts related to each other. To this end, concept regression estimates the importance weight of concepts with sparse linear regression approximating the input image feature vectors by the weighted sum of concept feature vectors. Through extensive experiments, we confirm that our Z-CBMs achieve both high target task performance and interpretability without any additional training."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"concept bottleneck models",
"interpretability",
"retrieving",
"sparse linear regression",
"vision-language models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/cbb14858701db204b7603bbb3643e84d9163813d.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/313e24061bf88d7557d235acb657196f824e00b5.zip"
},
"title": {
"value": "Zero-shot Concept Bottleneck Models via Sparse Regression of Retrieved Concepts"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5AoOHSickG | FoundationForensics: Traceback Backdoor Attacks for Vision Foundation Models | main | Active | Backdoor Attacks;Foundation Models | alignment, fairness, safety, privacy, and societal considerations | 5;5;5;6 | 4;3;4;3 | 3;2;2;3 | 2;2;2;3 | 2;3;3;2 | 5.25 | 3.5 | 2.5 | 2.25 | 2.5 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Threat Model Assumptions: The reliance on a clean and poisoned sample pair weakens the overall contribution of the proposed technique. Under this threat model, a simpler and more intuitive approach could involve cropping the backdoor trigger from the detected sample and comparing it with training samples. Given that the paper primarily evaluates patch triggers, such a basic method might suffice and diminish the necessity for the more complex FoundationForensicsframework.\n\n2. Scalability: The paper does not adequately address the scalability of FoundationForensics, which is crucial given the large-scale, unlabeled data (e.g., LAION[1], DataComp[2]) typically used for training vision foundation models. The current evaluations are limited to small datasets, not reflective of real-world training scales. Although there may not be significant technical barriers to extending the framework to larger datasets, concerns about computational overhead and the storage required for intermediate model checkpoints remain. Additional experiments and discussions in this context would strengthen the paper.\n\n3. Theoretical Analysis Validation: Definition I claims that when a backdoor sample updates the model weights, the cosine similarity between it and a reference sample is greater compared to updates from clean samples. Numerical experiments that support this assertion would enhance the theoretical argument. Furthermore, recent research[3] indicates that backdoor training often converges faster than benign tasks. Could this faster convergence create counterexamples to the claims made in Definition I?\n\n4. Related Works: The paper lacks a comprehensive discussion and comparison with similar backdoor forensics frameworks, such as [4]. Including these would situate FoundationForensics more clearly within the existing body of research.\n\n5. Adaptive Attacks: The framework does not explore potential adaptive attack scenarios. 
For example, how would FoundationForensics perform if a model contained multiple backdoors? Is it assumed that defenders would possess samples corresponding to each backdoor trigger?\n\nReference \n---\n\n[1] Schuhmann, Christoph, et al. \"Laion-400m: Open dataset of clip-filtered 400 million image-text pairs.\" arXiv preprint arXiv:2111.02114 (2021).\n \n[2] Gadre, Samir Yitzhak, et al. \"Datacomp: In search of the next generation of multimodal datasets.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[3] Li, Yige, et al. \"Anti-backdoor learning: Training clean models on poisoned data.\" Advances in Neural Information Processing Systems 34 (2021): 14900-14912.\n\n[4] Cheng, Siyuan, et al. \"Beagle: Forensics of deep learning backdoor attack for better defense.\" arXiv preprint arXiv:2301.06241 (2023)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well-written and easy to follow.\n\n2. The proposed method is novel and intuitive.\n\n3. The evaluation results demonstrate the potential of the proposed FoundationForensics framework"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces FoundationForensics, a framework designed for tracing backdoor samples within vision foundation models. The approach leverages detected pairs of backdoor and clean reference samples to compute their contributions to the backdoor loss during the pre-training phase. Samples exhibiting unusually high contributions are flagged as potential backdoor samples. Empirical results across single-modal and multi-modal vision foundation models, tested on three datasets, indicate that FoundationForensics effectively identifies poisoned samples from pre-training sets and surpasses existing baseline methods. The paper also includes a theoretical justification for the framework."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The threat model is weak, particularly assuming having a pair of clean and poisoned samples weaken the contribution of the proposed technique.\n\n2. The scalability of the proposed framework is well evaluated, especially on larger datasets.\n\n3. Lack of numerical experiments to support the assumption and hypothesis used in the theoretical analysis.\n\n4. Lack of discussion on several related works.\n\n5. Lack of discussion on potential adaptive attacks, such as a poisoned model with multiple backdoors."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please respond to weaknesses mentioned above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Timeliness and Importance of Topic: The focus on tracing back malicious pre-training inputs in foundation models addresses a critical and timely challenge, especially as the use of such models becomes pervasive across various applications. This work is particularly relevant given the increasing dependence on large-scale, unlabeled datasets sourced from the Internet, where the risk of encountering maliciously poisoned data is high.\n\n2. Theoretical Analysis of Maliciousness Score: The inclusion of a theoretical analysis that articulates the properties of the proposed maliciousness score enhances the credibility and robustness of the approach. By providing formal proofs that poisoned inputs contribute disproportionately to the similarity metric exploited by the backdoor, the paper grounds its empirical findings in solid theoretical foundations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces FoundationForensics, a pioneering method for tracing back poisoned pre-training inputs in foundation models after a backdoor attack. These models, pre-trained on uncurated data from the Internet, are vulnerable to such attacks, where an attacker inserts malicious inputs to manipulate the model's outputs. FoundationForensics identifies these poisoned inputs by calculating a maliciousness score for each pre-training input and flagging those with outlier scores. The method is both theoretically secure and empirically effective, as shown through tests on various models, datasets, and attack types."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Threat Model: A significant concern with the threat model is the assumption that the pre-training dataset is always available for forensic analysis. This assumption may not hold in the context of foundation model pre-training, where the datasets used are often proprietary and not publicly accessible due to privacy or competitive reasons. The paper's applicability is thus questionable in real-world scenarios where access to pre-training data is restricted or non-existent.\n\n2. Practicality: The evaluation of the forensic method on datasets with up to 500,000 inputs (as per Table 1(a) raises concerns about its scalability and practicality. Foundation models are typically trained on datasets that are orders of magnitude larger, often encompassing billions of data points. The method's performance and feasibility on such a scale remain untested, which may limit its usefulness in practical, large-scale applications.\n\n3. Design Details: The paper distinguishes between \"all pre-training inputs\" and \"pre-training steps that involve $x_i$,\" but this distinction might not effectively capture the individual impact of $x_i$. How does the change in Eq(3) show the isolated impact of $x_i$? Also, Line 191 said, \"we approximate $f_{t+1}− f_t$ as if only the pre-training input $x_i$ was used to update the foundation model.\" why is this a fair and reasonable approximation?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. For reference and backdoor inputs, do they come from a downstream dataset and never appear in the pre-training dataset?\n2. Is this method also valid for language foundation models under backdoor attacks (e.g., POR[1] and and NeuBA[2])?\n\n[1] Shen, Lujia, et al. \"Backdoor Pre-trained Models Can Transfer to All.\" *Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security*. 2021.\n\n[2] Zhang, Zhengyan, et al. \"Red alarm for pre-trained models: Universal vulnerability to neuron-level backdoor attacks.\" *Machine Intelligence Research* 20.2 (2023): 180-193."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper presents an intriguing and novel method to trace back poisonous pre-training data for foundation models in the first time. Especially their MAD-base detection method is straightforward. \n2. The paper also offers a sound theoretical analysis, making their finding more meaningful.\n3. Regarding experimental setting, they test numerous backdoor attack methods, making their forensics method valid. There are still some spaces for improvement (See weakness)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel forensics method to trace back poisoned pre-training data for backdoored foundation models by quantifying its contribution to the backdoor event. Their proposed metric, maliciousness score, are proved to be effective through extensive experiments. In particular, their theoretical analysis makes their conclusions more reliable."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The presentation should be improved. First, the introduction of forensics is inadequate. The paper should introduce more about background of forensics method and how it can be used in real-world scenarios. Second, section 6.1 is not well-structured. For example, the description of which model is trained/finetuned on ImageNet100-B can not be found. \n2. Their proposed forensics method need the service provider to collect intermediate model checkpoints in advance. This setting makes their method less realistic.\n3. The only test input-agnostic backdoor attack. Whether their forensics is valid to the input-specifc backdoor (e.g., [1]) is unknown.\n\n\n\n[1] Lee, Yeonjoon, et al. \"Aliasing backdoor attacks on pre-trained models.\" *32nd USENIX Security Symposium (USENIX Security 23)*. 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\t**Robustness Against Advanced Attacks:** How does FoundationForensics handle cases where an attacker embeds multiple triggers in the inputs, potentially reducing the malicious score of individual inputs?\n2.\t**Generalization Beyond Vision Models:** Can the method be extended to foundation models beyond the visual domain (e.g., language models)? If so, what modifications are required?\n3.\t**Scalability:** As dataset size increases and model complexity grows, how does the computational cost scale? Are there optimization methods to reduce the storage overhead for saving checkpoints?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\t**Novelty:** This paper addresses a relevant and underexplored problem—tracking poisoned pre-training data in foundation models—filling a gap in existing backdoor defenses.\n2.\t**Theoretical Basis:** The paper offers a clear theoretical framework, including a proof that the malicious score can distinguish between poisoned and clean inputs, enhancing the credibility of the approach.\n3.\t**Comprehensive Evaluation:** The experimental analysis covers multiple datasets, various vision foundation models, and different types of backdoor attacks, showcasing the method’s generalizability and robustness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces an innovative forensic technique called **FoundationForensics**, aimed at detecting and tracing poisoned inputs in vision foundation models that have undergone backdoor attacks. The approach relies on a key observation: the similarity among poisoned inputs is generally higher than the similarity between poisoned and clean inputs. Based on this observation, the authors further introduce a \"maliciousness score\" to measure the contribution of pre-training inputs to the backdoor effect. Additionally, the paper provides a theoretical analysis of the validity of the malicious score and conducts extensive experimental evaluations across multiple foundation models and datasets. The experimental results demonstrate that FoundationForensics can effectively identify poisoned inputs with high accuracy, even in scenarios involving adaptive attacks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\t**Access Assumptions:** The method assumes access to specific pre-training checkpoints and the ability to perform gradient calculations throughout the pre-training process. However, this assumption may be impractical when dealing with pre-trained models from third-party sources.\n2.\t**Limited Coverage of Adaptive Attacks:** Although adaptive attacks were tested, the paper does not deeply explore the limitations of FoundationForensics against more sophisticated adaptive strategies, such as advanced backdoor techniques using natural features as triggers, which might obscure the contribution of poisoned inputs.\n3.\t**Storage and Computational Costs:** FoundationForensics relies on storing multiple checkpoints and computing their malicious scores, which could involve substantial computational and storage overhead. In large-scale scenarios, such costs may be prohibitive.\n4.\t**Dependency on Malicious Score Sensitivity:** The effectiveness of anomaly detection heavily relies on parameter tuning, particularly the hyperparameter \\(k\\) in the MAD method. While the paper discusses the choice of \\(k\\) in a limited setting (e.g., testing PE-II attacks on the CIFAR-10 dataset), different datasets, models, or attack scenarios may require separate adjustments for \\(k\\)."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A defense method to traceback the root cause of backdoor attacks to pre-training data for vision foundation models"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024foundationforensics,\ntitle={FoundationForensics: Traceback Backdoor Attacks for Vision Foundation Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5AoOHSickG},\nnote={under review}\n}"
},
"abstract": {
"value": "Foundation models are typically pre-trained on uncurated unlabeled data collected from various domains on the Internet. As a result, they are fundamentally vulnerable to backdoor attacks, where an attacker injects carefully crafted poisoned inputs into the pre-training data via hosting them on the Internet. A backdoored foundation model outputs an attacker-desired embedding vector for any input with an attacker-chosen trigger. In this work, we propose FoundationForensics, the first forensics method to trace back poisoned pre-training inputs for foundation models after a backdoor attack has happened and a trigger-embedded input has been detected. Our FoundationForensics first calculates a maliciousness score for each pre-training input by quantifying its contribution to the foundation model's backdoor behavior for the detected trigger-embedded input and then detects the pre-training inputs with outlier maliciousness scores as poisoned. We theoretically analyze the security of FoundationForensics and empirically evaluate it on single-modal and multi-modal foundation models, three datasets, four existing backdoor attacks, and seven adaptive ones. Our results show that FoundationForensics can accurately traceback the poisoned pre-training inputs for foundation models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Backdoor Attacks",
"Foundation Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c5bc5e5fe2c113b805eb9da029e1536439046885.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "FoundationForensics: Traceback Backdoor Attacks for Vision Foundation Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5AtHrq3B5R | PnP-Flow: Plug-and-Play Image Restoration with Flow Matching | main | Active | Plug-and-Play;Flow Matching;image restoration;inverse problems;generative modeling | generative models | 3;5;6;8 | 5;3;4;3 | 2;2;2;3 | 2;2;3;3 | 1;3;3;3 | 5.5 | 3.75 | 2.25 | 2.5 | 2.5 | -0.752618 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Thank you for your review. We will address your comments on the numerical section more thoroughly in our next response. It seems that you found certain parts of the paper unclear, while the other reviewers considered the presentation to be good. Could you kindly provide specific points where the explanation was unclear, and indicate what additional details you believe are necessary? For reference, the function F (which is the data-fidelity term) is defined on line 45, and the steps of the algorithm are detailed in Section 3.2. We look forward to making the necessary revisions to enhance the clarity of the paper."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Clarification for rebuttal"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. The formula \"y = noisy(Hx)\" uses a general definition for the noisy function. It would be helpful if the paper and experiments explored multiple types of noise to assess the method’s robustness.\n\n2. In Tables 1 and 2, the comparison is limited, particularly with only one diffusion-based method, PnP-Diff, which is a workshop paper, not a main conference paper. The authors should include comparisons with more diffusion-based methods, such as DPS, DeqIR, and DDRM, to provide a fuller view of how their method performs relative to the latest diffusion techniques.\n\n3. The authors could enhance the evaluation by including the ImageNet dataset. For the denoising and deblurring tasks. Testing the method across various noise levels and degrees of blur on a large, diverse dataset like ImageNet would offer more insight into how well the algorithm handles different types of degradation.\n\n4. In Figure 3, the visual results do not show a significant improvement over other methods (e.g., in the last row), even though the PSNR scores are higher. \n\n5. For real-world data with unknown degradation, it would be important to understand how well this method generalizes. \n\n6. It would strengthen the paper if the authors included examples of failure cases. \n\n7. In Table 3, not all methods are compared for computational time and memory usage. Including all relevant methods in this comparison would give a clearer picture of how the proposed algorithm stacks up in terms of efficiency across different benchmarks."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Low memory usage, making it suitable for high-resolution images.\n2. Consistently performs well across multiple tasks, showing stable PSNR and SSIM improvements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the PnP-Flow Matching algorithm for addressing imaging inverse problems, including denoising, super-resolution, deblurring, and inpainting. The method combines the Plug-and-Play (PnP) framework with Flow Matching (FM) models by using a time-dependent denoiser to tackle image restoration tasks. Specifically, the algorithm alternates between gradient descent on a data fidelity term, reprojection onto a flow matching path, and denoising. The experiments demonstrate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The writing quality needs Improvement. Certain explanations lack clarity, particularly in describing the algorithmic process, e.g., the function F.\n2. The details of the proposed method are insufficient.\n3. The experiment section should be improved. Please refer to the details below."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could you add comparisons with [1] and [2], or explain why those comparison are missing?\n2. Could you comment on the potential applicability/extension of your method to the blind case?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The method is training-free which makes it computationally practical.\n2. The method achieves SOTA results compared to existing flow-based methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a zero-shot method (PnP-flow) for Inverse problems based on a pre-trained flow-matching (FM) model. The method combines the plug-and-play (PnP) framework with flow matching by alternating between gradient descent steps on the data-fidelity term, reprojections onto the learned FM path, and denoising. PnP-flow achieves state-of-the-art (SOTA) results compared to existing PnP and flow-based algorithms across different image inverse problems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. My major concern is the lack of comparison to recent zero-shot methods based on a pre-trained diffusion model such as DDNM [1] and DPS [2].\n2. The proposed method is non-blind (assume the full knowledge of the degradation model) which limits its applicability.\n\n\n[1] Wang et al. Zero-Shot Image Restoration Using Denoising Diffusion Null-Space Model. ICLR 2023\n\n[2] Chung et al. Diffusion Posterior Sampling for General Noisy Inverse Problems. ICLR 2023"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1, It proposes a design a time-dependent denoiser based on a pre-trained velocity field v learned through Flow Matching\n\n2, This denoiser is integrated into an adapted Forward-Backward Splitting PnP framework that cycles through a gradient step on the data-fidelity term, an interpolation step and a denoising step\n\n3, Being computationally efficient and memory-friendly via the use of ODE"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes to use flow matching in the plug-and-play framework for image restoration. The key is to use FM model as the denoisier. To avoid the numerical challenges, it integrates the implicit FM prior into a custom denoisier."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1, Why the percpetual metrics are missing? From the visual results, it also seems that the results tend to be blurry. What’s the underlying reason? Is it due to the gradient step or the interpolation step, or something else? \n\n2, In addition, one of the advantages of these generative method is its high perceptual quality, but this method seems to have achieved good distortion performance. How about the results of employing the same end-to-end U-Net model as a simple baseline (for example, using the L1 loss)?\n\n3, Can you visualize all the intermidate resutls of all three steps for all time steps? It could better help readers understand the method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Please explain why the computational complexity and memory footprint of the proposed method is lower than the previous method. Is it due to the design of the model or the choice of the straight-line flow?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. A new plug-and-play method based on flow matching is proposed in this paper.\n\n2. The paper is well-written.\n\n3. The derivations in this paper are rigorous.\n\n4. The computational complexity and memory footprint of the proposed method is lower than the previous methods due to the careful design."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors proposed a plug-and-play image restoration method based on flow matching. The reformulation starts from the forward-backward splitting algorithm, where the proximal step is replaced by a denoising step to form the plug-and-play forward-backward splitting algorithm. The authors insert a specific flow matching method, namely straight-line flows into the PnP-FBS framework due to the computation efficiency of the straight-line flows. Formally, the PnP flow matching algorithm consists of three steps: a gradient step on the data fidelity term, an interpolation step, and a PnP denoising step that is specifically designed to denoise inputs drawn from the straight path."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The restored images seem to be over-smoothed."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce an algorithm combining Plug-and-Play with Flow Matching for solving imaging inverse problems."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024pnpflow,\ntitle={PnP-Flow: Plug-and-Play Image Restoration with Flow Matching},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5AtHrq3B5R},\nnote={under review}\n}"
},
"abstract": {
"value": "In this paper, we introduce Plug-and-Play (PnP) Flow Matching, an algorithm for solving imaging inverse problems. PnP methods leverage the strength of pre-trained denoisers, often deep neural networks, by integrating them in optimization schemes. While they achieve state-of-the-art performance on various inverse problems in imaging, PnP approaches face inherent limitations on more generative tasks like inpainting. On the other hand, generative models such as Flow Matching pushed the boundary in image sampling yet lack a clear method for efficient use in image restoration. We propose to combine the PnP framework with Flow Matching (FM) by defining a time-dependent denoiser using a pre-trained FM model. Our algorithm alternates between gradient descent steps on the data-fidelity term, reprojections onto the learned FM path, and denoising. Notably, our method is computationally efficient and memory-friendly, as it avoids backpropagation through ODEs and trace computations. We evaluate its performance on denoising, super-resolution, deblurring, and inpainting tasks, demonstrating superior results compared to existing PnP algorithms and Flow Matching based state-of-the-art methods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Plug-and-Play",
"Flow Matching",
"image restoration",
"inverse problems",
"generative modeling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f4d969e75b42f5c596973ce662c7e9bdb19c986b.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/59eceba2a2d25639bd758e1697310b57540ecdd2.zip"
},
"title": {
"value": "PnP-Flow: Plug-and-Play Image Restoration with Flow Matching"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5AtlfHYCPa | HR-Extreme: A High-Resolution Dataset for Extreme Weather Forecasting | main | Active | Weather Forecast Dataset;Extreme Weather;Deep Learning;Numerical Weather Prediction | datasets and benchmarks | 3;5;6;8 | 4;5;4;4 | 2;2;2;3 | 2;2;3;3 | 3;3;2;2 | 5.5 | 4.25 | 2.25 | 2.5 | 2.5 | -0.160128 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "please refer to the weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Pros: \n\n1. Extreme weather forecasting evaluation is an important research problem. \n2. The provided dataset introduces 17 extreme events, which are comprehensive. \n3. Authors also release the code for generating the data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Extreme weather forecasting is a crucial problem for the whole world. With the rise of deep learning-based weather forecasting models, the effectiveness of them on extreme weathers are not well analyzed. This paper targets on providing a new benchmark for extreme weather forecasting. Authors employ the HRRR data and utilize the extreme events record in three sources to crop the extreme feature from the original HRRR dataset. Experiments are conducted with four baselines to show the performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Cons:\n\n1. It is not clear how the HR-heim model is trained. \n2. Considering the dataset is a processed version of HRRR, it would be helpful to provide the geo-location of the extreme data to facilitate more diverse use from users. \n3. While the dataset is valuable, there is almost no analysis are present, especially compared to the ERA5 dataset, which is not insightful."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Was the result of the clustering algorithm verified by domain experts? Is there corresponding uncertainty detection and assessment?\n\n2. What is the basis for the \"types of events without specific ranges or those not related to obvious variations in feature map predictions\"?\n\n3. Can data from different sources be used separately?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. **Significance**: It fills the gap in benchmarking for extreme weather assessment in deep learning-based medium-range weather forecasting tasks.\n\n2. The dataset is clearly introduced, and it will be fully open-sourced.\n\n3. The authors present comprehensive experiments and baseline results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work utilizes high-resolution HRRR data to create the HR-Extreme dataset, which encompasses a comprehensive set of 17 extreme weather types. The aim is to provide a more specialized dataset for evaluating the performance of weather forecasting models. To achieve this goal, the authors employed unsupervised clustering and manual filtering methods to develop a complete feature map of extreme events in a machine learning-ready format for the continental United States. The dataset was then used to assess the 1-hour forecasting capabilities of existing medium-range prediction models in comparison to the HR-Heim model proposed in this study."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The description of the dataset generation process lacks clarity in some areas: a more detailed introduction of the clustering method is needed, including how records from different sources are handled and the hyperparameters of the algorithm.\n\n2. There are also unclear aspects in the experimental description: \n a) The baselines are models trained on globally coarse-resolution grids; Was there any further fine-tuning on this dataset? What preprocessing steps were taken? \n b) What is the training strategy for HR-Heim? Does it use the same hyperparameter settings on the original dataset and HR-Extreme?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please provide specific responses to each of the concerns and questions raised above in the weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The constructed dataset could provide a useful evaluation of extreme events in new ML based weather prediction models which tend to be evaluated on larger scale statistics, potentially hiding biases in such events. It is based on an operational high-resolution dataset with a mixture of automated and manual labelling."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a new dataset of labelled extreme weather events over the continental US based on a high resolution (3km) numerical forecast product. They compare the events from a numerical weather prediction (NWP) model as well as two baseline ML based weather models and a newly proposed variant. The authors claim improved skill in their new variant compared to the other baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are a number of technical and fundamental weaknesses that undermine the above strengths.\n\nMajor issues:\n1) Extreme events are, by definition, rare. While a database of such events derived from a high-resolution reanalysis product could provide a useful starting point for evaluation, the specific events are much less useful without a probabilistic understanding or measure of their likelihood at different lead times. i.e. It is probably more likely that any of the given events *wouldn't* have happened given a similar atmospheric state if the conditions were encountered again. Without a probabilistic understanding of the probability of an event (based on lots of comparable events that weren't extreme), this dataset has very little value as presented.\n 2) This leads to my second concern regarding the evaluation setup. Given the dataset extracts the atmospheric state at t=0, -1 and -2 hours, I presume the evaluations happen from an atmospheric state of t=-2 hours and run forward? This is no longer forecasting, but nowcasting and quite a different task. Since the atmospheric state already has the extreme event and the model just needs to advect it correctly. I have no idea how the NWP model is compared in this setting since presumably the authors don't run this explicitly with the extracted state? Or perhaps they do? Also, since the authors use the same NWP as was used to create the HRRR dataset, why doesn't it perform as well as the other models? At what resolution are the models run, are they run globally, over the US, or only over the event region? Are the global Fuxi and Pangu models retrained for these regions?\n 3) Related to this, to what extent is this dataset used to train the different models? Does HR-Helm get to see some or any extreme event data during training? 
Given the issues described in (1), how do you avoid overfitting?\n\nOne more minor issue is that I would like to see Figure 4 presented for the same day for all 4 models, with a separate figure for the other day for all 4 models in the appendix. Currently it's impossible to fairly compare the skill of the models (although it seems like the NWP already does better than HR-Helm across the two examples)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* Could you extend the dataset analysis to include predictions for more than a single hour ahead?\n* As an extension to the previous question, could you analyze and compare the performance of state-of-the-art models and HR-Heim for both short-term and long-term forecasting?\n* Could you further evaluate the performance of HR-Heim on a similar extreme weather dataset?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The authors provide a high-resolution dataset for numerical weather forecasting under extreme weather conditions. The need for such dataset has been evident for some time, as highlighted by the low performance analysis of SOTA weather forecasting models.\n* The construction of the dataset is well-motivated, and the authors provide a clear and thorough explanation of both the data collection and construction processes.\n* Additionally, the authors provide a baseline deep learning model called HR-Heim, which is inspired by a SOTA numerical weather forecasting model, to specifically excel and outperform SOTA models under extreme weather conditions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a high-resolution dataset, called HR-Extreme, for numerical weather forecasting under extreme weather conditions, an area often overlooked in weather forecasting literature. The authors also present a baseline deep learning model alongside the dataset, called HR-Heim, which outperforms state-of-the-art weather models in extreme weather conditions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* In Section 2.2, the authors discuss some datasets that share similarities (to a certain extent) with the proposed one. It would have been beneficial to illustrate the performance of the SOTA models and the proposed HR-Heim on some of these datasets. Specifically, the last dataset introduced by Liu et al., which also includes certain extreme weather conditions, could have been included in the experiments to support the need for HR-Extreme and support the performance of HR-Heim. Given that HR-Heim outperforms SOTA methods in HR-Extreme, we would expect to see similar results on other datasets with extreme weather conditions.\n* As the authors also state as a limitation, this kind of study should include more than just a single-step prediction analysis. Given the increased difficulty of predicting extreme weather events in the long term, an accuracy comparison between HR-Heim and SOTA methods across varying time horizons could be valuable."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present HR-Extreme, a high-resolution dataset for evaluating extreme weather forecasting accuracy, enhancing the practical utility of SOTA models."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hrextreme,\ntitle={{HR}-Extreme: A High-Resolution Dataset for Extreme Weather Forecasting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5AtlfHYCPa},\nnote={under review}\n}"
},
"abstract": {
"value": "The application of large deep learning models in weather forecasting has led to\nsignificant advancements in the field, including higher-resolution forecasting and\nextended prediction periods exemplified by models such as Pangu and Fuxi. Despite\nthese successes, previous research has largely been characterized by the neglect\nof extreme weather events, and the availability of datasets specifically curated for\nsuch events remains limited. Given the critical importance of accurately forecasting\nextreme weather, this study introduces a comprehensive dataset that incorporates\nhigh-resolution extreme weather cases derived from the High-Resolution Rapid\nRefresh (HRRR) data, a 3-km real-time dataset provided by NOAA. We also\nevaluate the current state-of-the-art deep learning models and Numerical Weather\nPrediction (NWP) systems on HR-Extreme, and provide a improved baseline\ndeep learning model called HR-Heim which has superior performance on both\ngeneral loss and HR-Extreme compared to others. Our results reveal that the\nerrors of extreme weather cases are significantly larger than overall forecast error,\nhighlighting them as an crucial source of loss in weather prediction. These findings\nunderscore the necessity for future research to focus on improving the accuracy of\nextreme weather forecasts to enhance their practical utility"
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Weather Forecast Dataset",
"Extreme Weather",
"Deep Learning",
"Numerical Weather Prediction"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/dce5f6a4cee87e518ce22ed84a7aa5cfe35b9bd0.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/5cb4692f49f3939e06b2ee5dc852fb797875d76e.pdf"
},
"title": {
"value": "HR-Extreme: A High-Resolution Dataset for Extreme Weather Forecasting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5B6eSE6l4M | Performance Heterogeneity in Message-Passing and Transformer-based Graph Neural Networks | main | Active | Graph Neural Networks;Transformers;Rewiring;Example Hardness;Generalization | learning on graphs and other geometries & topologies | 1;3;5;6 | 5;4;4;3 | 1;3;3;3 | 1;2;3;3 | 1;2;4;3 | 3.75 | 4 | 2.5 | 2.25 | 2.5 | -0.920575 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "This work addresses an important yet challenging research topic in the graph field. I appreciate the focus on identifying performance heterogeneity in graph-level tasks. However, my primary concern is ensuring the proposed solution’s generalizability. For further details, please refer to the Weaknesses section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Performance heterogeneity is a well-recognized and valuable research problem in graph learning.\n- The proposed selective rewiring approach is promising for addressing performance heterogeneity in graph-level tasks.\n- The observation that optimal network depth depends on the graph’s spectrum is intriguing, and the subsequent heuristic method for selecting the number of GNN layers is validated by the experimental results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the performance heterogeneity of message-passing and transformer-based architectures in graph-level tasks. Unlike previous studies that focused on node-level tasks, the authors find that graph topology alone does not fully explain heterogeneity. Instead, they establish a connection between class-distance ratios and performance heterogeneity using the Tree Mover's Distance. Building on these observations, the authors propose a selective rewiring approach and a heuristic for determining optimal GNN depths."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The generalization capability of the proposed selective rewiring approach remains uncertain. While the motivation for this approach is empirically driven, the observations may be biased by the specific graph datasets tested. Would the conclusions hold on challenging open-source benchmarks, such as large-scale datasets in OGB (e.g., ogbg-ppa and ogbg-code2)?\n- Given the proposed solutions, how can they be applied to new graph scenarios? For new graph datasets, is there a confidence measure for selective rewiring or heuristic GNN depth prediction?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How might different settings for parameters affect the conclusions drawn from the study?\n\n2. In line 298, these 'difficult' graphs are nearly identical for GIN and GraphGPS. Given that Figure 3 provides quantitative metrics but does not specify which exact graphs are considered \"difficult\" across both architectures, could the authors clarify how they determined that the same graphs pose difficulties for both GIN and GraphGPS?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduction of heterogeneity profiles as a tool in graph-level learning to analyze performance across graphs provides a new methodological avenue for studying GNNs.\n\n2. The selective rewiring approach offers a pragmatic solution to a common problem in GNN deployment, potentially simplifying the model training process.\n\n3. The experiments are well-designed, covering multiple datasets and configurations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper delves into the phenomenon of performance heterogeneity in graph-level learning, emphasizing how variations in graph topology influence model effectiveness across various architectures. Central to the study is the introduction of heterogeneity profiles, a novel analytical tool designed to systematically evaluate performance disparities across graph-level tasks. These profiles reveal that performance heterogeneity is shaped by factors beyond mere topological characteristics. Building on the analysis of heterogeneity profiles, the research progresses by proposing a selective rewiring strategy. This strategy aims to optimize network architecture based on the spectral properties of graphs, positing that aligning these properties can mitigate the need for extensive hyperparameter tuning by standardizing the optimal depth of GNN layers across different datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper does not provide a detailed description of how hyperparameters were tuned, only presenting the final hyperparameter results. Different hyperparameter settings, including adjustments to hidden dimensions, dropout rates, activation functions, and normalization techniques, could provide a stronger, more robust set of results. \n\n2. The study lacks a detailed comparison with other state-of-the-art methods that aim to address similar challenges in GNNs. While the paper proposes innovative strategies for improving graph-level task performance, such as selective rewiring and optimized network depth based on heterogeneity profiles, it lacks empirical evidence showing that these approaches achieve state-of-the-art results on 1 or 2 benchmark datasets. This omission could undermine the perceived effectiveness and practical relevance of the proposed methods.\n\n3. The datasets used in the study are relatively small in scale. Incorporating results from more extensive and challenging datasets, such as those from the OGB, would strengthen the validation of the techniques and enhance the paper’s impact."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- I think the attempted research question is fundamental and important for advancing GNN\n\n- I do like the overall approach which is systematic"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates performance heterogeneity in graph neural networks (GNNs), specifically focusing on message-passing (MPGNNs) and transformer-based architectures. It addresses the challenges in understanding model performance variation on individual graphs within datasets used for graph-level learning. To capture performance variations, the authors introduce heterogeneity profiles and leverage the Tree Mover’s Distance (TMD) to demonstrate that both topological and feature information influence performance heterogeneity. The study explores how class-distance ratios, graph rewiring, and network depth impact heterogeneity, proposing a selective rewiring method and a depth-selection heuristic based on spectral alignment. The experiments validate these techniques, showing improved performance on multiple benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- My main concerns with respect to this paper are the contribution and novelty of the methodology and results. More specifically,\n\na. There is certainly merit in investigating the impact factors for the performance of GNN and I do like a more systematic approach. However, given the fact that GNN can be viewed as a function with the input of both structure and feature, it seems obvious that both feature and structure would affect the output/performance of GNNs. With this said, I am not convinced and not comfortable with the claim that topology is enough to explain node-level tasks (see [1] for an example).\n\nb. One of the claimed contribution is the so-called \"heterogeneity profiles\". I do not see a detailed introduction or discussion on this technique and why it is novel. Based on the description on Section 3, it seems a standard random experiments/k-fold validation. Please correct me if I am wrong\n\nc. The claimed research question investigates the factors that explain performance heterogeneity. However, the explained framework (tree mover distance) used in the paper is directly adopted from another paper. What is the new insight provided in this paper? In addition, there are many other distance metric such as FID can combine structure and feature. Why not consider those?\n\nd. The paper tries to connect the experimental insight with graph rewiring and over-smoothing. The papers try to connect over-smoothing with the diffusion property of graph structure (fiddle eigenvalue of graph matrix). Despite being somewhat intuitive, it is not a strong explanation for over-smoothing as it is not clear how the diffusive property of graph structure would affect training or generalization. In addition, I think this diffusive property would largely affect node-level tasks. It is not entirely clear to me why this concept is applicable for graph-level tasks. Please explain. 
While I do think that selective graph rewiring could be a highlight of the paper, the paper does not go into detail in this regard. For example, how do you use the empirical result/theoretical result to obtain the proposed criteria for the selection?\n\n- the presentation and organization of the paper need to be improved. I think the paper right now attempts to connect too many concepts and methods (performance heterogeneity, over-smoothing, graph rewiring e.t.c). I do admire the ambitious goal. However, in the current version of the paper, the connections among these concepts and methods are presented in a rather superficial way (this might be because of the page limit). I do encourage the authors to dive deeper into these connections as they are important for advancing GNN.\n\n\n[1] \"Subgroup generalization and fairness of graph neural networks.\" Advances in Neural Information Processing Systems 34 (2021): 1048-1061."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Can you explain the rationale behind the differing datasets and models used in Sections 3.2, 3.3. and 4?\n2. While the paper highlights performance inconsistencies in GNNs, it does not present any concrete solutions to address this issue. What are your thoughts on proposing methodologies or frameworks to mitigate these inconsistencies in future work?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "Identifying the reasons behind the high variance in performance across different models is crucial; however, this work fails to offer new insights into this issue."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates the performance variability of GNNs in graph-level tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper highlights the variance in GNN performance on graph-level tasks; however, I believe this contribution does not offer new insights to the community. It is already well-established that Graph Neural Networks (GNNs) exhibit performance inconsistencies and that the effectiveness of different models often varies across datasets [1, 2].\nFurthermore, the paper fails to propose a concrete solution to address this phenomenon. Inconsistencies in evaluation across sections further undermine its findings, as I will elaborate in my next comment.\n\n2.\nThe authors investigate the inconsistency of performance on graph-level tasks by examining a range of models and datasets. However, the presentation lacks clarity regarding the consistency of their selections. For instance, in Section 3.2, three datasets are analyzed using two GNN architectures—specifically, GCN and GraphGPS. In contrast, Section 3.3 shifts to the Mutag dataset and the GIN model, raising questions about why different datasets and models were chosen for these sections.\nGiven that the paper's primary contribution aims to highlight an empirical phenomenon, I believe a more comprehensive evaluation is warranted, one that encompasses a broader array of benchmarks and various GNN architectures. Notably, the Mutag and Proteins datasets are recognized for their instability, as documented in previous studies [1]. The MUTAG dataset, in particular, is small and characterized by high variance, which has led to its declining usage in recent research.\nAdditionally, in Section 4, GPS is not tested at all; instead, GAT is utilized, despite all sections focusing on the same claim of heterogeneity in results. 
This inconsistency in model evaluation detracts from the paper’s coherence and impact.\nThe evaluation graph transformers focus solely on one type of graph transformer, GraphGPS, which is inadequate to substantiate the claim that “Our analysis suggests that both message-passing and transformer-based GNNs display performance heterogeneity in classification and regression tasks” (line 102). There exists a diverse range of graph transformers, as highlighted in studies such as [3, 4], which should be considered to strengthen the analysis.\n\n3. The paper suffers from poor writing, featuring numerous grammatical errors and incomplete sentences. For instance, refer to lines 250, 352, 168, 576, and 187. Additionally, some sentences lack clear connections to the surrounding text, such as the statement: \"Size generalization in GNNs has been studied in (Yehudai et al., 2021; Maskey et al., 2022; Le & Jegelka, 2024).\"\nThe overall quality of English in the paper is inadequate and unprofessional, significantly detracting from the clarity and credibility of the research.\n\n8. Overall, I find it difficult to see how the content of the paper supports the claims made in the abstract.\n\n[1] A Fair Comparison of Graph Neural Networks for Graph Classification, Errica et al.\n[2] Design Space for Graph Neural Networks, You et al., NeurIPS20.\n[3] Do Transformers Really Perform Bad for Graph Representation?, Ying et al, 2021.\n[4] Heterogeneous Graph Transformer, Hu et al., 2020."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024performance,\ntitle={Performance Heterogeneity in Message-Passing and Transformer-based Graph Neural Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5B6eSE6l4M},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph Neural Networks have emerged as the most popular architecture for graph-level learning, including graph classification and regression tasks, which frequently arise in areas such as biochemistry and drug discovery. Achieving good performance in practice requires careful model design. Due to gaps in our understanding of the relationship between model and data characteristics, this often requires manual architecture and hyperparameter tuning. This is particularly pronounced in graph-level tasks, due to much higher variation in the input data than in node-level tasks. To work towards closing these gaps, we begin with a systematic analysis of individual performance in graph-level tasks. Our results establish significant performance heterogeneity in both message-passing and transformer-based architectures. We then investigate the interplay of model and data characteristics as drivers of the observed heterogeneity. Our results suggest that graph topology alone cannot explain heterogeneity. Using the Tree Mover’s Distance, which jointly evaluates topological and feature information, we establish a link between class-distance ratios and performance heterogeneity in graph classification. These insights motivate model and data preprocessing choices that account for heterogeneity between graphs. We propose a selective rewiring approach, which only targets graphs whose individual performance benefits from rewiring. We further show that the optimal network depth depends on the graph’s spectrum, which motivates a heuristic for choosing the number of GNN layers. Our experiments demonstrate the utility of both design choices in practice."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Networks",
"Transformers",
"Rewiring",
"Example Hardness",
"Generalization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3253577db1a001385f1412adbbfb729ae4d73b22.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Performance Heterogeneity in Message-Passing and Transformer-based Graph Neural Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5BRFddsAai | HASARD: A Benchmark for Harnessing Safe Reinforcement Learning with Doom | main | Active | benchmark;game;doom;vizdoom;3D;safe RL;reinforcement learning;constraint;difficulty level;PPO;Lagrange;sample-factory;vision;AI safety | datasets and benchmarks | 3;3;5;6 | 4;4;4;3 | 2;3;2;3 | 2;2;2;3 | 3;3;2;3 | 4.25 | 3.75 | 2.5 | 2.25 | 2.75 | -0.777778 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Which tasks in HASARD require memory capabilities, and which involve long-horizon decision-making? It would be helpful if the authors could clarify how the benchmark challenges an agent’s memory and planning capabilities over extended time sequences.\n\n2. Why did you choose ViZDoom to build this benchmark? Does this platform offer specific advantages? From my perspective, it seems that ViZDoom allows only minor modifications to its existing game structure and may lack the flexibility to define more complex, varied tasks. Why not consider using a truly open-world environment, such as MineDojo [4], which enables safer RL environments with more sophisticated task definitions? A platform like MineDojo could potentially support a broader range of scenarios and facilitate more diverse task creation.\n\n3. Additionally, I noticed that you used Omnisafe for algorithm benchmarking, but this wasn’t mentioned in the paper. I have some questions regarding one of the baselines you implemented. In the P3O algorithm code (see here:https://github.com/PKU-Alignment/omnisafe/blob/main/omnisafe/algorithms/on_policy/penalty_function/p3o.py#L82), there is a term J_c in the loss function that appears to be independent of the network parameters. What effect does including J_c in the loss function have? I observed in your experimental results that P3O also fails to satisfy the constraints, which may be related to the J_c term. This raises some doubts about the effectiveness of this baseline.\n\n[4] MineDojo: Building Open-Ended Embodied Agents with Internet-Scale Knowledge"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Diverse Scenarios: HASARD provides varied 3D environments with different objectives and challenges, such as item collection, navigating hazardous terrain, and avoiding neutral units. This variety enriches the learning and testing possibilities, ensuring that the benchmark assesses both task performance and safety considerations.\n\nStructured Curriculum: By offering three difficulty levels, HASARD presents a built-in curriculum for training RL agents, allowing gradual learning in increasingly challenging conditions. This approach is effective for developing robust agents that can generalize to new, more complex scenarios."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents HASARD, a benchmark tailored to vision-based safe reinforcement learning (RL) using egocentric, pixel-based inputs. Built on the ViZDoom platform, HASARD comprises six unique 3D environments across three difficulty levels, each designed to test safe RL in increasingly complex and dynamic scenarios. The benchmark allows for a range of agent objectives, from navigation to item collection and hazard avoidance, focusing explicitly on embodied safe RL with vision-based inputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Outdated Baselines: All the baseline algorithms were published over two years ago, and the original implementations of these baselines do not support visual inputs. The lack of SOTA vision-input baselines, such as Lambda [1], Safe SLAC [2], and SafeDreamer [3], limits the benchmark’s relevance in evaluating current state-of-the-art safe RL methods.\n\nSolvability by Existing Algorithms: Have the tasks introduced in this framework already been solved by existing algorithms? For instance, can PPO-PID successfully address these tasks? Are there settings within HASARD that current algorithms struggle to handle? By not including experiments with the latest baselines, it is unclear whether the HASARD benchmark will drive the development of new algorithms or simply reaffirm existing solutions.\n\nTask Complexity:\nWhat is the primary contribution of HASARD compared to existing safety benchmarks, such as Safety Gymnasium? Compared to Safety Gymnasium, HASARD primarily adds hard constraints and fast simulation. However, implementing hard constraints is relatively straightforward, merely requiring a single line of code to terminate the episode upon any unsafe action. As for fast simulation, HASARD achieves this by sacrificing simulation fidelity and simplifying the action space, which limits its meaningfulness as a contribution compared to Safety Gymnasium.\n\nMoreover, most tasks in HASARD revolve around avoiding hazardous obstacles, which has already been extensively addressed and solved in Safety Gymnasium by existing algorithms (e.g., [1-3]). Given HASARD's simplified dynamics and action space, it would need to introduce more complex tasks than those in Safety Gymnasium to stimulate the development of new algorithms. 
However, I did not observe any such complexity in the task design that would distinguish it from prior benchmarks.\n\n[1] CONSTRAINED POLICY OPTIMIZATION VIA BAYESIAN WORLD MODELS\n\n[2] Safe Reinforcement Learning From Pixels Using a Stochastic Latent Representation\n\n[3] SafeDreamer: Safe Reinforcement Learning with World Models"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No ethics concerns."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The article does not provide an in-depth analysis of performance under different safety budgets. Is there a plan to supplement the experiments with varying safety thresholds to comprehensively demonstrate the trade-offs between reward and safety for each algorithm? This would be very helpful in understanding the adaptability of different methods under various safety requirements.\n2. Considering the limitations of ViZDoom in simulating real-world physics, have the authors explored other engines with superior physical simulation capabilities (e.g., Isaac Gym)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors tested six baseline algorithms on HASARD and provided an analysis of the results.\n2. The tasks move beyond simple 2D navigation to incorporate complex elements such as spatial understanding"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "HASARD is a benchmark testing platform specifically designed for safe reinforcement learning, based on ViZDoom, providing a diverse range of 3D environments.\n\n1. The tasks on this platform require agents to pursue high rewards while considering safety strategies, moving beyond simple 2D navigation to incorporate complex elements such as spatial understanding.\n2. HASARD offers three difficulty levels and supports both soft and hard safety constraints, flexibly adapting to varying safety requirements.\n3. The platform integrates Sample-Factory, enabling high-speed simulation that allows agents to address real-world safety challenges while reducing computational costs.\n4. HASARD includes six environments based on ViZDoom and benchmarks various methods to demonstrate the limitations of existing technologies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The reviewer believes that if the distinction between soft and hard constraints is merely based on whether the threshold is $0$, then other benchmarks share this characteristic, making this claim somewhat unsubstantiated.\n2. Although multiple methods were tested in the current experiments, there is a lack of analysis on performance under different safety budgets. It is recommended to include experiments with varying safety thresholds to better understand the trade-off between safety and reward for each algorithm.\n3. HASARD is based on the ViZDoom game engine, which, while computationally inexpensive, lacks detailed simulation of real-world physics.\n4. The anonymous video link provided by the authors is inaccessible."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses above:\n\n- Unfortunately, while the paper is a decent attempt at building a safe RL benchmark, I am not convinced the safe RL community will be incentivized to use it. The main reason is that the notions of constraints in this benchmark are not directly tied to the very pragmatic safety considerations that need to be tackled in the real world - ranging from control systems to robotic deployments. Could the authors clarify how exactly they envision this benchmark to drive innovation in the safe RL community? And what sub-field of researchers would be likely to use it?\n\n- The benchmark feels a bit incremental compared to the already existing VizDoom framework that has been around for years. Can the authors clarify if the proposed modifications are non-trivial and if they can be broadly applied to potentially other frameworks like Minecraft and other games?\n\n- The evaluations are all with variants of PPO and no other safe RL algorithms are tested. It is unclear why this is the case, since in my understanding the benchmark should not be tied to a particular type of algorithm. Please clarify the evaluations and if there is any specific assumption on the type of safe RL algorithms that could be tested on the benchmark? \n\n- Can the authors make 1-1 comparisons with the proposed benchmark and the features of prior simulated and real world benchmarks that have been used by safe RL papers in the past?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well motivated and targets an important problem - that of building realistic and reliable RL benchmarks, and more specifically benchmarks for safe RL. This involves addressing challenges with the simple natural of prior benchmarks - both visually and in terms of higher dimensional action space and increased temporal horizons. \n\n- The proposed benchmark HASARD is built on top of an existing game engine VizDoom and is able to inherit all of its properties for re-use. The multiple levels in HASARD can be potentially helpful in evaluating different notions of safety in proposed safe RL algorithms. \n\n- The paper has detailed evaluations of several safe RL algorithms on HASARD indicating that the framework is feasible for training constrained RL policies. The evaluations reveal that simple algorithms based on PPO and constrained PPO can achieve non-trivial performance in the benchmark and also reasonable constraint satisfaction. It is good to see that these simple algorithms do not saturate the benchmark and there is still a lot of room for improvement."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new egocentric vision-based 3D simulated environment for benchmarking safe reinforcement learning. The benchmark is more realistic and challenging compared to common prior safe RL benchmark environments. In addition, the paper has evaluations for some safe RL algorithms on the proposed benchmark demonstrating its feasibility of use and the potential for building better approaches to perform more favorably on it."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Unfortunately, while the paper is a decent attempt at building a safe RL benchmark, I am not convinced the safe RL community will be incentivized to use it. The main reason is that the notions of constraints in this benchmark are not directly tied to the very pragmatic safety considerations that need to be tackled in the real world - ranging from control systems to robotic deployments. \n\n- The benchmark feels a bit incremental compared to the already existing VizDoom framework that has been around for years. The modifications for the different levels and environments in this framework do not capture the notions of open-world generalization and realism the field is headed towards in terms of evaluating RL systems. In addition, a lot of prior safe RL works have bechmakred their systems on real-world systems like robotic navigation and manipulation, and I am not convinced that a modified VizDoom framework is likely to create a reasonable impact in the community. \n\n- The evaluations are all with variants of PPO and no other safe RL algorithms are tested. It is unclear why this is the case, since in my understanding the benchmark should not be tied to a particular type of algorithm"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is ViZDoom truly a 3D environment, considering its graphics appear pixelated and less detailed compared to modern 3D games? \n\n \n\n2. Why are the baseline algorithms limited to PPO-based approaches? Could the paper include more diverse methods, such as model-based safe RL or constrained policy optimization (e.g., https://arxiv.org/abs/2210.07573)? \n\n \n\n3. How can continuous safe RL algorithms be benchmarked when the paper only supports discrete action spaces?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper demonstrates several notable strengths across the dimensions of originality, quality, clarity, and significance: \n\n \n\n1. **Originality**: It introduces HASARD, a benchmark specifically designed for vision-based embodied safe reinforcement learning (RL) in complex 3D environments. \n\n \n\n2. **Quality**: Comprehensive design of 6 diverse environments with 3 difficulty levels each, offering a range of challenges. \n\n \n\n3. **Clarity**: The paper is structured in a logical and coherent manner, facilitating the understanding of complex concepts. \n\n \n\n4. **Significance**: The paper Addresses an important need in safe RL research for more realistic and challenging benchmarks. It enables systematic evaluation and comparison of safe RL algorithms in vision-based 3D settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the HASARD, a benchmark designed for egocentric pixel-based safe RL in diverse and stochastic 3D environments. Unlike existing benchmarks, HASARD emphasizes spatial comprehension, short-term planning, and active prediction for high rewards while ensuring safety. It offers three difficulty levels, supporting both soft and hard safety constraints. The benchmark includes heatmaps for visual analysis, aiding in strategy development. By targeting vision-based embodied safe RL, HASARD addresses the need for benchmarks mirroring real-world complexities. The paper's contributions include the design of six novel ViZDoom environments with safety constraints, integration with Sample-Factory for rapid simulation and training. Evaluation of baseline methods within HASARD highlights challenges in balancing performance and safety under constraints."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the paper makes valuable contributions, several areas could be improved: \n\n \n\n1. The paper refers to ViZDoom as a 3D environment, but its pixelated, less detailed graphics compared to modern 3D games challenge this characterization. \n\n \n\n2. **Narrow Range of Baselines**: Evaluations focus primarily on PPO-based algorithms. Incorporating approaches like model-based safe RL or constrained policy optimization (e.g., https://arxiv.org/abs/2210.07573) would enhance the assessment. \n\n \n\n3. **Limited Visual Input Analysis**: Though vision-based learning is emphasized, the paper lacks analysis of how visual complexity influences performance. Exploring different visual conditions (lighting, distractors) and comparing raw pixels with simplified representations would highlight the unique challenges of vision-based safe RL, especially since the visual inputs in the environment appear less realistic. \n\n \n\n4. **Action Space Limitation**: Only discrete action spaces are supported. It is unclear how continuous safe RL algorithms would be benchmarked. \n\n \n\n5. **Real-World Relevance**: The connection between the benchmark tasks and real-world safe RL challenges needs clearer articulation. Providing examples of practical applications would strengthen motivation. \n\n \n\nAddressing these points could strengthen the paper and increase the impact and utility of the HASARD benchmark for the safe RL research community."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A Safe RL benchmark for vision-based learning in complex navigable 3D environments."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hasard,\ntitle={{HASARD}: A Benchmark for Harnessing Safe Reinforcement Learning with Doom},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5BRFddsAai},\nnote={under review}\n}"
},
"abstract": {
"value": "The advancement of safe reinforcement learning (RL) faces numerous obstacles, including the lack of simulation environments, demanding computational requirements, and a lack of widely accepted benchmarks. To address these challenges, we introduce **HASARD** (A Benchmark for **HA**rnessing **SA**fe **R**einforcement Learning with **D**oom), tailored for egocentric pixel-based safe RL. HASARD features a suite of diverse and stochastic 3D environments. Unlike prior vision-based 3D task suites with simple navigation objectives, the environments require spatial comprehension, short-term planning, and active prediction to obtain high rewards while ensuring safety. The benchmark offers three difficulty levels to challenge advanced future methods while providing an easier training loop for more streamlined analysis. Accounting for the variety of potential safety protocols, HASARD supports both soft and hard safety constraints. An empirical evaluation of baseline methods highlights their limitations and demonstrates the benchmark's utility, emphasizing unique algorithmic challenges. The difficulty levels offer a built-in curriculum, enabling more efficient learning of safe policies at higher levels. HASARD utilizes heatmaps to visually trace and analyze agent navigation within the environment, offering an interpretive view of strategy development. Our work is the first benchmark to exclusively target vision-based embodied safe RL, offering a cost-effective and insightful way to explore the potential and boundaries of current and future safe RL methods. The environments, code, and baseline implementations will be open-sourced."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"benchmark",
"game",
"doom",
"vizdoom",
"3D",
"safe RL",
"reinforcement learning",
"constraint",
"difficulty level",
"PPO",
"Lagrange",
"sample-factory",
"vision",
"AI safety"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/737d6c04cde9ff7d558e6ab9f6a957c86fde6c52.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "HASARD: A Benchmark for Harnessing Safe Reinforcement Learning with Doom"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5BSlakturs | Enhancing Compositional Text-to-Image Generation with Reliable Random Seeds | main | Active | Diffusion models;text-to-image generation | generative models | 5;6;6 | 4;5;3 | 4;3;3 | 3;3;3 | 4;3;3 | 5.666667 | 4 | 3.333333 | 3 | 3.333333 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the Weakness for details.\n\nMy main concern is as follows. \n(1) Correctness is decided by the large model CogVLM2. It will also lead to the bias, like preferring non-overlapping layout. \nFinetuning makes the model overfitted. \n\n(2) Fixed System Seed. You mean the input noise is also fixed? Actually, we fixed the input noise.\n\n(3) Overlapped result is hard to generate."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea is easy to follow. \nThe problem is well-designed. \nThe finding is interesting to many diffusion users."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the author explores the random noise for the diffusion-based generation, especially for the text-to-image generation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Section 3.2 are not well-proved for the correlation. I am not convinced about the heatmap results. \n- How do you decide the correct / incorrect in Figure 4? Do this process bring the bias or prefrency over the distribution?\n- Which layer is used for the heatmap? the output of diffusion model before VAE decoder? \n- The four coins can be parallel or any position arrangement. So why the heatmap in Figure 4 is coincidently splited the 4 grids?\n\n2. More compositional generation results and failure cases\nHow to generate the partially overlapped objects? \nThe samples showed are almost no overlapped. \n\n3. The definition of seed. \nSo you just fix the seed rather than the noise? \nEverytime we will resample the noise according to the seed? \nSo why there will be a preferency over certain seed in Section3.3?\n\n4. Minor problem\nWhat are the \"these images\" in abstract? You may training images, which is collected by you? Please specify it. \n\n5. Scalability to unseen prompts.\nHow about 7 or 8 objects? \nHow about the ``boundary'' or ``corner''?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. What are the numbers when compared to that of [1]?\n\n---\n\n2. Comparison against baselines: As 512x512 implementation of Stable Diffusion is used for LMD and Multi Diffusion, comparing it with 768x768 version becomes unfair. What are the numbers for Table 4 when using the 512x512 Stable diffusion of this method instead of the 768x768?\n\n---\n\n3. Mixture of objects for numerical compositions. All the results seem to display numerical compositions of a single object. How are the results when I compose multiple objects, such as \"2 airplanes and 4 birds in the sky\", and how do the baselines compare with this method for such cases?\n\n---\n---\nI will reconsider my rating if these concerns are addressed.\n\nPlease correct me if you think I have misunderstood any aspect of the paper."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. The main advantage of this work is that no additional modules/trainable parameters need to be added to the diffusion model which incorporate layouts or bounding boxes like other works usually do.\n2. Extensive experimentation is conducted to validate the reliable seeds hypothesis.\n3. Once reliable seeds are mined, the authors have experimented with a broad spectrum of ways to use that to enhance the model's performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper tackles two main aspects of diffusion models: Numerical and spatial generations. The aim is to use the diffusion model as is without additional inputs such as layouts. First, reliable seeds are mined which produce correct results for the numerical and spatial generations. Then, these seeds are used to create a generative dataset and the model is fine-tuned on this dataset to improve performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Baselines: Newer methods to accomplish this task have developed such as [1] after LMD, which show an improvement over LMD. This work should be compared with [1] instead of LMD to demonstrate the efficacy of this approach.\n\nI have clubbed the other points in the questions section\n\n---\n[1] Feng, Yutong, et al. \"Ranni: Taming text-to-image diffusion for accurate instruction following.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The citation format is slightly less standardized and the consistency of the references in the citation section should be ensured.\n\nOne key limitation is the lack of an automatic, inference-time method to select reliable seeds for generating accurate compositions. Would the authors consider developing a mechanism, such as a predictive model or algorithm, to dynamically choose reliable seeds based on prompt characteristics? This would significantly improve the model’s generalizability and practical use.\n\nFine-tuning on self-generated data inherently risks reducing image diversity or amplifying generation biases. Could the authors clarify how they ensured that this self-generated dataset maintains high quality compared to real-world or externally validated datasets? Additionally, what safeguards are in place to prevent potential degradation in image quality or unintended biases?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Provides a novel, data-efficient method to improve compositional accuracy in text-to-image generation by harnessing seed variability.\n2. The automatic generation of a training dataset with reliable seeds reduces the labor-intensive process of manual annotation.\n3. Extensive quantitative and qualitative evaluations demonstrate the approach’s effectiveness in improving both numerical and spatial compositional tasks across different models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the challenges faced by text-to-image models in handling compositional prompts, such as accurately rendering object quantities and spatial relations. It highlights the impact of initial random seeds on the arrangement and fidelity of generated images, proposing a method to improve model performance by identifying and leveraging “reliable seeds.” The paper’s main contributions include: 1) a generation strategy based on reliable seeds to reduce the need for manual annotations by automatically generating a high-quality dataset; 2) fine-tuning the model on self-generated reliable data to enhance numerical and spatial compositional accuracy; and 3) implementing a seed-based sampling strategy that improves generation accuracy without additional computation or training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The reliance on selected seeds may limit the diversity of generated outputs, as increasing accuracy through reliable seeds could restrict the model’s range of variations.\n2. There is no method presented for automatically selecting reliable seeds during inference, limiting the approach’s applicability to other models and use cases.\n3. Potential decline in overall image generation quality when fine-tuning on self-generated data remains unexplored, especially concerning aesthetics and real-world accuracy.\n4. The approach assumes that data generated with reliable seeds is of higher quality for model fine-tuning, but lacks empirical comparisons with real-world datasets or alternative high-quality sources.\n5. Limited generalization testing to other diffusion models beyond Stable Diffusion and PixArt-α; therefore, the approach’s adaptability to diverse architectures is unclear."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Some seeds are more reliable for compositional text-to-image generation. We propose a seed mining strategy and develop methods to leverage these seeds."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024enhancing,\ntitle={Enhancing Compositional Text-to-Image Generation with Reliable Random Seeds},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5BSlakturs},\nnote={under review}\n}"
},
"abstract": {
"value": "Text-to-image diffusion models have demonstrated remarkable capability in generating realistic images from arbitrary text prompts. However, they often produce inconsistent results for compositional prompts such as \"two dogs\" or \"a penguin on the right of a bowl\". Understanding these inconsistencies is crucial for reliable image generation. In this paper, we highlight the significant role of initial noise in these inconsistencies, where certain noise patterns are more reliable for compositional prompts than others. Our analyses reveal that different initial random seeds tend to guide the model to place objects in distinct image areas, potentially adhering to specific patterns of camera angles and image composition associated with the seed. To improve the model's compositional ability, we propose a method for mining these reliable cases, resulting in a curated training set of generated images without requiring any manual annotation. \nBy fine-tuning text-to-image models on these images, we significantly enhance their compositional capabilities. For numerical composition, we observe relative increases of 29.3\\% and 19.5\\% for Stable Diffusion and PixArt-$\\alpha$, respectively. Spatial composition sees even larger gains, with 60.7\\% for Stable Diffusion and 21.1\\% for PixArt-$\\alpha$."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion models",
"text-to-image generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/09a840affd6762bf5bd2cf6ed20276e009598966.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Enhancing Compositional Text-to-Image Generation with Reliable Random Seeds"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5BXWhVbHAK | Can One Modality Model Synergize Training of Other Modality Models? | main | Active | Multimodal learning;Representation learning;learning theory | learning theory | 3;5;8 | 3;4;4 | 2;3;3 | 2;2;3 | 3;4;4 | 5.333333 | 3.666667 | 2.666667 | 2.333333 | 3.666667 | 0.802955 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Regarding Remark 2.1. \"δ does not hinder the synergy\", you say that\n\"We can extract an imperfect feature representation from Pj by giving\nimperfect input to the modality Mj . This allows ˆzj exist in the distribution Pj 2. Consequently, ˆzj\nis closer to or part of the latent space of the Mj than to that of the Mi or the true latent space.\"\n\nWouldn't this quite directly depend on how far \\hat P_j is from P_j, as well as how aligned P_i is to P_j to begin with? Surely one could construct counter examples with adversarially chosen δ? This may not be much of a practical concern for reasonably close \\hat P_j but is this statement not a bit strong in the general case?\nExpanding on this, perhaps this could be empirically verified by exploring different levels of noise to introduce in \\hat z^j, particularly in the L -> V task as suggested above. Have you perhaps already considered / explored different noising functions and compared their impact?\n\nFor the V -> A case in AVMNIST, you say \"For the [V→A] case with AVMNIST, we use randomly shuffled images from AVMNIST as ˆzj in audio classification tasks\". Could you clarify the random sampling in this case? Is it a random image from the entire dataset or a random image from the samples of the same target class? If it is a random image (i.e. unrelated to the paired modality at all), this seems significantly more \"noise\" than in other settings, it'd be great to understand a bit better to understand the motivation for this choice and perhaps similar experiments for other modalities.\n\nOn a general level, if we assume that the target distribution for a modality encoder g_i is similar to the one of a pre-trained encoder g_j of a different modality, the proposed latent alignment loss has some similarity to knowledge distillation. 
In this field there's been some notable prior work that suggests that the success of KD is partially attributable not only to a superior knowledge of the teacher but also to benefits of the training strategy itself. Notably, Born Again Networks (Furlanello et al., 2018) suggests a simple strategy of self-distillation can improve performance. Yuan Li et a., 2020 explores this further in \"Revisiting Knowledge Distillation via Label Smoothing Regularization\". This is relevant to this work since it could suggest a different mechanism leading to the empirically observed improvements that is less about multimodal transfer and perhaps more about a sort of regularization effect of the added latent loss. Has this been considered?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The work considers a range of relevant modalities and conducts experiments across language, vision, and audio in various cross-combinations.\nThe mathematical framework introduced is intuitive and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper aims to answer whether imperfectly aligned paired data from other modalities can help learning in a multimodal setting.\nSpecifically, the authors propose an additional latent loss, to directly align the target modalities' latent representation with that of the output of a pre-trained encoder of the secondary (supportive) modality. The authors introduce and study a theoretical framework and show that even imperfect paired data can help approximate a hypothetical, perfectly aligned representation. They further demonstrate empirically that the additional latent loss led to stronger performance of the target modalities' encoder across various tasks and modalities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper asserts in multiple locations that prevailing multimodal learning methods require \"perfectly paired datasets\" (quote from the introduction) between modalities. This, in my opinion, is not accurate an accurate representation of the thinking in the field. The authors cite CLIP as a notable multimodal model, which famously is trained on noisy web-scale paired data in the form of image alt text. Training multimodal models on noisy labels such as alt-text from web-scraped images is common practice and widely established (Radford et al. 2021, Dosovitskiy et al. 2020.,...). While improving the alignment of the training modalities is generally seen as desirable (e.g. Fang et al., 2023), \"perfection\" seems not a requirement.\nThe paper does not cite and / or discuss other works in the space of aligning multiple modalities without direct paired supervision data, including popular works such as ImageBind (Girdhar, et al., 2023) or 4M (Mizrahi et al., 2024). These works do not (solely) rely on paired multimodal data, seemingly directly addressing the limitations discussed by this work in section 2.3.\nThis, together with arguably understating how much alignment on noisily paired data has been previously studied in the field, arguably limits the novelty of this work.\n\nOne of the main statements of this work is that the label does not need to be perfectly paired / can be noisy. However, the transformations to introduce this noise studied in the work may not be sufficiently realistic. For example for the L -> V case, the noisy label \\hat z^j is constructed by embedding the text \"This is about (Class|Emotion) #.\" as per table 5. In terms of the measured downstream task, which is classification, this label is arguably not noisy, but perfectly represents the target task. In table 4 it is shown that changing this supervision signal with a caption produced by LLaVA leads to only minor improvement, which may not be surprising in this setting. 
(See the Questions section for suggestions around this.)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Figure 2 right, $Z_i$ should be $Z_j$."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Novel insights: The work challenges common assumptions about requiring paired supervision for multimodal learning and demonstrates unexpected improvements using single-modality pre-training and synergies between seemingly unrelated modalities.\n\n- Strong theoretical foundation: The paper provides rigorous mathematical proofs for how and why cross-modal learning can work without paired supervision, establishing bounds on the interpolation coefficient $\\alpha$ and showing the existence of superior interpolated representations.\n\n- Comprehensive empirical validation: The authors demonstrate their approach across multiple modality pairs (Vision-Language, Vision-Audio, Language-Audio) and various architectures, showing consistent improvements across different settings and tasks. Results include not just standard classification metrics but also out-of-distribution generalization and robustness benchmarks, showing broad improvements across different evaluation criteria."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates whether one modality model can enhance the training of another modality model without requiring paired multimodal supervision. The authors propose both theoretical and empirical evidence that imperfect supervision from one modality (e.g., language) can improve the performance of another modality (e.g., vision). They establish mathematical foundations showing that an interpolated representation between two modalities can outperform single-modality representations, even with imperfect cross-modal supervision. The work is validated through extensive experiments across vision, language and audio modalities, demonstrating consistent performance improvements. For example, in the vision domain, they show improvements of 1.5-2.5% on ImageNet classification and similar gains on robustness benchmarks by leveraging simple language prompts during training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Other language supervision: The language prompts used (e.g., \"This is about Class #\") are quite basic. It would be interesting to see how the method performs with more complex or varied language supervision.\n\n- Theoretical assumptions: Some theoretical assumptions (e.g., Assumption 1 about $\\Delta_{ij} \\ge 0$) could benefit from more empirical validation or discussion of when they might not hold."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please check the weaknesses section, in particular the question about how leveraging other modalities is done."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The paper tackles an important problem: obtaining paired data is often challenging in many setups.\n* The approach is backed by both theoretical analysis and empirical results, showing clear improvements over baseline methods.\n* The paper is well-structured, with illustrations that help clarify key messages."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper demonstrates that performance on a target modality can be improved by leveraging another modality, even without paired samples. Both theoretical and empirical evidence are provided to support this claim, showcasing the method's effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The main contribution—showing that using an unpaired modality can improve performance—has already been explored in prior works. For instance, some studies demonstrate leveraging unpaired modalities through pretraining on one modality and fine-tuning on another, like from image to video to audio [1], or from text to image-text [2]. Additionally, the issue of handling unpaired or missing modalities has been addressed before, yet the paper does not discuss relevant works in this domain [3,4,5]. Including this discussion would better position the paper.\n\n* It is not clear why the author decides to leverage other modality through an l2 loss between the features spaces. Other design choices can be explored. For example, concatenation, addition or other multimodal features fusion techniques.\n\n* The experiments use relatively small models on classification tasks. It remains unclear whether the proposed method would be effective on larger, more complex, maybe generative models (e.g., Multimodal LLMs, CLIP).\n\n[1] Shukor, Mustafa, et al. \"Unified model for image, video, audio and language tasks.\" TMLR (2023).\n\n[2] Liu, Haotian, et al. \"Improved baselines with visual instruction tuning.\" CVPR 2024.\n\n[3] Kim, Donggeun, and Taesup Kim. \"Missing Modality Prediction for Unpaired Multimodal Learning via Joint Embedding of Unimodal Models.\" ECCV (2024).\n\n[4] Lee, Yi-Lun, et al. \"Multimodal prompting with missing modalities for visual recognition.\" CVPR. 2023.\n\n[5] Wang, Hu, et al. \"Multi-modal learning with missing modality via shared-specific feature modelling.\" CVPR. 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024can,\ntitle={Can One Modality Model Synergize Training of Other Modality Models?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5BXWhVbHAK},\nnote={under review}\n}"
},
"abstract": {
"value": "Learning with multiple modalities has recently demonstrated significant gains in many domains by maximizing the shared information across modalities. However, the current approaches strongly rely on high-quality paired datasets, which allow co-training from the paired labels from different modalities. In this context, we raise a pivotal question: Can a model with one modality synergize the training of other models with the different modalities, even without the paired multimodal labels? Our answer is 'Yes'. As a figurative description, we argue that a writer, i.e., a language model, can promote the training of a painter, i.e., a visual model, even without the paired ground truth of text and image. We theoretically argue that a superior representation can be achieved by the synergy between two different modalities without paired supervision. As proofs of concept, we broadly confirm the considerable performance gains from the synergy among visual, language, and audio models. From a theoretical viewpoint, we first establish a mathematical foundation of the synergy between two different modality models, where each one is trained with its own modality. From a practical viewpoint, our work aims to broaden the scope of multimodal learning to encompass the synergistic usage of single-modality models, relieving a strong limitation of paired supervision."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multimodal learning",
"Representation learning",
"learning theory"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/39603861b39df9ea665fe6c645a12f4d8ce88957.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Can One Modality Model Synergize Training of Other Modality Models?"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5BjQOUXq7i | RegMix: Data Mixture as Regression for Language Model Pre-training | main | Active | language model pre-training;data mixture;regression | foundation or frontier models, including LLMs | 5;6;8;8;8 | 3;4;3;3;4 | 2;3;3;3;4 | 2;3;3;4;4 | 3;3;3;4;4 | 7 | 3.4 | 3 | 3.2 | 3.4 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1) Can you further explain why the choice of multiplying the e token distribution by a value from 0.1 to 5.0? Is this a standard practice? Were other ranges tested, and if so, what were the results? \n - Also, could you discuss the rationale behind this range and whether you conducted any sensitivity analyses to determine its impact on the results?\n\n2) Given sufficient computation available, would segmenting domains further (into finer-grained topic-based segments) likely improve model performance or lead to more effective mixture predictions? Do you think that finer segmentation would affect the rank invariance assumption? I suggest the authors to discuss the potential impacts and challenges of a finer-grained domain segmentation in their future work section. This would help address the broader implications and limitations of their approach."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- The paper introduces a novel, regression-based approach for selecting data mixtures that reduces computational costs in language model training. The approach offers an efficient alternative to traditional dynamic or heuristic data allocation methods, making a valuable contribution to the field.\n\n- The paper is technically robust and well-structured, with extensive validation across diverse data scenarios. It empirically supports the rank invariance hypothesis and uses clear, well-structured figures to illustrate the method and results, enhancing reader understanding. REGMIX’s ability to match or outperform other DoReMi and other methods with significantly less compute is a compelling outcome for LLM pre-training efficiency.\n\n- The paper tackles a pertinent problem in LLM pre-training. Given the increasing size of training data and models, this approach could have a significant impact on the field, especially in reducing computational costs and environmental impact."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work introduces REGMIX, a method for optimizing data mixtures to enhance language model training efficiency. REGMIX treats data mixture selection as a regression task, using small proxy models to predict the performance of different mixtures and identify the best one, enabling larger models to be trained with significantly less compute. Key findings include:\n- REGMIX’s mixtures perform as well as or better than those selected by human experts and prior methods like DoReMi, with only a fraction of the compute cost.\n- Data mixture has a substantial impact on downstream performance, with single-task performance differences reaching up to 14.6%.\n- General web corpora (such as CommonCrawl) outperform traditionally high-quality data like Wikipedia in driving downstream performance.\n- Domain interactions are complex and often counterintuitive, highlighting the value of automated approaches like REGMIX.\n- Data mixture effects go beyond scaling laws, with REGMIX capturing the complexity by jointly optimizing across all domains."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- To maximize impact, the authors could highlight specific scenarios where the approach enables previously infeasible experiments due to resource constraints. Also, adding a broader discussion on trade-offs of the method (e.g., scenarios where the rank invariance assumption might not hold) would help readers assess its practical relevance and future applicability.\n\n- The work could have used standardized computation metrics, such as FLOPs or GPU hours, to allow clearer comparison of the method efficiency gains relative to baselines."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "How do we decide the size for proxy model and training tokens.?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. RegMix introduces a fresh approach by framing data mixture selection as a regression problem rather than relying on complex optimizations or heuristics, making the process scalable and computationally efficient.\n\n2. The paper’s experimental setup is robust, with 512 small proxy models across diverse data mixtures, creating a solid regression model for data selection.\n\n3. The paper is well-organized, clearly explaining the methodology and experiments. It introduces the hypothesis of rank invariance in data mixtures, supported by visual aids, making the regression model’s role easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents RegMix, a new method for optimizing data mixtures in pre-training large language models (LLMs). Recognizing the importance of data composition, the authors frame mixture selection as a regression task. RegMix uses small proxy models trained on various data mixtures to build a predictive model that identifies the optimal mixture for larger models.\n\nThe authors conduct extensive experiments to validate the approach, showing that models trained with RegMix-selected data mixtures outperform those trained with mixtures chosen by other methods, including human selection and DoReMi, while utilizing only 10% of the compute budget.\n\nThe authors provide insights into the effects of data mixtures, offering empirical evidence that data mixtures can significantly impact performance, with single-task performance variations reaching up to 14.6%. They also emphasize the complex interactions that occur between different data domains."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper conducts a set of small-proxy models trained with small-scale tokens.\n\nThe paper only experiments with 1M models with 1B tokens. \nIt is unclear how to decide the size of the proxy model parameter and training token."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Can the authors provide more theoretical or empirical evidence to support the rank invariance assumption? How does this assumption hold up with significant changes in model scale and data distribution?\n- How does REGMIX perform with proxy models larger than 1B parameters? Can the authors provide any preliminary results or insights on this? Or could the obtained data mixtures guide us to train a better model using much more tokens, e.g., 100B?\n- Can the authors provide a detailed comparison of the computational resources required by REGMIX and other methods? This would help in understanding the practical feasibility of the method."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper presents a novel method, REGMIX, which formulates the data mixture selection problem as a regression task. This is a creative approach that leverages small-scale proxy models to predict optimal data mixtures for large-scale models.\n- The authors conducted extensive experiments, training 512 models with 1M parameters on 1B tokens to fit the regression model. They then validated this model by training a 1B parameter model on 25B tokens, showing superior performance compared to human selection and the DoReMi method.\n- The method allows for parallel training of small proxy models, making it more scalable than previous approaches that require training a single model for a long time.\n- The paper provides several interesting findings, such as the significant impact of data mixtures on performance, the strong positive correlation of web corpora with downstream performance, and the complex interactions between domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a method called REGMIX for automatically selecting an effective data mixture to optimize the pre-training of large language models. REGMIX formulates the data mixture selection as a regression task, training a set of small models with diverse data mixtures and fitting a regression model to predict their performance. The fitted regression model is then used to simulate and identify the top-performing data mixture, which is subsequently used to train a large-scale model. The empirical results demonstrate that REGMIX can improve downstream task performance and achieves results comparable to or surpassing the DoReMi method while using only 10% of the compute budget."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The key assumption of rank invariance of data mixtures across different model sizes and token counts is not thoroughly validated. This assumption might not hold in all cases, especially with significant changes in model scale and data distribution.\n- The paper claims stability across different proxy model sizes, but the experiments are limited to models with up to 1B parameters. It remains unclear if the method would be equally effective for much larger models commonly used in practice (e.g., 7B or 70B parameters). If so, the additional computation cost could not be ignored. \n- The authors only trained 25B tokens using the obtained data mixtures. This raises the question of whether the data scale could be enlarged to 50 times or even 100 times. And can LLM sitll benefit from the obtained data mixture?\n- Although the method is more efficient than some previous approaches, training 512 small models still requires substantial computational resources. This could be a limitation for teams with limited access to such resources. The trade-off between performance gains and additional costs may not always hold when the model scales up."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written and easy to follow.\n2. The author identifies a helpful assumption, namely ranking invariance regarding training scales, which helps reduce the cost of tuning data mixtures in this paper.\n3. The proposed method is simple to implement and efficient, thus appealing to try in practice.\n4. The paper contains extensive experiments to show optimizing data mixtures improves model performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper formulates data mixing problems as a regression task. The authors propose to search data mixtures on small models and fit regression models to predict the optimal data mixture, which is then transferred to larger-scale model training. The authors empirically show the rankings of different data mixtures hold consistent between small and large-scale training. And data mixture found to be optimal at small scales can lead to improved performance compared to human heuristics and previous methods at large scales."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The feasibility of treating the data mixing problem as a regression problem has been an idea unveiled by previous studies [1,2], as the authors also mentioned in the paper.\n2. The methods experiment on as many as 512 training runs. It is unclear whether the regression step is still necessary with so many experimented mixtures. This makes the proposed method actually a grid search with a small-scale proxy.\n3. The proposed method highly depends on the assumption of ranking invariance regarding scales. The author only provides limited empirical results on this assumption. However, such an assumption is questionable according to [3]. It would be better if the authors provide more discussion to explain the scope where this assumption holds.\n\n[1] Data Mixing Made Efficient: A Bivariate Scaling Law for Language Model Pretraining\n\n[2] Data Mixing Laws: Optimizing Data Mixtures by Predicting Language Modeling Performance\n\n[3] Scaling Laws for Data Filtering— Data Curation cannot be Compute Agnostic"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. in table 2, why do you only list the MSE for 1M model but rank correlation for all model sizes? what's the MSE for the other two sizes? Why is rank correlation a better metric here?\n2. on line 243, the author mentioned the rank invariance hypothesis. However, it's not super clear to me what exactly this means and how this hypothesis is verified by the experimental results. Could you provide a clear definition of the rank invariance hypothesis and explicitly state how the experimental results support or verify this hypothesis?\n3. there are some related work on data selection that falls into the group-level data selection category: https://aclanthology.org/2020.acl-main.754/"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. the proposed method is novel and has practical impact to LLM training\n2. the authors evaluated the predicted mixture on 1B model and compared to a few prior methods based on the downstream performance of the model\n3. the paper also has good analysis and insights about regarding optimizing data mixture ratio, the relationship between validation PPL loss and downstream task performance, and the issues regarding scaling laws for data mixture."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method that trains a simple regression model over data mixture ratios and the LLM loss on very small models, and then use the trained regressor to predict the best data mixture configuration for training larger scale models. The method is interesting and the experiments are relatively comprehensive. The paper considers two regression methods and verified the predicted mixture on a 1B model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. there are some details of the regression model that's not clearly explained. It is not clear to me how the authors fit the regression model, how many data points are used to fit the models. It would be nice to have a pseudocode or open-sourced script\n2. the mixture weights for the baseline DoReMi is directly taken from the paper. However, it's not clear if the optimal weights learned using the DoReMi method would be different due to model and data processing differences. It's probably better to re-learn the weights using the small model in the current experiment setup."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce RegMix, an automated data mixture method that formulates data mixture as a regression problem. RegMix achieves a 6.3% improvement over human selection on the HellaSwag benchmark, with only a 2% extra training FLOPs."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024regmix,\ntitle={RegMix: Data Mixture as Regression for Language Model Pre-training},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5BjQOUXq7i},\nnote={under review}\n}"
},
"abstract": {
"value": "The data mixture for large language model pre-training significantly impacts performance, yet how to determine an effective mixture remains unclear. We propose RegMix to automatically identify a high-performing data mixture by formulating it as a regression task. RegMix involves training a set of small models with diverse data mixtures and fitting a regression model to predict their performance given their respective mixtures. With the fitted regression model, we simulate the top-ranked mixture and use it to train a large-scale model with orders of magnitude more compute. To empirically validate RegMix, we train 512 models with 1M parameters for 1B tokens of different mixtures to fit the regression model and find the optimal mixture. Using this mixture we train a 1B parameter model for 25B tokens (i.e. 1000x larger and 25x longer) which we find performs best among 64 candidate 1B parameter models with other mixtures. Further, our method outperforms both human selection and DoReMi in terms of both validation loss and downstream performance. Our experiments also show that (1) Data mixtures significantly impact performance with single-task performance variations of up to 14.6%; (2) Web corpora rather than data perceived as high-quality like Wikipedia have the strongest positive correlation with downstream performance; (3) Domains interact in complex ways often contradicting common sense, thus automatic approaches like RegMix are needed."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"language model pre-training",
"data mixture",
"regression"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/bfb9ffa8a1a126165593b0b88296080ae7f49437.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/977302e354f03fd926f21dcc735e4733ebc1ce5c.zip"
},
"title": {
"value": "RegMix: Data Mixture as Regression for Language Model Pre-training"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5CHcmVzbAz | SePPO: Semi-Policy Preference Optimization for Diffusion Alignment | main | Active | Reinforcement Learning;Diffusion Model;Image Generation;Video Generation | generative models | 5;5;5;5 | 3;5;4;3 | 3;2;2;2 | 2;2;2;2 | 3;3;3;2 | 5 | 3.75 | 2.25 | 2 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper proposes a combination of semi-policy optimization with the AAF mechanism without requiring reward models or paired human-annotated data.\n2. Comprehensive Empirical Validation: The work provides comprehensive experimental validation across both text-to-image and text-to-video domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes SePPO, a novel preference optimization method for aligning diffusion models with human preferences without requiring reward models or paired human-annotated data. The key innovations are: 1) Using previous checkpoints as reference models to generate on-policy reference samples, 2) Introducing a strategy for reference model selection that enhances policy space exploration, and 3) Developing an Anchor-based Adaptive Flipper (AAF) to assess reference sample quality. The method shows strong performance on both text-to-image and text-to-video generation tasks, outperforming previous approaches across multiple metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I find the definition of \"better\" (L277 in bold) to be confusing and the same term shows up in Theorem 4.1 seems lacking rigor. I think what the authors mean is that \"closer\" to the preferred sample $x_0^w$, but closer to $x_0^w$ does not necessarily mean better since it depends on the metric considered. Given a reward function where $r(x_0^w)>r(x_0^l)$, whether a new sample $x_1, x_2$ has a higher reward is dependent on the reward landscape, not how close it is to $x_0^w$.\n2. Given 1, I think the main spirit of the proposed method is to fit the preferred distribution, similar to SPIN. In that sense, I am confused about why the proposed method is expected to do better since the advantage of the proposed method compared to SPIN is not clearly discussed in the paper. For example, what is missing in SPIN that the proposed method can do? \n3. Lack of human evaluation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weakness section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper performs experiments on both T2I and T2V generation tasks.\n\n- The paper is easy to follow and understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes SePPO, leveraging two techniques to improve the previous SPIN-Diffusion method, including 1) randomly selecting the reference policy from all previous checkpoints and 2) A heuristic (anchor-based criterion) to determine whether a reference sample will likely win or lose. The paper performs experiments on both T2I and T2V tasks to demonstrate the effectiveness of their methods by comparing them with several different methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Technical contributions**: The proposed techniques involve 1) randomly selecting the reference policy from the checkpoint at iteration 0 (DITTO) and the latest iteration t - 1 (SPIN-Diffusion). 2) A heuristic (anchor-based criterion) to determine whether a reference sample will likely win or lose, and the learning loss is adjusted accordingly. Thus, the technical contributions contributions of this work are limited.\n\n- **Incorrect Definition of on-policy examples**: The definitions of on-policy and off-policy learning are well-defined in reinforcement learning literature [1]. Specifically, on-policy learning refers to the settings when the training examples are sampled from the current policy ($\\pi_\\theta$) being learned. However, this work treats the reference samples $\\mathbf{x}^{ref}\\_0$ sampled from $\\pi\\_\\{ref}$ as \"on-policy\" examples (e.g., Line 250 - 252), **which is incorrect**. In fact, both $\\mathbf{x}^{ref}\\_0$ and $\\mathbf{x}^w\\_0$ are off-policy samples since none of them is sampled from the current policy $\\pi_\\theta$.\n\n- **Limited Performance Improvement**: According to Tables 1 and 2, the performance improvement of the SePPO$^w$ over SPIN-Diffusion is trivial in terms of PickScore and HPSv2 score. The improvement is only obvious when evaluating with ImageReward. Additionally, SPIN-Diffusion even outperforms the proposed SePPO$^w$ by an obvious margin in terms of Aesthetic score. Therefore, I would recommend conducting a human evaluation to corroborate the results as in [2].\n\n- **Evaluation protocol for video generation tasks**: The metrics used in Table 3 are not meaningful enough. 
As the model is trained on ChronoMagic-Bench-150, I recommend reporting the results by following the evaluation protocol in Tables 3 & 4 of the ChronoMagic-Bench paper [4].\n\n- **Missing Citations**: Please cite the related works [2] and [3], which tackle T2I and T2V model alignment by learning from reward models.\n\n**Minor points**\n1. Line 025: \"winning or losing images\" --> \"winning or losing examples\". Since the proposed method is not limited to image generation, please revise similar errors throughout the paper. \n\n2. I recommend not using too many subsubsections. Furthermore, avoid using unnecessary new lines when formalizing the optimization problems (e.g., Equation 5). If you are worried about the page limit, please include more qualitative examples in the main text.\n\n3. I suggest selecting a different abbreviation for your method. PPO [5] in is widely recognized as an algorithm focused on learning a reward function. Since your SePPO is in the self-play finetuning family and is unrelated to PPO, using this acronym may lead to confusion.\n\n[1] Sutton, Richard S. \"Reinforcement learning: An introduction.\" A Bradford Book (2018).\n\n[2] Li et al., \"Reward Guided Latent Consistency Distillation\", TMLR 2024\n\n[3] Li et al., \"T2V-Turbo: Breaking the Quality Bottleneck of Video Consistency Model with Mixed Reward Feedback\", NeurIPS 2024\n\n[4] Yuan et al., \"ChronoMagic-Bench: A Benchmark for Metamorphic Evaluation of Text-to-Time-lapse Video Generation\". NeurIPS 2024\n\n[5] Schulman et al., \"Proximal Policy Optimization Algorithms\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Generally speaking, methods like Diffusion-DPO requires human-annotated data pairs and SePPO does not. The upper bound of aligning model outputs using annotated data pairs should be higher than SePPO, which solely relies on the model itself. Can the authors present an explanation for this?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. **Practical Approach**: The SePPO method offers a practical solution for preference alignment without the need for human annotation or a reward model, which reduces significant labor costs.\n2. **Clear Writing and Presentation**: The submission is well-written and formatted, making it easy to follow and understand.\n3. **Effective Sample Filtering**: The Anchor-based Adaptive Flipper (AAF) criterion is a useful addition, as it helps to filter uncertain samples and enhances model robustness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Semi-Policy Preference Optimization for fine-tuning diffusion models in visual generation, bypassing reward models and human annotations. SePPO uses past model checkpoints to generate on-policy reference samples, replacing “losing” images, and focuses on optimizing only \"winning\" samples. An anchor-based criterion selectively learns from these reference samples, mitigating performance drops from uncertain quality. SePPO outperforms existing methods in text-to-image and shows strong results in text-to-video benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Literature Review and Comparison**: While preliminary experiments are presented for text-to-video models, the paper lacks a thorough literature review on this topic and has limited comparisons in the text-to-video experiments, making the evaluation seem somewhat incomplete. Improving the Related Work section would strengthen the context and position of SePPO.\n2. **Table Readability**: Moving the annotation explanations to the table captions could improve table readability.\n3. **Unclear Justification for Theorem 4.1**: The key of instantiating SePPO is the Theorem 4.1, however, it remains a question to me about this rationality. Specifically, if the reference model has a higher probability of generating noise compared to the current model, then in this situation, we should say this model is a better model for generating this image. We cannot assert the quality of this generated image."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThis paper employs thorough quantitative metrics such as PickScore and HPSv2. The use of ablation studies is commendable, clearly delineating the contributions of individual components of the proposed model.\n2.\tThe results are well-presented, with clear visualizations and comprehensive tables that facilitate an understanding of performance metrics across different models.\n3.\tThe proposed method is simple and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a diffusion model to improve sample quality and diversity in generative tasks. The authors introduce an algorithm that integrates an Anchor-based Adaptive Flipper. To substantiate the claims, a series of comprehensive experiments, including quantitative evaluations against several state-of-the-art models and detailed ablation studies, are presented."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe proposed method is a bit tricky, which may limit its contribution. Therefore, I suggest the authors conduct a deeper theoretical analysis of the proposed method.\n2.\tThe insights from Theorem 4.1 are quite intuitive and easy to understand. I suggest the authors put Theorem 4.1 in the Appendix.\n3.\tThe essential reason why randomly sampling previous checkpoints as a reference model with AAF achieves the best performance is still unclear. I suggest the authors theoretically analyze the effectiveness of the proposed method, which could significantly strengthen this work. At least, the authors need to analyze in which cases, using the latest model is better than randomly sampling previous checkpoints as the reference model."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose SePPO, a method to align diffusion models without reward models or human-annotated data. SePPO outperforms previous methods, achieving a PickScore of 21.57 on Pick-a-Pic and excels in text-to-video tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024seppo,\ntitle={Se{PPO}: Semi-Policy Preference Optimization for Diffusion Alignment},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5CHcmVzbAz},\nnote={under review}\n}"
},
"abstract": {
"value": "Reinforcement learning from human feedback (RLHF) methods are emerging as a way to fine-tune diffusion models (DMs) for visual generation. However, commonly used on-policy strategies are limited by the generalization capability of the reward model, while off-policy approaches require large amounts of difficult-to-obtain paired human-annotated data, particularly in visual generation tasks. To address the limitations of both on- and off-policy RLHF, we propose a preference optimization method that aligns DMs with preferences without relying on reward models or paired human-annotated data. Specifically, we introduce a Semi-Policy Preference Optimization (SePPO) method. SePPO leverages previous checkpoints as reference models while using them to generate on-policy reference samples, which replace “losing images” in preference pairs. This approach allows us to optimize using only off-policy “winning images”. Furthermore, we design a strategy for reference model selection that expands the exploration in the policy space. Notably, we do not simply treat reference samples as negative examples for learning. Instead, we design an anchor-based criterion to assess whether the reference samples are likely to be winning or losing images, allowing the model to selectively learn from the generated reference samples. This approach mitigates performance degradation caused by the uncertainty in reference sample quality. We validate SePPO across both text-to-image and text-to-video benchmarks. SePPO surpasses all previous approaches on the text-to-image benchmarks and also demonstrates outstanding performance on the text-to-video benchmarks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement Learning",
"Diffusion Model",
"Image Generation",
"Video Generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f0b9908ccbb820d1198bdba343d932f5f114a6eb.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/6272a7664949b2c7cd8b9e72c912a0a6e1633a29.zip"
},
"title": {
"value": "SePPO: Semi-Policy Preference Optimization for Diffusion Alignment"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5DT0t5NylU | Robin3D: Improving 3D Large Language Model via Robust Instruction Tuning | main | Active | 3D Large Language Model;3D Multimodal Learning | foundation or frontier models, including LLMs | 5;5;5;6;6 | 3;4;5;3;3 | 3;2;3;3;3 | 2;2;2;4;3 | 3;3;3;3;3 | 5.4 | 3.6 | 2.8 | 2.6 | 3 | -0.612372 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Table 1 does not clarify what kinds of LLM each method uses but it is worth doing that since multimodal LLM performance usually depends on LLM performance. \\\n- What is the baseline in Table 3? I couldn't find out the network architecture of the baseline."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "• The paper introduces a large-scale 3D scene-instruction dataset that includes diverse instruction types, integrating varied instruction styles, existing benchmark instructions, and challenging adversarial instructions, enhancing the model’s robustness and generalization.\\\n• It proposes novel architectures that effectively leverage both 2D and 3D object-centric features, enabling richer spatial understanding and stronger object-grounding capabilities in complex 3D environments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Robin3D, a 3D large language model trained to follow instructions in 3D environments using the Robust Instruction Generation (RIG) engine, which creates a one-million-sample dataset. RIG generates Adversarial and Diverse instruction data to improve Robin3D’s discriminative power and generalization. Robin3D employs a Relation-Augmented Projector for spatial understanding and IDFeature Bonding for object grounding, achieving notable improvements over previous models, including a 7.8% gain in grounding and 6.9% in captioning without task-specific fine-tuning.\nWhile Robin3D performs impressively across multiple ScanNet benchmarks, as noted in the weaknesses, some concerns remain regarding its network architecture and experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "• Relies on off-the-shelf 3D instance segmentation models trained on ScanNet with closed-set categories. I recommend the authors to consider Segment3D [1] for open-vocab, class-agnostic segmentation.\\\n• Cropping instance-level point clouds and applying object-level 3D point cloud CLIP (Uni3D) can limit the receptive fields and be computationally heavy. I recommend the authors to try scene-level CLIP (OpenScene [2], RegionPLC[3]) and then cropping the output features.\\\n• Table 1 reports only traditional NLP metrics (e.g., BLEU, CIDEr, METEOR, Rouge). I recommend the authors include LLM-based evaluation (e.g., GPT or Mistral) for better alignment with human assessment.\\\n• The experiments are limited to the ScanNet dataset. I recommend that the authors expand to other datasets (e.g., SceneVerse [4]) for broader evaluation.\n\n[1] Huang et al., \"Segment3D: Learning Fine-Grained Class-Agnostic 3D Segmentation without Manual Labels\", ECCV, 2024.\\\n[2] Peng et al., \"OpenScene: 3D Scene Understanding with Open Vocabularies\", CVPR, 2023.\\\n[3] Yang et al., \"RegionPLC: Regional Point-Language Contrastive Learning for Open-World 3D Scene Understanding\", CVPR, 2024.\\\n[4] Jia et al., \"SceneVerse: Scaling 3D Vision-Language Learning for Grounded Scene Understanding\", ECCV, 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The motivation in the paper is somewhat mixed. Although it emphasizes pre-training with adversarial samples, it also highlights improvements through the Relation-Augmented Projector (RAP) and ID-Feature Bonding (IFB), which may seem like an attempt to pad contributions.\n\n2. The ablation study shows that RAP and IFB contribute less to 3D QA (with low improvements in Table 3's ScanQA and SQA3D) but significantly help 3D grounding. Can the authors explain why?\n\n3. The paper lacks details on the prompts used for Adversarial Data Generation and the data creation process. Is the input for adversarial samples only the ground truth prompt?\n\n4. The ablation for Adversarial Data is insufficient, making it unclear whether the performance improvement is due to the increase in data volume or specifically from the adversarial samples.\n\n5. The authors should compare methods like VLM-Grounder[1] and Coarse Correspondences[2] using video as a modality.\n\n6. Should the authors consider extending their approach to non-ScanNet scenes?\n\n7. The pre-training work should provide training configurations and training time.\n\n8. Can the proposed RIG be extended to the point level to enhance point-level LLM performance, such as with PointLLM [3] or GPT-4Point [4]? Additionally, could it be generalized to outdoor 3D LLMs like DriveLM [5] or LiDAR-LLM [6]? It would be beneficial for the authors to discuss this in the paper.\n\n[1] A VLM Agent for Zero-Shot 3D Visual Grounding\n\n[2] Coarse Correspondence Elicit 3D Spacetime Understanding in Multimodal Language Model\n\n[3] PointLLM: Empowering Large Language Models to Understand Point Clouds\n\n[4] GPT4Point: A Unified Framework for Point-Language Understanding and Generation\n\n[5] DriveLM: Driving with Graph Visual Question Answering\n\n[6] LiDAR-LLM: Exploring the Potential of Large Language Models for 3D LiDAR Understanding"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Reasonable motivation\n - Expands existing ScanNet 3D text annotations through the data engine.\n2. Strong experimental results\n - Demonstrates excellent performance.\n3. Clear and complete paper writing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Robin3D, a powerful 3D Large Language Model (3DLLM) trained using large-scale instruction-following data generated by an innovative Robust Instruction Generation (RIG) engine to address the lack of robustness and diversity in current 3DLLMs' training data.\n\nBesides, Robin3D incorporates two important modules: Relation-Augmented Projector (RAP) and ID-Feature Bonding (IFB). RAP enhances the model's understanding of spatial relationships between objects, while IFB strengthens the connection between object IDs and features, improving the model's referring and grounding capabilities, enabling it to better handle complex instructions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have some questions about this paper that need further discussion. Please see them below.\n\nIf the authors can address my concerns, I am willing to raise my score."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please address the questions raised in the weakness section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper targets the challening problem of 3D LLM for ground task as well as caption task.\n2. To address the problem, the paper presents a robust instruction generation engine and 1M instruction-following data has been presented.\n3. The paper obtains promising experimental results on five 3D multimoal learning benchmarks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper targets the interesting problem of instruction tuning on 3D LLMs. As there lacks sufficient dataset, the paper introduces a new 1M instruction-tuning datset, which contains 344K adversarial samples, 508K diverse samples as well as 165K benchmark training set samples. Based on the dataset, the proposed algorithm, called Robin3D, obtains promising results in the ground task as well as caption task."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Will the 1M 3D instruction dataset be release to the public? The main contribution of the paper lies on the datasets, thus whether the dataset will be released to public is important to evaluate the contribution of the paper.\n2. The dataset seems to be designed specifially for the 3D indoor environment. How about the generation ability of the dataset and the model used for the outdoor environment, like the 3D street?\n3. Is it possible to provide an ablation study on different of training examples? It would be better to know the model performance with different number of training data.\n4. The model is based on Vicuna-7B-v1.5 backbone. How about the performance if other LLM models are utilized? Besides, if larger LLM model is utilized, is a larger training dataset can further boost the performance?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. This paper employs the same detectors and 2D&3D encoders as chat-scene (Mask3D, Uni3D, DINO-v2). What are the significant innovations of this model compared to chat-scene?\n\n2. In Table 1, what do the grey sections referring to \"ground truth question-relative objects annotations\" specifically indicate? Is the explicit positional information P introduced by the dedicated detector Mask3D on the ScanQA test set considered as \"ground truth question-relative objects annotations\"?\n\n3. Results of Baseline(+ RAP & IFB) in Table3 are the same as the benchmark results in Table 2. In the \"ABLATION STUDY\" section, it seems there might be a confusion regarding the order of incorporating modules and datasets. Benchmark(+ Adversarial & Diverse) should include RAP&IFB and encompass all datasets. Why are the results of the ablation study (+ Adversarial & Diverse) inconsistent with the results in Table 1?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The RIG engine's capability to generate adversarial and diverse instruction data significantly enhances the robustness and generalizability of 3DLLMs. The innovative proposal of adversarial data may help mitigate the hallucination tendencies of large models. The collection of diverse instructions, expanded by GPT to enrich the diversity of expressions, may alleviate the issue of rigid model outputs.\n2. The integration of RAP and IFB modules improves the model's spatial understanding and object grounding capabilities.\n3. Robin3D achieves superior performance across multiple benchmarks, showcasing its effectiveness and versatility.\n4. The models are trained on the whole task data, rather than on individual tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduce a 3DLLM Robin3D trained on their proposed dataset generated by RIG, a pipeline to acquire diverse and discriminative data. Robin3D incorporates two key modules, the Relation-Augmented Projector (RAP) and ID-Feature Bonding (IFB), to enhance spatial understanding and object grounding. The model demonstrates state-of-the-art performance across five 3D multimodal learning benchmarks without task-specific fine-tuning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The module's innovativeness is found to be lacking: RAP utilizes linear layers to separately connect the 3D features from the scene, individual object 3D features, and positional information features, followed by concatenation. A possible baseline (chat-scene) employs the exact same encoders, using linear layers to connect 3D features and positional features, and then concatenating individual object 3D features. The only modification made is the interchange of inputs to the linear layers. Similarly, IFB introduces an ending ID token to signal the end of an object presentation, followed by a rearrangement of vision tokens and prompt tokens. This method of simply altering the prompt tokens is not particularly innovative.\n\n2. Are the results state-of-the-art (SOTA): The experimental results compared against the Chat-Scene model have been open-sourced and are being continuously updated. Prior to the submission deadline for this conference, the accuracy on the ScanRefer dataset had already surpassed 61% and 55%, outperforming the method proposed in this paper. This paper should have utilized the most recent projects and results as benchmarks; otherwise, the effectiveness of the proposed method cannot be ascertained.\n\n3. Model generality: Mainstream approaches about 3DLLMs typically employ a single, well-trained model to evaluate performance across multiple tasks. The joint training without task-specific fine-tuning method described in the paper does not represent a contribution unique to this work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the weakness section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper constructs a large instruction-following fine-tuning dataset containing adversarial and diverse samples. \n2. The zero-shot performance improvement of the trained Robin3D appears evident across various benchmarks and the ablation experiments clearly demonstrate the gains of different designs in the paper.\n3. The writing of the article is fluent and easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper first constructs 1 million instruction-following data, including adversarial samples and diversified samples to bridge the drawbacks of existing 3D MLLM instruction following fine-tuning datasets. To better handle the proposed complex instructions, this paper first incorporates Mask3D and Relation-Augmented Projector to enhance spatial understanding, and then improve the object referring and grounding ability through ID-Feature Bonding. The trained model Robin3D shows superior performance across five widely used 3D multimodal learning benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The related work section lacks clarity on the novelty and advantages of the RAP and IFB modules in comparison to existing studies. \n(1) Explain how object IDs are linked to object features in previous research and discuss the benefits of wrapping these features with identical ID tokens before and after them.\n(2) Describe how earlier studies extract and utilize 3D and 2D features, and highlight the advantages of introducing Mask3D information using RAP.\n\n2. How will the relative proportions of diverse and adversarial samples generated with RIG affect the performance of Robin3D? \nPlease conduct ablation studies to examine and analyze how datasets with varying proportions of adversarial and diverse samples influence Robin3D's performance across different tasks.\n\n\n3. If the dataset constructed in this paper is used to fine-tune existing task-specific or joint trained models, will it provide consistent performance gains? \nThe authors could consider selecting 1 or 2 task-specific and jointly trained models, respectively, and tuning them on the proposed instruction-following tuning dataset to further demonstrate the contribution of this dataset to the community."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024robind,\ntitle={Robin3D: Improving 3D Large Language Model via Robust Instruction Tuning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5DT0t5NylU},\nnote={under review}\n}"
},
"abstract": {
"value": "Recent advancements in 3D Large Language Models (3DLLMs) have highlighted their potential in building general-purpose agents in the 3D real world, yet challenges remain due to the lack of high-quality robust instruction-following data, leading to limited discriminative power and generalization of 3DLLMs. In this paper, we introduce Robin3D, a powerful 3DLLM trained on large-scale instruction-following data generated by our novel data engine, Robust Instruction Generation (RIG) engine. RIG generates two key instruction data: 1) the Adversarial Instruction-following data, which features mixed negative and positive samples to enhance the model's discriminative understanding. 2) the Diverse Instruction-following data, which contains various instruction styles to enhance model's generalization. As a result, we construct 1 million instruction-following data, consisting of 344K Adversarial samples, 508K Diverse samples, and 165K benchmark training set samples. To better handle these complex instructions, Robin3D first incorporates Relation-Augmented Projector to enhance spatial understanding, and then strengthens the object referring and grounding ability through ID-Feature Bonding. Robin3D consistently outperforms previous methods across five widely-used 3D multimodal learning benchmarks, without the need for task-specific fine-tuning.\nNotably, we achieve a 7.8\\% improvement in the grounding task (Multi3DRefer) and a 6.9\\% improvement in the captioning task (Scan2Cap)."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"3D Large Language Model",
"3D Multimodal Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/77cbe15e88e27c58b53dd63c7462e33a33c5ed00.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Robin3D: Improving 3D Large Language Model via Robust Instruction Tuning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5DUekOKWcS | Asynchronous Federated Reinforcement Learning with Policy Gradient Updates: Algorithm Design and Convergence Analysis | main | Active | Federated Learning;Reinforcement Learning;Asynchronous | reinforcement learning | 3;5;5;5;6 | 4;3;3;3;4 | 2;3;3;3;3 | 2;3;2;2;3 | 2;3;3;2;2 | 4.8 | 3.4 | 2.8 | 2.4 | 2.4 | -0.25 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See my questions from the previous section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Contributions claimed in the paper include,\n\n--Proposes a new asynchronous FedRL algorithm (AFedPG) tailored to policy gradient updates, using a delay-adaptive lookahead technique to manage lagging updates in asynchronous settings.\n\n-- Provides theoretical convergence guarantees, including global and first-order stationary point convergence, for the asynchronous federated policy-based RL.\n\n-- Achieves a linear speedup in sample complexity with an increasing number of agents, reducing the per-agent complexity from $O(\\epsilon^{-2.5})$ to $O(\\epsilon^{-2.5}/N)$. (However, the proof is unclear and it is hard to see how the authors can avoid a dependence on the delay in the sample complexity.)\n\n -- Improves time complexity over synchronous methods by reducing the dependency on the slowest agent’s computational time, with gains highlighted in scenarios of high computational heterogeneity.\n\n-- Empirically validates AFedPG's performance in various MuJoCo environments, demonstrating faster convergence (time-wise) over synchronous FedPG and other baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper aims to enhance the efficiency of federated reinforcement learning (FedRL) by introducing an asynchronous framework, AFedPG, which leverages policy gradient (PG) updates from multiple agents without requiring synchronized updates. This approach is designed to address issues related to delayed updates and computational heterogeneity, which are common challenges in federated setups, especially with varying agent speeds and capacities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In general, the paper is not clearly written. I don't see how the authors were able to avoid a dependence on the delay in their sample complexity. Their current derivations for bounding the error term (from the delay) have many typos and are hard to follow. Specific concerns/questions of the paper include:\n\n-- Step 4 in Algorithm 2 is confusing. Where does the local agent get $d_{k-1}$ from? Did the authors mean $d_{k-\\delta_k}$ instead? If the authors meant $d_{k-1}$, the current algorithm descriptions do not mention how $d_{k-1}$ can be made available to agent $i$.\n\n-- A major component of the proof is bounding the error term $e_k := d_{k-\\delta_k} - \\nabla J(\\theta_k)$, which arises from the delay. Equation (30) in the appendix provides a derivation of how $e_k$ can be expressed (and subsequently bounded). However, there seems to be serious typos in equation (30). For instance, in the first line, I am not sure why a term $d_{\\delta_{k-1}}$ appears, when $e_k$ is actually $d_{k-\\delta_k} - \\nabla J(\\theta_k)$. This makes it difficult to follow the argument in this derivation, and there is also no explanation of the derivation, which might have made it easier to follow the argument flow. Given that this is a particularly important term to bound to derive either first-order or global convergence rates, the authors should make an effort to clarify and explain these derivations.\n \n-- The current convergence bound seems to have no dependence on the delay in the network, which is $N$ in the worst-case (e.g. assuming cyclic update). This is somewhat confusing to me; intuitively, even with a delay-adaptive step size for the $\\theta$ update, there should be some price to pay for a cyclic delay structure. My current understanding is that perhaps the authors were able to bypass the dependence on the delay by their handling of the gradient-bias term $e_k$ (caused by the delay). 
However, given that the current derivation of bounding $e_k$ is highly unclear (see my earlier point), it is not clear to me whether the result as currently stated actually holds. If it holds, the authors should make it a lot clearer how and why they are able to avoid the dependence on the delay, as this is a key part of their contribution. \n\n-- The definition of the global time is unclear. The authors should make it more precise, and have a formal statement and proof of their current stated bound on the global time being $O(\\frac{\\bar{t}\\epsilon^{-2.5}}{N})$, where $\\bar{t} = \\frac{1}{\\sum_{i=1}^N \\frac{1}{t_i}}$. \n\n--On a related note, the definition of $t_i$ seems a little unclear to me, given that at different iterations, agent $i$ might require varying amounts of time (i.e. there shouldn't be a single time complexity $t_i$ for each agent $i$). The authors should make their definition of what they mean by $t_i$ more precise."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "4. Why does Proof of theorems lack the index of agent i? Since the server does not aggregate gradients or parameters from agents periodically, Fed RL is not applicable in this paper. Besides, it is just similar to [3]. Notations also make confusing. \n\n5. What’s the technical contributions beyond existing FedRL? Technical differences of AFedPG compared to FedPG seems limited.\n\n6. Authors first get the results of global convergence, then FOSP results. Why FOSP results are placed first in main text?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Convergence results are provided. \n2. Asynchronous federated reinforcement learning framework is proposed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an asynchronous federated reinforcement learning framework. Then it introduces a delay-adaptive lookahead technique and employs normalized updates to integrate policy gradients to deal with the challenges brought by the asynchrony. Furthermore, the paper provides the theoretical global convergence bound. The experiments verify the improved performance of the proposed algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper is not built on a federated framework. FedRL is designed to address heterogeneous environments and allow local agents to perform multiple iterations [1,2]. However, these are not considered in this paper.\n\n[1] Momentum for the Win: Collaborative Federated Reinforcement Learning across Heterogeneous Environments, ICML24.\n[2] Federated Reinforcement Learning with Environment Heterogeneity, AISTATS22.\n\n2. This work lack necessary comparisons with current works. Actor-critic is a policy-based approach. This paper needs careful comparisons in details with [3] since both emphasize the asynchrony, not mentioned in Introduction briefly.\n[3] Towards understanding asynchronous advantage actor-critic: convergence and linear speedup.\n\n3. Technical contributions are limited. Authors claimed that even if all agents have an identical environment, each agent collects samples according to different policies because of the delay. This dynamic nature makes both the problem itself and the theoretical analysis challenging. However, this is somehow solved by [3]. The challenges brought by the features of Fed RL are not considered in this paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The work provides asynchronous synchronization updates tailored for federated RL.\n2. The work presents a tight sample complexity analysis of the proposed algorithm, demonstrating a linear speedup that aligns with the single-agent state-of-the-art."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work investigates federated reinforcement learning with asynchronous synchronizations to improve the time complexity. They introduce the asynchronous federated policy gradient (AFedPG), which tackles lagged policies using a delay-adaptive lookahead. In addition, they present a sample complexity analysis of the algorithm, demonstrating a linear speedup compared to the single-agent scenario."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The application of asynchronous updates from federated learning to federated policy gradients appears to be incremental, especially since much of the supervised federated learning literature has examined how to manage lagged models, while existing federated reinforcement learning research focuses on addressing the dynamic nature of reinforcement learning in federated settings.\n2. It appears that a momentum method was introduced for federated policy gradients in heterogeneous environments to handle online sample collections dependent on $\\theta$ in [1]. While the paper emphasizes its novelty by discussing the momentum design (delay-adaptive lookahead), which differs from asynchronous supervised federated learning, it remains uncertain whether this concept is genuinely unique in comparison to prior literature in federated reinforcement learning, which also addresses the issue of online sample collections that vary with policy updates.\n\n[1] Momentum for the Win: Collaborative Federated Reinforcement Learning across Heterogeneous Environments, Wang et al., ICML 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* In Line 268, you mention *the set of active agents*. Does it mean the agents that can apply global iteration? If so, then the following paragraph mentions that *only one gradient to update the model from the agent who has finished its local computation.* In other words, does it allow more than one agent to apply policy gradient at the same iteration?\n* For Figure 3, could you please let PG (N=1) and AfedPG (N=2) train even longer to see if they can converge to a similar reward as the other two? If they cannot, I feel curious as to why they can't. \n* Is there any analysis or experiment of communication cost?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* Numerical experiments on MuJoCo demonstrate impressive results that support the better time complexity of the proposed method\n* Both FOSP and global sample complexity match the state-of-the-art while the global time complexity can have a tighter bound with heterogeneous arrival times"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a policy-based federated RL with an asynchronous setting to handle varying arrival times of policy gradient updates. Specifically, the authors analyzed the global and FOSP sample complexity as well as time complexity with a concrete algorithm design. The authors also provided simulation results on MuJoCo, which tackle sample and time complexity issues separately. The proposed method is more practical and can be adaptable to various computing heterogeneity scenarios."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The ultimate goal of federated RL is to find the trade-off between sample and communication complexity while the emphasis of this work on communication complexity/strategy is limited and not clear to me. Please elaborate more about what the threshold or event triggered for any agent to have the synchronization/communication with the server in your proposed framework.\n* There are some typos in the manuscript. For example, you write *MoJuCo* instead of *MuJoCo* in the caption of Figures 3 and 4."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed framework handles the delayed arrival of policy-gradient and reduces the waiting time compared to the algorithm for the homogeneous setting.\n\n2. The authors propose their special step size designs to cancel out a second-order error term when conducting the error analysis, which serves as a technical novelty.\n\n3. Numerical experiments demonstrate that the authors accelerate the training process compared to the synchronous algorithm."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an asynchronous federated reinforcement learning framework termed AFedPG for the policy gradient algorithm. It designs a delay-adaptive lookahead technique that can effectively handle heterogeneous arrival times of policy gradients. This work shows theoretical linear speedup in terms of the norm for policy gradient and verifies the speedup effect numerically."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Issues in Section 4. The authors are encouraged to explain more about the concepts of active agents, concurrency, and delay. In algorithm 2, the authors are encouraged to explain more details about model sharing from the central server as how the agents hold $d_{k-1}$ and $\\theta_{k-1}$ is not explicitly explained. In addition, the authors are encouraged to explain the relationship between their algorithms and the single-agent and homogeneous counterparts in the literature. Last, the authors assume that the agents can sample a trajectory with infinite lengths, which is impossible in practice. The authors are recommended to explain more on such assumptions.\n\n2. Issues in Section 5. (a) In equations 10 and 11, RHS contains a constant term that does not depend on $K$, which originates from the function approximation error as indicated in the appendix. The authors are encouraged to explain this term in the main paper. (b) The authors are encouraged to explain how they get the total waiting time in line 394.\n\n3. Issues in Appendix B (proofs). (a) The authors are encouraged to explain more about the definitions and notations that are already established in the literature, for example, $F_\\rho(\\theta),\\mu_F,\\sigma_g$. (b) In Lemmas B.6 and B.7, the authors are recommended to point out the cited lemma in the references. (c) The second term in line 1084 should be $(\\mathbb{E}\\cdot^2)^{1/2}$. (d) In equations 37 and 38, there are typos related to $\\nabla$. (e) In line 1028, there is a typo related to $d_{\\delta_{k-1}}$."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a asynchronous federated reinforcement learning framework, which constructs a global policy through collaboration among $N$ agents using policy gradient (PG) updates. It improves both sample and time complexity."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024asynchronous,\ntitle={Asynchronous Federated Reinforcement Learning with Policy Gradient Updates: Algorithm Design and Convergence Analysis},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5DUekOKWcS},\nnote={under review}\n}"
},
"abstract": {
"value": "To improve the efficiency of reinforcement learning (RL), we propose a novel asynchronous federated reinforcement learning (FedRL) framework termed AFedPG, which constructs a global model through collaboration among $N$ agents using policy gradient (PG) updates. To address the challenge of lagged policies in asynchronous settings, we design a delay-adaptive lookahead technique \\textit{specifically for FedRL} that can effectively handle heterogeneous arrival times of policy gradients. We analyze the theoretical global convergence bound of AFedPG, and characterize the advantage of the proposed algorithm in terms of both the sample complexity and time complexity. Specifically, our AFedPG method achieves $\\mathcal{O}(\\frac{{\\epsilon}^{-2.5}}{N})$ sample complexity for global convergence at each agent on average. Compared to the single agent setting with $\\mathcal{O}(\\epsilon^{-2.5})$ sample complexity, it enjoys a linear speedup with respect to the number of agents. Moreover, compared to synchronous FedPG, AFedPG improves the time complexity from $\\mathcal{O}(\\frac{t_{\\max}}{N})$ to $\\mathcal{O}({\\sum_{i=1}^{N} \\frac{1}{t_{i}}})^{-1}$, where $t_{i}$ denotes the time consumption in each iteration at agent $i$, and $t_{\\max}$ is the largest one. The latter complexity $\\mathcal{O}({\\sum_{i=1}^{N} \\frac{1}{t_{i}}})^{-1}$ is always smaller than the former one, and this improvement becomes significant in large-scale federated settings with heterogeneous computing powers ($t_{\\max}\\gg t_{\\min}$). Finally, we empirically verify the improved performance of AFedPG in four widely-used MuJoCo environments with varying numbers of agents. We also demonstrate the advantages of AFedPG in various computing heterogeneity scenarios."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Federated Learning",
"Reinforcement Learning",
"Asynchronous"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/bf6c30f2c6cac8db3a53774e3828ce270be9907e.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/cf958374610169605ee851cde1bca686e5b73bae.zip"
},
"title": {
"value": "Asynchronous Federated Reinforcement Learning with Policy Gradient Updates: Algorithm Design and Convergence Analysis"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5E6VOD7W0z | On Erroneous Agreements of CLIP Image Embeddings | main | Active | Multimodal Learning;CLIP;LLaVA;cosine similarity;erroneous agreement | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;5;5;5 | 5;4;3;2 | 2;3;2;3 | 2;3;2;2 | 2;3;3;3 | 4.5 | 3.5 | 2.5 | 2.25 | 2.75 | -0.774597 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I recommend including Spearman's rank correlation coefficient in Table 1 to enhance the analysis. Additionally, a more comprehensive study would be valuable. For example, could the authors provide Spearman's rank correlation coefficient and cosine similarity for the questions with the highest- and lowest-accurate answers?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper provides some interesting insights to show that the metric commonly used to measure the embedding similarity (Cosine Similarity) does not depict all aspects of vector pairs. Therefore, it suggested a complementary metric, Spearman’s rank correlation coefficient. However, table 1 only provides the average Cosine Similarity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a comprehensive study to analyze the answers supplied by the VLMs. Specifically, It compares the performances of CLIP and LlaVa-1.5-7B in the What’s Up and MMVP benchmarks. These benchmarks ask questions about a pair of images that contain the same objects and background but in different positions. This paper shows that the LlaVa-1.5-7B can perform better than CLIP in these benchmarks even when LlaVa uses CLIP as a visual encoder, and the average cosine similarity of the CLIP embedding of the image pair is greater than 0.95. Moreover, it provides ablation studies to explain this behavior."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper is challenging to follow, primarily due to the absence of a clear statement of its main contributions in the Introduction. Its content closely parallels the CVPR24 paper, \"Eyes Wide Shut? Exploring the Visual Shortcomings of Multimodal LLMs,\" raising concerns about the originality of this work. The CVPR24 paper highlights that Visual Language Models (VLMs), often relying on CLIP as the visual encoder, struggle with recognizing fine-grained details, such as object locations. It introduces the MMVP benchmark to evaluate these limitations comprehensively. I encourage the authors to clarify how their contributions provide novel insights beyond this existing research."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How do these findings generalize to other MLLMs beyond LLaVA-1.5?\nWhat specific mechanisms allow MLLMs to extract distinct information from seemingly similar embeddings?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Provides compelling empirical evidence through controlled experiments across multiple benchmarks.\nChallenges and refines an important assumption in the field about VLM limitations.\nDemonstrates that existing architectures might be more capable than previously thought, just requiring better utilization strategies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper challenges the prevailing belief that Vision-Language Models' (VLMs) failures in visual reasoning are primarily due to CLIP image encoder's \"erroneous agreements\" (where distinct images have high cosine similarity). Using LLaVA-1.5-7B as an example, they demonstrate that MLLMs can successfully extract distinct information from similar image embeddings, achieving high accuracy on tasks where CLIP performs poorly. This suggests that the limitation lies not in the image embeddings themselves, but in how effectively models extract and utilize the encoded information."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper's scope might be too focused on LLaVA-1.5 as the primary example, potentially limiting the generalizability of findings\nWhile the paper shows that information can be extracted from similar embeddings, it doesn't fully tackle why LLaVA-1.5 is able to do this."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In general the second weakness is the biggest to me. I would like to hear what the authors say on this?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper delivers a valuable message to the community by advocating for enhancing Multimodal LLMs and keeping the image encoder fixed. Previous research suggested that the image encoder introduced issues by producing \"erroneous agreements\" (similar embeddings for semantically similar but visually distinct images). However, this paper counters that claim, attributing the problem instead to the the model not utilizing these visual features effectively. \n\n- Interesting observation of better decoding algorithms and methods for evaluating specific tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper examines the performance of LLaVA-1.5-7B on visual reasoning tasks, specifically WhatsUp and MMVP, and concludes that its suboptimal performance is not due to CLIP's visual features. While CLIP visual features effectively capture semantic similarities, they occasionally misinterpret spatial differences in object placement (e.g., \"mug on the left\" vs. \"mug on the right\"), which results in high cosine similarity (over 0.95) despite subtle image differences—referred to as \"erroneous agreements.\" The authors show that CLIP’s visual features are accurate; instead, they attribute the performance issues to LLaVA not making effective use of these features. They further demonstrate that poor alignment between visual and textual inputs, not the visual features themselves, explains the bad performance in CLIP models for these tasks and datasets. Unlike CLIP, LLaVA does not exhibit this alignment problem, and this is shown quantitatively. Finally, the authors try better decoding strategies in Llava like M3ID such that the decoding better makes use of the visual features. They also show that multiple image inputs works better to highlight the difference in images. They also explore performance gaps related to evaluation methods, training data, and the text encoder."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There is an incoherent story. The abstract initially suggests that LLaVA performs well on reasoning tasks and achieves high accuracy, yet later the paper claims LLaVA performs poorly on MMVP, contradicting the initial statement. They also mention that LLava is able to extract the correct information from the visual features, and that it does not face issues (L186, and demo image). Only later is it clarified that LLaVA performs well on WhatsUp but not on MMVP. In general, I feel there is an unclear and confusing story. \n\n- WhatUp, MMVP, COCO-spatial and GQA-spatial are not really well-known datasets and publicly-agreed on to measure reasoning. I actually came to know them after reading this paper. Measuring reasoning on MMLMs are usually not done on these datasets. These datasets are not enough to reflect model reasoning and to come up with general conclusions about LLava or MMLMs in general. The authors don’t show ablation and analysis results using their ablation strategies, on important reasoning tasks such as VQA, GQA, OK-VQA, VCR and others (specifically, those that LLava reports on). I feel the scope, task and datasets are not enough to reach the standard required for ICLR."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Would a high negative Spearman's rank correlation show that the embeddings are quite different? \n\nLN 232-236 says: \"While SC (fv(v1), fv(v2)) > 0.989, Spearman’s rank correlation coefficient can tell their sharp difference: ρ = −1, showing that they are fully opposed in this sense. Therefore, the difference in visual inputs might still be extracted through other means when erroneous agreements occur\"\n\nHow does ρ = −1 show that the embeddings are 'fully opposed'? If the authors could show this or cite a paper that shows this, that would be great."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Important analysis shown in section 4 (Investigating the performance gap)- this section answers the questions related to training data, language model and evaluation method. This analysis is important to make the claim that visual information extraction is the key factor in determining the performance gap on downstream tasks. And these other factors (eval method, language encoder, training data) are not contributing much to the improved performance.\n\n2. Detailed benchmarking of the models on different datasets and good ablation studies.\n\n3. They show, using a different decoding method, that even with a fixed pre-trained image encoder if we try to 'force' VLMs to attend to visual features while decoding (and not just relying on language priors), we can perform good on downstream visual reasoning tasks. Although they used a previously proposed decoding strategy M3ID (Favero et al., 2024)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Previous works have argued that the poor performance of VLMs on simple visual reasoning tasks is due to their dependence on CLIP encoder. They show that CLIP can encode two visually different images with high cosine similarity (called erroneous agreement) and argue that many VLMs fail due because they use CLIP as their vision encoder.\n\nIn this paper the authors show that with better extraction and utilization methods, clip encoder can still be used for downstream tasks of visual reasoning. They show experiments with LLaVA-1.5 and show that it performs good on benchmarks despite using CLIP as its vision encoder."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors show that the visual feature extraction technique in LLaVA (a two layer MLP) is an important step in distinguishing between two erroneous images. But they do not provide an convincing argument on why is it an important step. An analysis on \"why just adding a 2-layer MLP on top of pre-trained CLIP makes it so much better?\" would have been an amazing addition to the paper. \n\n2. On Spearman's rank correlation (also asked in the questions): Since CLIP is trained using loss based on cosine similarity, I think using Spearman's rank correlation to show that two embeddings are \"fully opposed\" is not correct. For example, consider the example given on LN 232-233. Although the ranks of the dims are reversed giving ρ = −1, their absolute values are pretty close. And if we assume (in an ideal world) them to be separable features, for example the embeddings could be of dog images and the features are 'ear-length' , 'fur color', 'nose-shape', both the embeddings will still show two very similar looking dogs (and not 'fully opposite') even though the embedding might have ρ = −1."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We provide evidence that Vision-Language Models face challenges beyond erroneous agreements, in that the visual information might still be in the CLIP image embeddings but a better extraction and utilization strategy is required to pull it out."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024on,\ntitle={On Erroneous Agreements of {CLIP} Image Embeddings},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5E6VOD7W0z},\nnote={under review}\n}"
},
"abstract": {
"value": "Recent research suggests that the failure of Vision-Language Models (VLMs) in visual reasoning could be attributed to the CLIP image encoder ambiguously encoding distinct images into embeddings with high cosine similarity, namely *erroneous agreements*. In this paper, we show that they are not the sole issue, as multimodal large language models (MLLMs) may extract distinct information even from image embeddings with high cosine similarities. On Subset A of the What'sUp benchmark, where the Left/Right image pairs are embedded by CLIP with average cosine similarity greater than 0.99, CLIP's performance is near random guess. In contrast, LLaVA-1.5-7B, which uses the same image encoder as CLIP, achieves nearly 100\\% accuracy. This discrepancy is also observed between LLaVA-1.5-7B and CLIP-like models on similar benchmarks. To investigate this performance gap, we conduct controlled experiments to test the effect of varying evaluation methods, training data, and language processing choices. We find that the CLIP image embeddings contain more extractable information than previously suggested, but it is likely obscured by the inadequate vision-language alignment of the CLIP's paradigm. Motivated by this observation, we reconsider the LLaVA-1.5 model on the MMVP benchmark, for which prior work showed that it could not distinguish image pairs with high cosine similarity. We observe a performance gain brought about by an alternative decoding algorithm, which attends more to visual input. Further, we show that the accuracy significantly increases if the model can take both images as input to emphasize their nuanced differences. Both findings indicate that LLaVA-1.5 did not utilize extracted visual information sufficiently. In conclusion, our findings suggest that while improving image encoders could benefit VLMs, there is room to enhance the models with a fixed image encoder through better strategies for extracting and utilizing visual information."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multimodal Learning",
"CLIP",
"LLaVA",
"cosine similarity",
"erroneous agreement"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3a6027cb8e7ac3ebd93ffc2b3d3a28045fc62b66.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/800cc8e3432cb41e75002362e4ec64c18c04c010.zip"
},
"title": {
"value": "On Erroneous Agreements of CLIP Image Embeddings"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5ECUAQJUuq | AdvLoRA: Adversarial Low-Rank Adaptation of Vision-Language Models | main | Withdraw | Vision-Language Models;Adversarial Training;Parameter-efficient Adaptation | alignment, fairness, safety, privacy, and societal considerations | Yuheng Ji;Yue Liu;Zhicheng Zhang;Zhao Zhang;Yuting Zhao;Gang Zhou;Xingwei Zhang;Xinwang Liu;Xiaolong Zheng | ~Yuheng_Ji1;~Yue_Liu10;~Zhicheng_Zhang6;~Zhao_Zhang13;~Yuting_Zhao2;~Gang_Zhou6;~Xingwei_Zhang1;~Xinwang_Liu1;~Xiaolong_Zheng4 | 3;3;5;5 | 4;5;4;3 | 2;2;4;4 | 2;2;2;3 | 1;1;4;4 | 4 | 4 | 3 | 2.25 | 2.5 | -0.707107 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- What is the purpose of Eq.8-12?\n- How to choose the parameter $\\alpha$?\n- Does the ADVLORA work on other types of VLM?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper investigated an important problem and proved that the proposed ADVLORA can improve the adversarial robustness of BLIP-like VLMs in a parameter-efficient manner."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper focuses on the adversarial robustness of VLMs during PEFT. The authors improve the efficiency and robustness of adversarial adaptation by designing a reparametrizing method based on parameter clustering and parameter alignment."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-\tThe novelty is very limited since the ADVLORA is proposed by combining adversarial training and LORA. Also, the proposed parameter clustering is not well-motivated.\n-\tThe pipeline of ADVLORA is unclear. I hope that the authors could further clarify the purpose of Eq.8-12. Are they used for initialization or updated in each iteration?\n-\tHow to choose the parameter $\\alpha$, which is newly introduced compared to the original LORA.\n-\tThe authors only investigate BLIP, whereas, there are many other VLMs, like CLIP.\n-\tThe citation format should be revised. And there are many typos, such as “Eq. equation” in Algorithm1."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Why does AB need to be aligned with W_0 ?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1.\tThis paper presents AdvLoRA, a novel parameter-efficient adversarial adaptation method that improves the adversarial robustness of vision-language models (VLMs) through low-rank adaptation, representing an interesting avenue for research.\n2.\tThe paper presents comparative results across some mainstream datasets.\n3.\tThe method proposed in this paper is practical and applicable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a parameter-efficient adversarial adaptation method called AdvLoRA, based on Low-Rank Adaptation. Initially, they investigate and reveal the intrinsic low-rank properties present in adversarial adaptation for vision-language models (VLMs). Unlike LoRA, AdvLoRA enhances the efficiency and robustness of adversarial adaptation through a novel reparameterization method that leverages parameter clustering and alignment. Additionally, an adaptive parameter update strategy is introduced to further enhance robustness. With these innovations, AdvLoRA addresses issues related to model security and excessive resource consumption. Extensive experiments demonstrate the effectiveness and efficiency of AdvLoRA."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe comparison between the proposed method and existing adversarial robustness techniques is insufficient, particularly regarding performance across different attack types. \n2.\tIn the absence of an analysis of the proposed method's efficiency, clustering may be theoretically time-consuming.\n3.\tAblation experiments should be a key component of the study, as it is crucial to evaluate the effectiveness of each module of the proposed method. The current content does not adequately demonstrate the method's effectiveness and lacks a detailed comparative analysis.\n4.\tThe reparameterization method lacks theoretical support."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See Weaknesses.\nI would like to see the authors provide further clarification on the contributions of their work to confirm whether my understanding is correct."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper provides a detailed introduction to the method, making it easy to understand. \n\nIt also conducts numerous experiments to demonstrate the effectiveness of the approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a LoRA-based adversarial training method for visual language models. Unlike directly using LoRA, this method improves the efficiency and robustness of adversarial adaptation by designing a novel reparameterization method based on parameter clustering and parameter alignment. Through extensive experiments, the article demonstrates the effectiveness of AdvLora."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In terms of writing, the entire paper seems to not use the correct citation format; the ICLR template should utilize \\citep. Therefore, a thorough review and verification of the paper are necessary to meet writing standards.\n2. In lines 177-181, L has not used cross-referencing \\ref.\n3. It is a well-known fact that using adversarial samples for adversarial training can degrade model performance, and the introduction of Table 1 is not very clear regarding which model was trained.\n4. If I am not mistaken, AdvLora seems to only improve the initialization of LoRA, which makes its contribution appear relatively small.\n5. It is necessary to compare this method with other adversarial training approaches, such as RobustCLIP[1].\n\n[1] Schlarmann, Christian, et al. \"Robust clip: Unsupervised adversarial fine-tuning of vision embeddings for robust large vision-language models.\" arxiv preprint arxiv:2402.12336 (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please see the weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1.\tThe writing is clear. The formulas are correct.\n2.\tThe experiment is abundant and multi-dimensional.\n3.\tThe research topic is important for VLM."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a parameter-efficient method to enhance the adversarial robustness of VLMs. Traditional adaptation methods like full fine-tuning and LoRA are vulnerable to adversarial attacks, leading to performance drops. AdvLoRA improves robustness by utilizing low-rank adaptation, parameter clustering, and adaptive update strategies, reducing computational costs. Experiments show that AdvLoRA outperforms other methods, especially in adversarial scenarios."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tWhile the method is effective, there is no analysis explaining the necessity of reparameterization.\n2.\tThe rationale behind using clustering to establish a connection with the parameter in W is insufficiently analyzed.\n3.\tThe justification for employing an adaptive update parameter is also lacking."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nji2024advlora,\ntitle={AdvLo{RA}: Adversarial Low-Rank Adaptation of Vision-Language Models},\nauthor={Yuheng Ji and Yue Liu and Zhicheng Zhang and Zhao Zhang and Yuting Zhao and Gang Zhou and Xingwei Zhang and Xinwang Liu and Xiaolong Zheng},\nyear={2024},\nurl={https://openreview.net/forum?id=5ECUAQJUuq}\n}"
},
"abstract": {
"value": "Vision-Language Models (VLMs) are a significant technique for Artificial General Intelligence (AGI). With the fast growth of AGI, the security problem become one of the most important challenges for VLMs. In this paper, through extensive experiments, we demonstrate the vulnerability of the conventional adaptation methods for VLMs, which may bring significant security risks. In addition, as the size of the VLMs increases, performing conventional adversarial adaptation techniques on VLMs results in high computational costs. To solve these problems, we propose a parameter-efficient \\underline{Adv}ersarial adaptation method named \\underline{AdvLoRA} by \\underline{Lo}w-\\underline{R}ank \\underline{A}daptation. At first, we investigate and reveal the intrinsic low-rank property during the adversarial adaptation for VLMs. Different from LoRA, we improve the efficiency and robustness of adversarial adaptation by designing a novel reparameterizing method based on parameter clustering and parameter alignment. In addition, an adaptive parameter update strategy is proposed to further improve the robustness. By these settings, our proposed AdvLoRA alleviates the model security and high resource waste problems. Extensive experiments demonstrate the effectiveness and efficiency of the AdvLoRA."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Yuheng_Ji1",
"~Yue_Liu10",
"~Zhicheng_Zhang6",
"~Zhao_Zhang13",
"~Yuting_Zhao2",
"~Gang_Zhou6",
"~Xingwei_Zhang1",
"~Xinwang_Liu1",
"~Xiaolong_Zheng4"
]
},
"authors": {
"value": [
"Yuheng Ji",
"Yue Liu",
"Zhicheng Zhang",
"Zhao Zhang",
"Yuting Zhao",
"Gang Zhou",
"Xingwei Zhang",
"Xinwang Liu",
"Xiaolong Zheng"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Vision-Language Models",
"Adversarial Training",
"Parameter-efficient Adaptation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "ji|advlora_adversarial_lowrank_adaptation_of_visionlanguage_models"
},
"pdf": {
"value": "/pdf/d3e40f38df5a28d3576b2f5f670e1b3d87926ead.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "AdvLoRA: Adversarial Low-Rank Adaptation of Vision-Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
5EuAMDMPRK | POROver: Improving Safety and Reducing Overrefusal in Large Language Models with Overgeneration and Preference Optimization | main | Active | LLM safety;LLM usefulness;Overrefusal in LLMs;responsible AI | alignment, fairness, safety, privacy, and societal considerations | 3;5;6;6 | 4;3;4;3 | 2;2;2;3 | 2;2;2;3 | 2;2;2;4 | 5 | 3.5 | 2.25 | 2.25 | 2.5 | -0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* Although experiments on models with different scales were not included, how do you expect the models to behave, assuming that they come from the same family? Would the benefits saturate as the number of parameters increases?\n* How sensitive is the proposed method in the choice of the hyperparameter $\\tau$? Were the results vastly different accross your grid search?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The paper tackles an important aspect of LLMs, aiming to investigate and improve their tradeoff in mainting (or even improving) their safety, without, however, undermining their usefulness due to overrefusal.\n* The experiments presented are extensive; the effectiveness of the presented method has been evaluated on a variety of datasets and benchmarks related to safety and overrefusal.\n* The experiments suggest that the proposed framework effectively results in a balance between usefulness and safety, without significantly undermining the general capabilities of the tested model."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a framework that aims to reduce overrefusal in Large Language Models (LLMs), while improving their safety. It involves finetuning on overgenerated training data from teacher models, such as GPT-4o, and preference optimization to guide models to respond in benign (but possibly seemingly toxic) prompts. Through experiments on Phi-3 7B, and various teacher models, the authors find that their method achieves significant reduction in overrefusal, while maintaining a balance between usefulness and safety."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* As the authors acknowledge, a limitation of their study is that the proposed framework is only tested on a single model family and size (e.g., Phi-3 7B). In my opinion, while the results are promising, this limitation is significant; given that the framework relies on finetuning and preference optimization of pretrained models, testing it across diverse model families and scales would prove its effectiveness and generality. It is unclear to me whether the results would be similar in that case.\n* Adding more fine grained experiments on the number of Added Safety Data (ASD) would make the claim that **the proposed method is effective without undermining the general abilities of the tested model** more convincing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Looking at Figure 5, it looks like there was little improvement on XSTest performance and a reduction in over-refusal for ORBench but no improvement in safety. Why do you think this is? Is it related to the training set being ORBench?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper tackles an important problem of making models safer.\n2. The paper evaluates on multiple benchmarks with different step counts to give broader analysis.\n3. The paper's algorithm seems straightforward to implement."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is concerned with training language models that output safe content but do not refuse too often. They test two algorithmic techniques to achieve this. First, they use overgeneration, which involves sampling multiple possible outputs and choosing the best responses for training. Second, they generate preference data pairs, based on responses that were unsafe/over-refusal vs not."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I believe the algorithms in this work have limited novelty. Rejection sampling and preference optimization are some of the most used tools for current fine-tuning and safety alignment, so the paper needs to provide novel analysis instead.\n2. I'm confused about the empirical gains. It seems that in Table 1, the random selection GPT-4o baseline performs on par with the rejection sampling, indicating that the filtering step is not that crucial. Moreover, in Figures 3 and 4, training on GPT-3 seems to be extremely safe (though it does not solve over-refual). \n\nIn general, I suggest focusing on key empirical takeaways, ensuring that POROver improves upon simple baselines, and organizing the presentation of the results."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1) Do these results replicate on a different model other than Phi-3? It would be especially convincing if it replicates on different sizes of the llama-3 family of models. \n\n2) Is it possible that Phi-3 already has safety training that's particularly prone to over-refusing?\n\n3) I interpret ASD to be the number of datapoints added after rejection sampling. Is this correct? If so, this correlates with the extent the model is deviated from the base model.\n\n4) It's not clear where 15%, 45.5%, and 95.5% come from in the abstract."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1) The approach shows strong empirical improvement and scopes a relevant problem of over-refusing. \n\n2) Although the results are shown only on the 7B Phi-3 model, it's done on a variety of seemingly-toxic datasets. \n\n3) The results are supported by human annotations in appendix C.\n\n4) The paper speaks to the trade off on the amount of safety training data needed to achieve the level of desired safety. This is defined in terms of additional safety datapoints."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the impact of rejection sampling for safety training on the model's tendency to over-reject. The results show in the student teacher setting distilling from a stronger model like GPT-4 to Phi-3 the over-refusal reduces from near 100% to 45% on OR-bench."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The approach involves distilling from already safety trained models. In particular, safety trained models that are also likely targeting similar datasets. The work shows gpt3.5 vs gpt4, but it would be more convincing to show Llama-3 as the teachers also, or an somewhat unsafe teacher model. \n\n2) Added Safety Data (ASD) is only evaluated at three levels 0, 2K, and 20K. More data would be needed to defend the claim that there is a tradeoff between ASD and safety. I would expect it to saturate based on the amount of base diversity represented in the prompts. \n\n3) The figures have misleading axis starting at 85% to 100% for instance in Figure 4. This makes the difference look bigger than it is."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Can you provide more insight into why advanced teacher models require more safety examples? Is this related to the complexity of their responses or other factors?\n\n2. How do you expect the observed trends to scale with different model sizes? Would smaller or larger models show similar patterns?\n\n3. Could you elaborate on how different rejection sampling criteria were selected? Were other criteria considered?\n\n How sensitive are the results to the specific thresholds used in rejection sampling?\n\n4. Could the authors expand on the ethical implications of their work, particularly regarding the balance between user freedom and model safety?\n\n5. How do the results of POROver compare to other existing methods for improving LLM safety and reducing overrefusal? Are there any specific scenarios where POROver outperforms or falls short of other approaches?\n\n6. Have you explored automated methods for tuning the containment threshold τ?\n\n Were other preference optimization methods considered besides DPO?\n\n How does the slight safety compromise in OR-Bench Toxic relate to the containment threshold?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper introduces a novel approach to reducing over refusal in LLMs through overgeneration and preference optimization, which is a creative solution to a common problem in the field. The paper is well-written and the results are clearly presented, making it easy to follow the authors' reasoning and findings. The work addresses a critical issue in the deployment of LLMs, improving their safety without compromising their usefulness, which has significant implications for real-world applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper titled \"POROVER: IMPROVING SAFETY AND REDUCING OVERREFUSAL IN LARGE LANGUAGE MODELS WITH OVERGENERATION AND PREFERENCE OPTIMIZATION\" presents a comprehensive study on enhancing the safety and reducing overrefusal in large language models (LLMs). The authors examine the impact of overgenerating training data using advanced teacher models on the safety and usefulness balance of instruction-following language models. They introduce POROver, a strategy that employs preference optimization methods to reduce overrefusal by leveraging completions from superior teacher models. The study demonstrates significant improvements in the F1 score between safety and usefulness, and a substantial reduction in overrefusal rates."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper primarily focuses on a single model size and family (Phi-3), which limits the generalizability of the findings. While the authors acknowledge this limitation, the lack of experimentation with different model scales makes it difficult to understand how these methods would perform across the spectrum of model sizes. This is particularly important given that safety and overrefusal behaviors often vary significantly with model scale. Including experiments with both smaller (3-4B) and larger (70B+) models would provide stronger evidence for the method's broad applicability. The paper's evaluation methodology relies heavily on automatic metrics and a limited set of benchmarks. While the chosen benchmarks (e.g., OR-Bench, XSTest) are relevant, they may not capture the full spectrum of real-world scenarios where safety and overrefusal matter. Including evaluations on more diverse datasets, particularly those featuring different languages, cultures, and domain-specific contexts, would strengthen the paper's conclusions about the method's effectiveness.\n\n- The computational analysis of the proposed methods is notably absent from the paper. The overgeneration approach with GPT-4 as a teacher model likely incurs significant computational costs, yet there's no discussion of the training efficiency or resource requirements. This omission makes it difficult for practitioners to assess the method's feasibility in production environments. A detailed analysis of computational overhead compared to standard fine-tuning approaches would be valuable.\n\n- The paper lacks a thorough comparison with existing safety and overrefusal reduction methods. While baseline comparisons are provided, the authors don't fully contextualize their results within the broader landscape of recent work on LLM safety alignment. 
A more comprehensive comparison with methods like constitutional AI, RLAIF, and other preference optimization approaches would better demonstrate the advancement over state-of-the-art.\n\n- The robustness of the proposed method requires more thorough investigation. The paper doesn't examine how the method performs under adversarial conditions or when faced with edge cases. Additionally, there's no analysis of the consistency of results across multiple training runs or different random seeds. This makes it difficult to assess the reliability and stability of the approach in practice. The ethical implications of reducing overrefusal deserve deeper examination. While the paper successfully demonstrates technical improvements in reducing overrefusal, it doesn't adequately address the broader implications of making models more compliant."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper examines the impact of using superior language models as teachers on the safety-usefulness trade-off in student models, and explores the use of preference optimization methods to reduce overrefusal."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024porover,\ntitle={{PORO}ver: Improving Safety and Reducing Overrefusal in Large Language Models with Overgeneration and Preference Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5EuAMDMPRK},\nnote={under review}\n}"
},
"abstract": {
"value": "Balancing safety and usefulness in large language models has become a critical challenge in recent years. \nModels often exhibit unsafe behavior or adopt an overly cautious approach, leading to frequent overrefusal of benign prompts, which reduces their usefulness. \nAddressing these issues requires methods that maintain safety while avoiding overrefusal. \nIn this work, we examine how the overgeneration of training data using advanced teacher models (e.g., GPT-4o), including responses to both general-purpose and toxic prompts, influences the safety and overrefusal balance of instruction-following language models.\nAdditionally, we present POROver, a strategy to use preference optimization methods in order to reduce overrefusal, via employing a superior teacher model's completions.\nOur results show that overgenerating completions for general-purpose prompts significantly improves the balance between safety and usefulness. \nSpecifically, the F1 score calculated between safety and usefulness increases from 73.7\\% to 88.4\\%. \nMoreover, overgeneration for toxic prompts substantially reduces overrefusal, decreasing it from 94.4\\% to 45.2\\%. \nFurthermore, preference optimization algorithms, when applied with carefully curated preference data, can effectively reduce a model's overrefusal from 45.2\\% to 15.0\\% while maintaining comparable safety levels."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM safety",
"LLM usefulness",
"Overrefusal in LLMs",
"responsible AI"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3e49cdfb3023ec395fe3ae5167192f4f3e9abd1b.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/d89e70be052186e2d283c0c97caac7b980380e4e.pdf"
},
"title": {
"value": "POROver: Improving Safety and Reducing Overrefusal in Large Language Models with Overgeneration and Preference Optimization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5FKIynMPV6 | Bounds on the Reconstruction Error of Kernel PCA with Interpolation Spaces Norms | main | Active | kernel principal component analysis;reproducing kernel Hilbert space;high-dimensional statistics;convergence rate;interpolation space | learning theory | 3;6;8;8 | 2;2;4;3 | 3;2;4;3 | 3;3;4;4 | 2;4;4;3 | 6.25 | 2.75 | 3 | 3.5 | 3.25 | 0.773545 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Besides the points discussed above, there are the following minor points: \n- Several constants are used in the introduction (Section 1) without being introduced. This makes the discussion sometimes difficult to follow.\n- What does condition (2.11) in Reiss and Wahl (2020), quoted in Proposition 2.2, mean?\n- Figure 1: The experimental setup is missing here, and it's unclear whether the plots correspond to an actual experiment. This should be specified."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The topic is of current interest, and the results are clearly presented, discussed, and placed in the context of the existing literature. \n- The new bounds extend the existing results to any $0\\leq s\\leq 1$. The extension is significant and relevant for applications.\n- The identification and correction of a bug in the result $s=0$ is interesting and relevant to the community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper gives bounds on the recontruction error of kernel PCA, measured in the full scale of interpolation spaces $[H]^s$ between $L_2$ ($s=0$) and the RKHS ($s=1$). \nAnalogous results exist in the literature for $s=0$ and $s=1$. However, the paper identifies a gap in the existing proofs for $s=0$ and gives an alternative, correct proof. Moreover, the results for $0<s<1$ are completely novel up to my knowledge, and they correspond to the existing ones in the limiting cases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The results in large dimensions hold on the sphere, as is clearly explained in Section 3.3. and especially in Theorem 3.8. This is a reasonable setting, but it should be made clear upfront in the introduction (e.g., in Table 1 and in Section 1.2 under \"Convergence rate of empirical kernel PCA in large dimensions\"). It would also be interesting to know more precisely what are technical limitations beyond the sphere?\n- The results are in part motivated by filling gaps in the existing literature, namely those discussed in Appendix C1 and Appendix C2. To the best of my understanding, these gaps are identified correctly. But the claim is quite significant, and it should be better discussed in the main text. Also, according to the two appendices, the existing arguments fail for very specific corner cases: An effort should be made to clarify if these are cases of general interest.\n- There are results in the approximation theory literature that seem to be closely related and should be discussed, e.g. Theorem 3 in [1] seems to prove a version Theorem 2.5 for s=0. See also [2].\n\n\n[1] G. Santin and R. Schaback, Approximation of eigenfunctions in kernel-based spaces, Adv. Comput. Math. (2016)\n\n[2] I. Steinwart, A short note on the comparison of interpolation widths, entropy numbers, and Kolmogorov widths, J. Approx Theory (2016)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Please address the missing step in the proof of Theorem 2.5 (see Weaknesses).\n- In Corollary 3.4, what is the dependency of the constant $C_3$ on problem parameters? is it polynomial?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "I am unable to assess the originality of this work w.r.t related literature, as I am not sufficiently familiar with this literature.\n\nIdentifying the important role played by \"assumption (C)\" in statistical analyses of kernel PCA is an interesting point.\n\nThe paper is very well structured and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work is concerned with kernel PCA, and specifically with the statistical performance of the empirical estimator, defined as the $\\ell$ principal components of the empirical kernel matrix $\\widehat{\\Sigma}_{ij} = k(X_i, X_j)$.\nThe main contributions are upper bounds on the empirical estimator's reconstruction error as a function of $\\ell$, for several error metrics, and under several statistical settings.\n\nThe error metrics considered are the interpolation space norms $[H]^s$ for $0 \\leq s \\leq 1$, defined in Section 2.4. For $s=0$ this amounts to considering the estimator's $L^2$ reconstruction error, and for $s=1$ it amounts to the RKHS-norm reconstruction error.\n\nThere are three statistical settings considered:\n- An abstract setting (section 3.1) where the only assumption is a condition referred to as \"assumption $(C)$\" in the paper. The following subsections make use of this abstract result.\n- The classical setting (section 3.2) where dimension $d$ is constant and where the kernel $k$ has polynomially decaying eigenvalues.\n- A high-dimensional setting (section 3.3) where sample size $n \\asymp d^\\gamma$ for some fixed $\\gamma>1$.\n\nAnother contribution is a rigorous proof that the minimum $[H]^s$-norm reconstruction error admits a simplified expression for all $0 \\leq s \\leq 1$ (Theorem 2.5).\n\nA secondary contribution is the remark that, in the high-dimensional setting of section 3.3, the RKHS-norm reconstruction error _of any estimator_ does not vanish as $n \\to \\infty$, implying that this error metric is unsuitable in this setting. Moreover, high-dimensional kernel PCA is shown to exhibit a similar phenomenon as in high-dimensional kernel regression: the periodic plateau behavior."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In the proof of Theorem 2.5, I don't understand why the orthonormality constraint $(\\psi_1, ..., \\psi_\\ell) \\in B_\\ell$ does not appear in the stationarity condition of $\\mathcal{R}_s$, equation (6). In fact I did not manage to recover equation (6) at all. This step of the proof deserves more details.\n\nThe last sentence of the abstract is not very clear, and \"$[H]^1$ norm\" is not defined at that stage. Perhaps a more precise statement would be that \"the RKHS norm is not a relevant error metric in high dimensions\".\n\nThe figures are difficult to read, the paper would greatly benefit from making them bigger. (E.g with matplotlib, reduce figsize and increase dpi.)\n\nThe paper contains many typos and unusual wordings:\n- line 17, in the abstract, remove space after opening parenthesis\n- line 118, add \"In\", or use \"contain\"\n- line 343, the statement of assumption (C), remove \"If\"\n- line 376, replace \"interested\" by \"interesting\"\n- throughout, consider using the word \"setting\" in place of \"circumstance\", which is less commonly used\n- line 424, remove space after opening parenthesis\n- line 470, add \"A\" at the beginning of the sentence\n- line 531, replace \"decayed\" by \"decaying\", and \"provide\" by \"provided\"\n- line 537, replace \"on\" by \"of\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- In page 1, notation $\\otimes_H$ appears without definition. Also it is unclear whether $f(X)$ represents a vector or a matrix. Please clarify this notation and provide definitions for these terms.\n\n- In page 3 there are discussions about the interpolation space norm, but this norm has not yet been defined. It’s difficult to follow the paper with references to undefined terms. Furthermore, a motivating explanation in the introduction about the significance of the interpolation space norm would be helpful. Why is this norm important, and how does it enhance the understanding of kernel PCA?\n\n- What specific norm is used in equation (2)? Is it Frobenius norm?\n\n- The presentation of this proposition could be improved. It references \"condition (2.11) in Reiß & Wahl (2020),\" yet does not restate the condition. Including the condition here would make the proposition more self-contained.\n\n- In remark 2.3, what is meant by H norm?\n\n- On page 5, inclusion map is not defined.\n\n- In line 301 it is written $\\langle \\lambda_i^{(s-1)/2} \\phi_i, \\lambda_j^{(s-1)/2} \\phi_i \\rangle_{[H]^s} = \\delta_{ij}$. Is this a definition of this inner product or is it deduced from some other fact or property? For example shouldn't the right hand side be $\\lambda_i^{s-1} \\delta_{ij}$ instead?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper provides a solid theoretical analysis of kernel PCA within the framework of a generalized norm, referred to here as the interpolation space norm."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper examines the reconstruction error of kernel Principal Component Analysis (PCA) using interpolation space norms. The authors derive upper and lower bounds for the reconstruction error of empirical kernel PCA under specific conditions. They apply these bounds to two scenarios: polynomial-eigenvalue decayed kernels in a fixed-dimension domain, and the inner product kernel on a high-dimensional sphere, comparing their bounds to existing results. Notably, this work establishes a lower bound on the sample size necessary to ensure that the empirical reconstruction error approximates the optimal reconstruction error accurately. Additionally, the authors conclude that the $H^1$-norm is unsuitable for large-dimensional settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The presentation is suboptimal, making the paper challenging to read in its current form. There are multiple instances where notations or concepts are referenced before they are formally defined, impeding the reader’s ability to verify the correctness of the claims."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Can you please elaborate more on your paper comparing with \"Rosasco L, Belkin M, De Vito E. On Learning with Integral Operators[J]. Journal of Machine Learning Research, 2010, 11(2)\", where the kernel $\\Sigma$ in your paper is actually an integral operator?\n\n2. How about extending the current result to the manifold setting? As there are existing results of the convergence of some particular kernel based Laplacians on the manifolds.\n\n3. Given similarity between MDS and kernel PCA, would the error bounds work also for MDS?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The most significant strength is I think overall the paper addresses a gap that exists among the most recent works in a rigorous and inspiring way. It involves both the rigorous theoretical contribution mentioned above in the summary but also provides a novel use of Interpolation Norms that I personally find helpful and interesting for technical proofs. For applications, the high-dim behavior insights mentioned above are also of great practical importance given the topic kernel PCA is a practically popular method. Besides, the paper is well distinguished from the existing works. The paper offers a thorough comparison with existing bounds, demonstrating improvements over previous results and discussing where previous work lacked rigor. This transparency about advancements and limitations strengthens the credibility of the results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper focuses on understanding and bounding the reconstruction error of kernel Principal Component Analysis (PCA) using interpolation space norms. This work fills gaps in previous studies on kernel PCA by providing rigorous proofs and new bounds on the reconstruction error under specific conditions. Key contributions include: 1.Upper and Lower Bounds on Reconstruction Error; 2.Applications to some interesting settings including Fixed Dimension Domain, for polynomially eigenvalue-decayed kernels and High-Dimensional Sphere, for inner-product kernels where the dimension grows along with sample size. Moreover, the paper reveals that using [\\mathcal{H}]^{1}-norm in high-dimensional settings may be unsuitable due to inconsistent error behavior. In addition, a \"periodic plateau\" phenomenon in convergence rates is observed, where the reconstruction error rate stabilizes over certain intervals as the number of components (\\ell) changes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, the paper has few weaknesses. One point can be that its results are largely theoretical, with only limited empirical validation. More comprehensive experiments across various datasets and settings would strengthen the paper by providing practical evidence to support the theoretical claims. Since after kernel PCA is a widely applied method, adding various types of empirical behavior would definitively make the paper more appealing. The other point, which would rather be some improvements, is more from a practical point of view that for example, there are parameters say $s$ in 3.4 Corollary is in practice unknown. How to do adaptation to find a data driven $n$ is also needed here as there are some works focusing on adaptation on smoothness parameters in terms of estimation and regression etc. In general, the weaknesses mainly lie in the practical side (not the main focus of the paper), which however the strengths far exceed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024bounds,\ntitle={Bounds on the Reconstruction Error of Kernel {PCA} with Interpolation Spaces Norms},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5FKIynMPV6},\nnote={under review}\n}"
},
"abstract": {
"value": "In this paper, we utilize the interpolation space norm to understand and fill the gaps in some recent works on the reconstruction error of the kernel PCA. After rigorously proving a simple but fundamental claim appeared in the kernel PCA literature, we provide upper bound and lower bound of the reconstruction error of the empirical kernel PCA with interpolation space norms under the assumption $(C)$, a condition which is taken for granted in the existing works. Furthermore, we show that the assumption $(C)$ holds in two most interesting settings ( the polynomial-eigenvalue decayed kernels in fixed dimension domain and the inner product kernel on large dimensional sphere $\\mathbb S^{d-1}$ where $n\\asymp d^{\\gamma}$) and compare our bound with the existing results. This work not only fills the gaps appeared in literature, but also derives an explicit lower bound on the sample size to guarantee that the (optimal) reconstruction error is well approximated by the empirical reconstruction error. Finally, our results reveal that the $[\\mathcal H]^{1}$ norm should not be used in the large dimensional settings."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"kernel principal component analysis",
"reproducing kernel Hilbert space",
"high-dimensional statistics",
"convergence rate",
"interpolation space"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/567823f1c98e2885b7534687a6d3c8f79c34fea8.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bounds on the Reconstruction Error of Kernel PCA with Interpolation Spaces Norms"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5FXKgOxmb2 | MAGNet: Motif-Agnostic Generation of Molecules from Scaffolds | main | Active | graph generative models;2d molecules | applications to physical sciences (physics, chemistry, biology, etc.) | 3;3;6;8 | 4;4;4;4 | 2;2;3;4 | 2;2;3;3 | 1;4;3;3 | 5 | 4 | 2.75 | 2.5 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the \"Weaknesses\" section above for specific questions.\n\nIf my questions/concerns are addressed, I would consider raising my score."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(S1): The authors address an important problem of molecule generation, and identify a reasonable gap in the capabilities of prior models that they then try to address. The proposed approach is motivated clearly. \n\n \n\n(S2): The paper is generally well-written. The experiments are conducted across various setups that are common/relevant in this domain. While the empirical performance isn't across-the-board amazing, MAGNet seems to be a generally capable model with good performance overall, while improving on top of previous models in some settings, as well as enabling new capabilities (e.g. more ways to condition the generation on partial information)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new way to address chunk-based molecule generation. Instead of using fully specified molecular subgraphs (motifs) similarly to prior work, the authors instead abstract out motifs to their connectivity skeletons, which allows for a smaller vocabulary to cover a wider range of possible motif realizations. The authors then show a factorization of the generative procedure that first builds the scaffold by assembling these motif skeletons and then gradually fills in the atom features. The approach is verified on standard generation and optimization benchmarks, showing decent performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(W1): The authors argue the most direct comparison is to other OS models, and focus on that angle. Setting aside the fact that MAGNet is not purely one-shot as the scaffold multiset `S` is decoded sequentially and autoregressively, even if we agree MAGNet is OS, is there any inherent value that comes from being an OS model? One thing that comes to mind would be faster inference, as stepwise generation models can be expensive due to repeatedly encoding the current partial graph. So, is MAGNet more efficient than sequential decoding models, e.g. MoLeR? \n\n \n\n(W2): More interesting conditioning settings depicted in Figure 5 could be explained in more detail. The partial molecule induces a partial scaffold multiset `S`; do you then use this partial multiset and continue generating to get a full multiset, then connect the scaffolds while forcing those of the connections that are implied from the conditioning? I assume extending the multiset `S` with further scaffolds cannot directly take into account the fact that some of the scaffold connections (or scaffold instantiations into specific motifs) are already known from the conditioning, because during training the scaffold multiset extension subnetwork assumes only a multiset of generic scaffolds is known. Could this be an issue causing the model to add scaffolds that don't fit well with the partial molecule? \n\n=== Nitpicks === \n\nBelow I list nitpicks (e.g. typos, grammar errors), which did not have a significant impact on my review score, but it would be good to fix those to improve the paper further. \n\n- Line 147: Denoting the join node as `j` could be an index clash given that the scaffolds being considered are denoted as `i` and `j` earlier in the sentence. \n\n- Line 155: \"factories\" -> \"factorize\" \n\n- Line 309: missing space"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "I'll be honest here. I really liked this paper and I was going to recommend clear acceptance until I understood that MAGnet was an AR model \"disguised\" as an OS model. From that point onward, I couldn't shake the feeling that it was framed as OS only because, if put in the AR category, the results would become not so impressive (although still good). I really hope this was an unintentional mistake or misinterpretation by the authors. That's a shame in my opinion, since I believe the proposal is original and the evaluation was very thorough, the technical side of this paper is almost flawless. \n\nI will be recommending rejection of this paper for the moment, because in this form too many claims made in this paper stem from an incorrect premise. However, I am willing to hear from the authors why they consider MAGnet one-shot instead of sequential and will re-evaluate whether I could reconsider my judgment after the rebuttal phase."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- using scaffolds instead of motifs qualifies as an innovative proposal, which could be of value to the community\n- experimental evaluation is extensive in both depth (many baselines) and width (two benchmarks)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents MAGnet, a generative model for molecules. MAGnet is based on scaffolds (an abstraction of molecular fragments without atom and bond information, just the graph structure), which are introduced to factorize of the molecule distribution. MAGnet is a VAE-like architecture which generates new molecules by first predicting a scaffold set and its connectivity from latent space, then by predicting the atom and bond types for each scaffold, and finally by adding leaf nodes. Experiments show good performances in comparison with several baselines, on standard benchmarks such as GuacaMol and MOSES."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "throughout the paper, MAGnet is considered a one-shot (OS) molecule generator, when in fact it is auto-regressive (AR). Appendix B.2 states that the set of scaffolds is generated auto-regressively, so I cannot understand how it could be considered one-shot. According to the definition by Zhu et al. (2022):\n\n\n Sequential generation refers to generating graphs in a set of consecutive steps, usually done nodes by nodes and edges by edges. One-shot generation, instead, refers to generating the node feature and edge feature matrices in one single step.\n\nThis is very different from what it is stated in Section 2 of the MAGnet paper:\n\n Zhu et al. (2022) categorise the generation process further into sequential methods, building molecules per fragment while conditioning on a partial molecule.\n\nto justify the fact that MAGnet is OS. According to the same paper that is cited, MAGnet belongs to the AR category. If MAGnet is AR, then most claims made in the paper need to be toned down or changed because it falls in the same category as MoLeR, which has usually comparable or better performances than MAGnet. For example, the claim \"MAGnet is the best OS generator\" no longer makes sense."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. My understanding of the experiments presented in Figure 1-a, Figure 3 and Figure 9 carried out (Section 5.1) is that the fragment-based approaches fail to reconstruct complex motifs that are absent from their fragment vocabulary by using atom-based tokens only. In contrast, MAGNet contained these structures in its scaffold vocabulary. Why haven't both methods used the same dataset to construct their fragment/scaffold vocabularies, without limiting fragment-based methods to only the top-K fragments (i.e. including all fragments). In this case, the modeling task from the fragment-based methods using a vast vocabulary of fragments would be more difficult, but the method wouldn't lack these important fragments such as big rings, preventing them from reconstructing specific molecules. Have experiments on the baselines been carried out by varying the value of \"k\" in the top-k fragment-based vocabularies to see how vocabulary size trades-off with learning complexity? It seems to me that this might be a point where a more appropriate tuning of the baseline's hyperparameters would make a difference. And if the main advantage of the proposed factorisation is that it removes the need for such tuning when constructing the fragment vocabulary, it would be interesting to discuss these considerations in the paper."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "In general, I found the paper interesting and very well written. It clearly identifies a limitation of current fragment-based molecular generators in terms of expressivity, and present a well motivated solution based on a novel factorisation paradigm. I believe this is a good paper which brings significant contributions to fragment-based molecular generation approaches and for this reason I recommend that the paper should be accepted. I suggested some clarifications (see below) which I think would increase the clarity of the paper and complement the discussion on the limitations of the method and the general positioning of fragment-based and one-shot graph molecular generators.\n\nA few specific notes:\n1. The Related Work section is exhaustive.\n2. The proposed method is clearly introduced and detailed. In particular Figures 2 and 6 effectively convey the hierarchy supporting this paradigm.\n3. The experimental section is well structured and the authors compare to a large array of prior work and using several public datasets. The results effectively support the main claims made in the paper. In particular, the results presented in Figure 3, Figure 4 are compelling."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, the authors aim to address a fundamental limitation of fragment-based molecular generators. The vocabulary of such models is defined as the union of common molecular fragments, obtained from datasets of known molecules, and individual atoms, which can be used to re-generate motifs that would not be listed under the available fragments. The authors argue that this choice of vocabulary creates an inherent tradeoff in the expressivity of the model. On the one hand, including more fragments quickly increases the vocabulary of the model, with the number of motif variation increasing exponentially with their size. On the other hand, learning to model missing fragments from individual atoms is a challenging task requiring even more training data and often leading to unrealistic molecular motifs. \n\nAs a solution, the authors propose a coarse-to-fine-grained molecular generation paradigm centred around molecular scaffolds, rather than fragments, as the basic building block of the model's vocabulary. A single scaffold implicitly captures many similar fragments, allowing for a relatively small vocabulary size while retaining expressivity. The proposed model, MAGNet, a VAE-based molecular generator, operates on this multi-level factorisation paradigm, by first sampling scaffolds and only then specifying the atomic composition of the scaffolds, their joints and the leaf nodes as a successive step. They evaluate this approach on several benchmarks against a variety of baselines and show that the proposed method, with its more expressive vocabulary, can reliably generate complex molecular motifs, in addition to allow for latent code optimisation and interpolation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. One of the main limitation of fragment-based methods is the absence of synthesizability considerations in the framework design. This significantly limits the applicability of such methods since, to be tested in physical and biological assays, the proposed molecules either require individual and expansive custom synthesis plans or have to be replaced by available analogs, thus drastically under-utilizing the expressivity of the model. It would be interesting to further discuss the limitations of the proposed method w.r.t. synthesizability in the manuscript.\n2. On line 371: \"We do not report Novelty and Uniqueness, as almost all evaluated models achieve 100% on these metrics.\" if this claim is made, then the numbers should be presented (at least in Appendix). Same for the mention right after: \"For the baselines DiGress, SM-LSTM, and CharVAE, which are not able to achieve 100% Validity, we sample until we obtain 10^4 valid molecules\", it could be informative to include these numbers in appendix (validity rate for each method).\n3. It would be useful to specify a bit more clearly how benchmarking on Guacamol is executed (lines 301-304 and 309-311). Is the model trained on ZINC and then evaluated on some reference set defined by Guacamol (Chembl)? Or is the model trained on Chembl molecules and compared to a test set also defined in Guacamol? While the provided references are useful, the description of the experiments should be standalone in the paper.\n4. In the goal-directed evaluations, it would be interesting to compare MAGNet with methods specifically aimed at goal-directed molecular design such as RL and GFlownet based methods.\n\n### Elements worth clarifying\n\n1. Figure 1 could be made clearer by further explaining parts a), b) and c) in the caption.\n2. The factorisation from graph to scaffold graph, and scaffold graph to molecular graphs, described in Section 3, would be clearer with a supporting Figure.\n3. 
The main baselines (PS-VAE, MoLeR and MiCaM) could be described in greater details.\n4. I found the discussion on novel conditioning capabilities very interesting (lines 469-476), however, even with this in mind, it is not clear to me what Figure 5 is showing or how it supports these claims. I think this figure could be improved to better support this discussion.\n\n### Minor comments:\n\n1. Typo on line 155, factorise*\n2. Line 150: not sure that App C.6 is the intended link here (or how it relates to the sentence)?\n3. Typo on line 302: to evaluate*\n4. Error in Figure 3: based on the text and the caption itself, it seems to be that the graph columns B and C are mixed up in Figure 3.\n5. I did not find the caption of Table 1 very natural to read. I would suggest simply specifying in parenthesis what underline and bold mean in the table, as opposed to underlining and bolding their description in the caption."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. \nCould the authors provide the **validity** of generated molecules? Are there specific metrics used to ensure that generated molecules are fully **connected**, given that some examples (such as in the bottom row of Figure 11) suggest molecules with broken structures?\n\n2. \nA comparison with baseline methods in terms of **generation speed** would add valuable context for practical deployment. \n\n3. \nCould the authors clarify the **vocabulary size** difference between your approach and baseline models?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper tackles a significant issue in 2D molecular generation, and Figure 1a provides an illustrative example that emphasizes the limitations of current motif-based methods in generating novel molecular structures. This emphasis on structural diversity is both timely and impactful for the field.\n\n\n2. Experimental results demonstrate an increase in generated structural diversity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses a critical limitation in substructure-based molecular generative models: the inability to capture the structural diversity in molecular space due to missing complex structures from the motif vocabulary. The authors propose a novel approach that employs a structural scaffold vocabulary, leaving atom and bond types to be predicted by the model. This approach is intended to enrich structural diversity, with specific metrics introduced to highlight the advantages of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limitations in Generating Novel Molecular Structures**\n\n1.1 A primary concern is whether the proposed solution truly resolves the problem of generating novel molecular structures. As highlighted in Figure 1a, the generation of new substructures—particularly complex fused ring systems—appears challenging. Both the original motif approach and the scaffold motif proposed in this work seem constrained in their ability to construct unseen complex fused rings because they are hard to piece together using substructures in the vocabulary.\n\n1.2 Furthermore, while the scaffold motif approach can potentially reduce vocabulary size, it still requires additional specification of bonds and atom types on rings, which might affect the validity of atom valence of the generated molecules. \n\nThus, while the problem raised is compelling, the method appears to partially address, rather than fully resolve, this issue.\n\n2. **Writing**\n\n2.1 Overlap in Sections 3 and 4: Sections 3 and 4 present overlapping content, which could benefit from a clearer delineation. Specifically, Section 4 should focus more on detailing the network structures (architecture) within each module, and provide an illustration of the model structure. The high-level probability descriptions, already covered in Section 3, could be streamlined here.\n\n2.2 Figure Captions: In Figure 3, the captions for panels (b) and (c) appear reversed.\n\n2.3 References: The authors should thoroughly review the reference list for consistency. It is full of preprint references. Accepted papers should reference their final publication locations rather than preprints (e.g. MiCaM and ShapeMol, etc). Please also check with repeated links and unusual endings with paper abbreviations (e.g. G-SchNet, JODO, etc.).\n\n3. **Experimental Results**\n\nWhile structural diversity has been enhanced, some important metrics, such as FCD and SA, appear to drop. 
Additionally, there is no mention of the validity rate of generated molecules in the table 1."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024magnet,\ntitle={{MAGN}et: Motif-Agnostic Generation of Molecules from Scaffolds},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5FXKgOxmb2},\nnote={under review}\n}"
},
"abstract": {
"value": "Recent advances in machine learning for molecules exhibit great potential for facilitating drug discovery from in silico predictions.\nMost models for molecule generation rely on the decomposition of molecules into frequently occurring substructures (motifs), from which they generate novel compounds. \nWhile motif representations greatly aid in learning molecular distributions, such methods fail to represent substructures beyond their known motif set, posing a fundamental limitation for discovering novel compounds.\nTo address this limitation and enhance structural expressivity, we propose to separate structure from features by abstracting motifs to scaffolds and, subsequently, allocating atom and bond types. \nTo this end, we introduce a novel factorisation of the molecules' data distribution that considers the entire molecular context and facilitates learning adequate assignments of atoms and bonds to scaffolds. Complementary to this, we propose MAGNet, the first model to freely learn motifs. Importantly, we demonstrate that MAGNet's improved expressivity leads to molecules with more structural diversity and, at the same time, diverse atom and bond assignments."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"graph generative models",
"2d molecules"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/18db6e58953edc61190a85d710744e406948b1e9.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/d8d7f08e7fb6381b0d81089fe3f0f6165551760d.zip"
},
"title": {
"value": "MAGNet: Motif-Agnostic Generation of Molecules from Scaffolds"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5G9PrHERql | F2M-Reg: Unsupervised RGB-D Registration with Frame-to-Model Optimization | main | Active | RGB-D registation;unsupervised learning;frame-to-model optimization | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 5;5;5;6 | 2;2;4;4 | 3;2;2;3 | 2;2;2;3 | 3;3;2;2 | 5.25 | 3 | 2.5 | 2.25 | 2.5 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper introduces neural radiance fields to provide global information for the registration training\n2. It builds a synthetic dataset to pretrain the model, ensuring the rationality of pose initialization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a frame-to-model optimization framework guided by a neural implicit field for unsupervised RGB-D registration. By introducing the differential rendering capabilities of neural radiance fields, better pose supervision can be achieved. Meanwhile,this paper creates a synthetic dataset for warming up registration model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The comparison with existing work seems unfair: \n\n - sota methods like PointMBF are trained purely without supervision while the proposed method requires extra dataset with pose labels. In Table 1, the metrics of the proposed method looks slight better than existing unsupervised data. Does the improvement come from the benefit of including extra dataset? Please provide results without the boostrap using the process of training the registration model on the Sim-RGBD dataset for Table 1. Or please consider other ways to make a fair comparison. Will other work benefit the extra dataset with pose labels as well?\n\n - In line 368-370, the authors propose to evaluate more difficult setting “evaluating view pairs sampled 50 frames apart” and mentioned “However, due to insufficient overlap in some segments of the data for pairs sampled 50 frames apart, the evaluation significantly distorts both the mean and median values. As a result, we have chosen not to include these results in our experimental presentation.” My question: why this 50 frames apart setting? Also if the insufficient overlap pairs are excluded, \n\n2. The influence of the warming up dataset is not studied. How large the warming up dataset should be? How similar the warming up dataset should be to the target datasets? What’s is the influence of number objects of ShapeNet on the final metrics? For the construction of datasets, there are already many synthetic datasets for indoor scenes, such as Replica and Habitat. This paper does not demonstrate the advantages of the custom dataset compared to other datasets. \n\n3. The training efficiency. As NeRF is very slow in training, the proposed method should require much more computation resources than PointMBF and hard to scale."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. F2M-Reg stands out by shifting from a frame-to-frame to a frame-to-model approach for RGB-D registration, which is an extension of existing approaches in the context of unsupervised 3D vision tasks. \n2. This use of a neural implicit field as a global scene model to capture broader scene-level information is a possible direction to handle complex conditions, such as low overlap and lighting changes, where traditional methods often fall short.\n 3. The introduction of a synthetic bootstrapping dataset, Sim-RGBD, bridges the gap between synthetic and real-world performance in unsupervised settings, which is a notable improvement in unsupervised model initialization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents F2M-Reg, an unsupervised RGB-D registration framework that addresses the frame-to-frame registration task by dealing with multi-view inconsistencies with bootstrapping with a synthetic dataset."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although F2M-Reg is compared with several baselines, it is unclear where the improvement comes from. According to Table 4, the results without bootstrapping are not exciting enough.\n2. In order to evaluate the effectiveness of the neural implicit field-guided mechanism, this paper needs additional experiments and comparisons with SOTA approaches without bootstrapping."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- The performance gains from Sim-RGBD bootstrapping across different datasets is not quite consistent. Can the authors provide insights into why bootstrapping appears less critical for ScanNet compared to 3DMatch?\n\n- NeRF optimization, particularly joint pose and neural field optimization, typically assumes static scenes. This assumption might limit the method's performance in dynamic environments. If this is indeed a constraint, it should be acknowledged in the paper. If not, could the authors clarify why the method remains effective in dynamic scenarios?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper identifies the limitations of frame-to-frame matching in unsupervised learning, particularly due to instabilities arising from lighting variations, occlusions, and reflective surfaces. The proposed frame-to-model matching, supported by a neural implicit field (NeRF), effectively mitigates these issues, demonstrating a significant improvement over traditional methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an unsupervised method for robust RGB-D registration without ground-truth pose supervision. Unlike prior methods that rely on frame-to-frame photometric and geometric consistency, which are often affected by lighting changes, occlusion, and reflective materials, F2M-Reg employs a frame-to-model approach. The method begins with pre-training the model on a synthetic dataset, Sim-RGBD, with ground-truth poses, and subsequently fine-tunes it on real-world datasets without ground-truth poses by leveraging a neural implicit field as a 3D scene representation for pose optimization. This approach enhances robustness against multi-view inconsistencies, as demonstrated by experimental results comparing F2M-Reg with existing methods. In summary, F2M-Reg contributes a new unsupervised RGB-D registration framework, a synthetic dataset for initial model training, and an effective frame-to-model approach, setting new benchmarks on popular RGB-D datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The use of the term bootstrap in the paper is potentially misleading. In deep learning, bootstrapping generally refers to iterative self-training, where a model refines itself by generating pseudo-labels and learning from them. The training on synthetic data described in this paper aligns more with pre-training. However, the fine-tuning on real datasets with initial poses refined through NeRF could be called bootstrapping."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No ethics concern."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The paper states that the frame-to-frame framework experiences difficulties in maintaining multi-view consistency due to issues like lighting variations, geometric occlusions, and reflective surfaces; however, NeRF encounters similar challenges under these conditions. It should be clarified how the frame-to-model approach addresses these limitations in comparison to the frame-to-frame method.\n\n- In the proposed framework, initial poses are generated using a bootstrap method trained on synthetic RGB-D data. Considering that Co-SLAM, utilized for tracking within the proposed framework, also incorporates its pose initialization strategy based on constant movement assumption, how do these initial poses from the bootstrap method align or integrate with the initial pose assumptions in Co-SLAM? Specifically, please detail any adaptations or refinements made to ensure consistency between the poses initialized by F2M-Reg and the subsequent pose tracking performed by Co-SLAM,\n\n- Please also discuss the limitations of the proposed methods."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The writing is comprehensive and easy to follow\n- The proposed method demonstrates high performance compared to baseline methods on two real-world RGB-D datasets.\n- The ablation study on the main text and supplementary materials are thorough and effectively showcases the efficacy of the proposed framework."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduced a method for RGB-D registration using frame-to-model optimization. The pipeline incorporates a pretrained registration model suing PointMBF and Co-SLAM for point cloud registration. The experiments on real-world ScanNet and 3DMatch datasets present the superiority of the proposed method. The ablation studies investigated the effectiveness of each components."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The proposed method should not be characterized as unsupervised learning as it claimed. Instead, it involves a registration model that has been pretrained on a synthetic RGB-D dataset and subsequently adapted to real-world RGB-D scenes during inference. Strictly speaking, the method aligns more precisely with a zero-shot learning approach. Please ensure the claims are accurately represented.\n\n- The experiments could benefit from utilizing more recent datasets. The ScanNet-v2 dataset, while widely used, is dated and known for sensor noise that results in unreliable depth maps with many gaps containing zero or infinity values. More accurate and high-resolution RGB-D streams are available in newer datasets like ScanNet++ and TUM RGB-D. Conducting additional experiments on these recent datasets is recommended.\n\n- The proposed pipeline incorporates Co-SLAM, which utilizes a tracking system for camera pose estimation, requiring that the input RGB-D stream strictly follow a time series. However, this requirement shows limitations for general RGB-D registration tasks that handle multi-view data, where the input does not necessarily adhere to a time-series format. In scenarios involving unordered data, the effectiveness of a SLAM system may be compromised."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024fmreg,\ntitle={F2M-Reg: Unsupervised {RGB}-D Registration with Frame-to-Model Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5G9PrHERql},\nnote={under review}\n}"
},
"abstract": {
"value": "This paper focuses on training a robust RGB-D registration model without ground-truth pose supervision. Existing methods usually adopt a pairwise training strategy based on differentiable rendering, which enforces the photometric and the geometric consistency between the two registered frames as supervision. However, this frame-to-frame framework suffers from poor multi-view consistency due to factors such as lighting changes, geometry occlusion and reflective materials. In this paper, we present F2M-Reg, a novel frame-to-model optimization framework for unsupervised RGB-D registration. Instead of frame-to-frame consistency, we leverage the neural implicit field as a global model of the scene and use the consistency between the input and the rerendered frames for pose optimization. This design can significantly improve the robustness in scenarios with poor multi-view consistency and provides better learning signal for the registration model. Furthermore, to bootstrap the neural field optimization, we create a synthetic dataset, Sim-RGBD, through a photo-realistic simulator to warm up the registration model. By first training the registration model on Sim-RGBD and later unsupervisedly fine-tuning on real data, our framework enables distilling the capability of feature extraction and registration from simulation to reality. Our method outperforms the state-of-the-art counterparts on two popular indoor RGB-D datasets, ScanNet and 3DMatch. Code and models will be released for paper reproduction."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"RGB-D registation",
"unsupervised learning",
"frame-to-model optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/2da5ff5aa45ce8830b56081a7a3079e8f16ddb30.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/fcb011ae84d63ba77b58746af2e1cdf4ed543355.pdf"
},
"title": {
"value": "F2M-Reg: Unsupervised RGB-D Registration with Frame-to-Model Optimization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5GI6BGToyw | AtmosArena: Benchmarking Foundation Models for Atmospheric Sciences | main | Active | foundation models;atmospheric sciences;benchmarks | datasets and benchmarks | 3;5;6;6 | 4;3;3;4 | 2;3;3;3 | 2;3;3;3 | 3;3;3;3 | 5 | 3.5 | 2.75 | 2.75 | 3 | -0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. **New Perspective:** The article evaluates atmosphere modeling from a multitasking perspective and verifies the effectiveness of two pre-trained models: ClimaX (multi-source pre-trained models) and Stormer (single-source pre-trained models).\n2. **Open Source:** By providing a standardized and open-source framework, AtmosArena sets a new standard for reproducibility and transparency in multi task atmospheric learning.\n3. **Include Finetuneing:** The paper explores the finetuning protocols for foundation models, comparing frozen versus fully finetuned backbones.\n4. **Diverse Tasks:** AtmosArena benchmark includes both atmospheric physics and atmospheric chemistry tasks, and the benchmarks utilize well-regarded datasets like ERA5, ClimateBench, and ClimateNet."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces AtmosArena, a benchmark designed for evaluating foundation models in atmospheric sciences, focusing on multi-task spatiotemporal learning. The paper mainly evaluates two prominent models, ClimaX and Stormer, comparing their performance with traditional baselines across various tasks. Stormer performs well in short-term forecasting, while ClimaX excels in tasks with longer temporal horizons, highlighting the benefits of multi-source pretraining. Overall, the authors demonstrate that pre-trained models generally outperform task-specific baselines, and AtmosArena serves as a comprehensive tool for advancing atmospheric modeling."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Regarding the completeness of the benchmark, I believe the authors should add relevant metrics, such as **inference FLOPs and model parameters**.\n\n2. The authors need to provide more **evidence (e.g., frameworks and code)** to demonstrate the benchmark’s ease of use and reproducibility.\n\n3. Although the authors conducted extensive experiments to show that no single model excels across all tasks, I believe they should **further analyze** the experimental results rather than simply testing performance across different tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please explicitly describe how the presented tasks complement each other and aim to test the necessary variables, and spatial and temporal scales to qualify a model as 'foundational'."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "While each component of the dataset is not novel, the combination and the testing of multi-model foundational models make this a useful contribution. The baselines are well chosen and the data appears to be well structured (though I have not tested this). The paper is well structured and well written. In particular, each of the sub-tasks defined in the 'arena' are appropriate and complementary, and the baselines are appropriate and test interesting aspects of their generalizability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a comprehensive collection of benchmarks, with appropriate baseline models, for tasks related to atmospheric science and prediction."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "It isn't entirely clear that any of the presented models are foundation models; it feels like the 'atmosphere' is a bit too specific of a system to qualify as foundational. To some extent this is something the cited models need to demonstrate, but nonetheless, to demonstrate its utility, this paper should describe what they mean by a foundation model for the atmosphere, and why the tasks they present (within a fairly narrow range of spatial and temporal scales) would test such foundational knowledge. Relatedly, the paper feels a bit like a collection of benchmarks and could do a better job of explaining why the various tasks are complementary, e.g. because they all rely on some underlying understanding of the covariance of the atmosphere over different spatial and temporal scales (be explicit)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Paper Questions:\n- in section 4.5, Table 4, the numbers reported for ClimaX, ClimaX frozen and\n ClimateBench-NN are exactly the same as the values reported in the ClimaX\n publication Table 2, which I do find a bit surprising that there is no\n statistical difference given the non-deterministic nature of Deep Learning\n model training and a rather small data size\n- is there a reference for the Spectral Divergence metric you use, or is this a metric you propose in this work?\n- Line 18, you say the \"first multi-task benchmark\", but in table 1 you list\n existing works that consider \"multiple atmospheric tasks\" so I don't think\n your claim of \"first\" holds, but maybe you can clarify?\n\nGeneral Comment:\n- I believe a work like this is highly dependent on the quality of the code,\n this is difficult to assess in this review process which is very unfortunate"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The set of tasks under this new benchmark suite is larger than any of the individual existing frameworks and therefore offers the chance for a more in depth comparison of foundation models across tasks. The paper is clearly written and structured such that it is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a benchmark suite in which they collect several benchmark datasets into one framework which therefore offers a broader set of tasks to evaluate foundation models in weather and climate. Using their AtmosArena setup, the authors evaluate two prominent foundation models, ClimaX and Stormer on the set of atmospheric phsysical tasks and highlight performance\ndifferences that underline the usefulness of the benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In my opinion a shortcoming of the proposed framework is that it is more a collection of existing benchmark frameworks like Weatherbench, ClimateBench, and ClimateNet and misses some opportunities for improvement. This statement to me is not about \"novelty\", but rather remaining shortcomings or inconsistencies under this new framework.\n\n- While you state that in the future you would like to have a public\n leaderboard, I think this is already something to be expected from such a\n framework. Something like the google [Weatherbench leaderboard](https://sites.research.google/weatherbench/) is a decent\n refrenece to get a first idea of perfomrance across models\n- For the task of climate down scaling you employ the RMSE, Bias and Pearson\n correlation as available metrics, however, especially here, metrics like Power\n Spectral Density (PSD), and PSD plots are important and offer more insights than a single error metric value\n- in fact, for all climate related prediction tasks (Forecasting, Super-resolution, inpainting mentioned in your paper) spectral metrics are\n commonly employed in the atmospheric science literature and should be a part of an atmospheric benchmark\n- as another point, I think not including any probabilistic metrics across the\n collected tasks is unfortunate because simple point estimates just come short\n of the complexities given these tasks. A notion of prediction uncertainty and\n an assessment of the predictive uncertainty with metrics like proper scoring\n rules seems essential when aiming to do an holistic analysis. Probabilistic metrics were for example included in Weatherbench2 but seem to be missing from your framework. \n\nGiven that from my understanding you have collected existing frameworks under a new benchmark suite, I would have expected some additional improvements about the shortcomings of those, especially surrounding additional evaluation metrics as this is such an important part of benchmarking. 
While the employed metrics are very common in machine learning, I think additional metrics like PSD that exist in the atmospheric science literature are essential for a good benchmark framework."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "AtmosArena has a great overlap with ClimateLearn, ClimaX, and Aurora. Almost all the tasks and datasets except for ClimateNet and Berkeley Earth are already in the other datasets. Is it that you created new annotations, or is it easier to use? Why do we need AtmosArena and not to test on the other ones directly?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper presents a standardized benchmark for climate and weather evaluation with data, metrics, code, and baselines. It evaluates all consistently, transparently, in a way that it is easy to reproduce and that will facilitate and boost the research in this area."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This article proposes a new benchmark for evaluating atmospheric and weather foundation models. It comprises the following tasks: \nweather forecasting, S2S forecasting, climate data infilling, climate model emulation, climate downscaling, and extreme weather events detection with several metrics for each task. datasets, fine-tuning protocols, evaluation code, standardized metrics, and traditional and machine learning baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "AtmosArena, does not offer a pertaining dataset, only evaluation.\n\nIt is not clear to me why we need a new benchmark. In the related work section of benchmark three should be a clear comparison with the other benchmarks and why this one is better and needed."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce AtmosArena, the first benchmark dedicated to foundation models in atmospheric sciences."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024atmosarena,\ntitle={AtmosArena: Benchmarking Foundation Models for Atmospheric Sciences},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5GI6BGToyw},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep learning has emerged as a powerful tool for atmospheric sciences, showing significant utility across various tasks in weather and climate modeling. In line with recent progress in language and vision foundation models, there are growing efforts to scale and finetune such models for multi-task spatiotemporal reasoning. Despite promising results, existing works often evaluate their model on a small set of non-uniform tasks, which makes it hard to quantify broad generalization across diverse tasks and domains. To address this challenge, we introduce AtmosArena, the first multi-task benchmark dedicated to foundation models in atmospheric sciences. AtmosArena comprises a suite of tasks that cover a broad spectrum of applications in atmospheric physics and atmospheric chemistry. To showcase the capabilities and key features of our benchmark, we conducted extensive experiments to evaluate two state-of-the-art deep learning models, ClimaX and Stormer on AtmosArena, and compare their performance with other deep learning and traditional baselines. By providing a standardized, open-source benchmark, we aim to facilitate further advancements in the field, much like open-source benchmarks have driven the development of foundation models for language and vision."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"foundation models",
"atmospheric sciences",
"benchmarks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5d0feac8abd96fa405cd12cf7070e2df9f9f597d.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "AtmosArena: Benchmarking Foundation Models for Atmospheric Sciences"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5GZuEZDmUE | Spectral Truncation Kernels: Noncommutativity in $C^*$-algebraic Kernel Machines | main | Active | kernel methods;positive definite kernel;spectral truncation | learning theory | 3;3;5;5 | 3;4;2;3 | 3;3;3;2 | 2;2;3;2 | 2;2;3;3 | 4 | 3 | 2.75 | 2.25 | 2.5 | -0.707107 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Overall I would be happy to increase the score of the paper (around 4) if answers are brought during the rebuttal.\n\n- Please clarify the motivation and express the pros and cons with previous competing methods\n- Give at least one example to make the reader understand the issue with commutativity and the benefit of non commutativity\n- Identify as much as possible the family of problems that could in principle be tackled by this method\n- Complete the toy experiments with a comparison with other function-valued regression methods \nfor the existing experiments, are the same curves observed when dealing with a very large data regime ?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper is following an original path in the kernel theory, exploring new schemes of kernels with values in a C* algebra. The work is certainly promising and of great interest for the kernel community. based on solid mathematical work, it opens a new way to tackle vector-valued or function-valued regression.\n- Of special interest on the case of input and outputs that can be considered as functional (or vectors that can be seen as values of functions like images), the spectral truncation kernel allows for a drastically reduced computation cost in Kernel Ridge Regression while offering a great expressivity. \n- It can be declined with various choices of ground kernels and its positive definiteness is studied\n- Products of (function-valued)-functions based on those kernels provide a deep architecture."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the recent subfield of positive definite kernels with values in a C*-algebra and RKHM (the correponding \"RKHS\" theory) . The whole work is motivated by going beyond the separable kernel widely used in vector-valued RKHS based on operator-valued kernels and benefit from with a better compute time when applying kernel “ridge” regression. The main interest of working with a C*-algebra is that it comes with a norm, a product and an involution, unifying operators and functions. In particular, the paper focuses on the C*-algebra of continuous functions and the case where inputs as well are elements of this C* algebra. The paper is illustrated with the example of continuous functions on the 1D torus. The authors propose a novel function-valued kernel, spectral truncation kernel, relying on the approximation of the multiplication operator with respect to x (defined in L2(T)) by leveraging a truncated spectral decomposition. The dimension of the truncated basis encodes a trade-off between the representation power and the model complexity. The resulting kernel also benefits from the noncommutativity of the approximated product and can be shown to converge. Applied on Kernel Ridge Regression in RKHM, this new kernel leads to a reduction of the complexity in time. It also comes with a generalization bound which is a direct instantiation of the result proven by Hashimoto et al. (2023). A deep architecture based on product (and not composition) of different kernel-based functions in RKHM is also presented. Experimental results study the behaviour of the approach with respect to the truncation parameter on a toy dataset. An additional result on an inpainted image recovery problem built on MNIST data is also briefly presented."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Even though very interesting the paper suffers from different flaws: some concern the presentation and can be considered as relatively minor, while the others are more fundamental.\nWeaknesses in the content:\n*The motivation of the paper remains unclear: if the authors wish to go beyond the separable kernel in the general case of vector-valued functions, they should briefly discuss the limitations (which indeed exist) of the different operator-valued kernels such as the transformable kernels, the separable kernels or combination of them. If the motivation is to use RKHM theory in the case of function-valued functions then it is of paramount importance to highlight what cannot be done with operator-valued kernels devoted to functions with outputs in Hilbert spaces of functions. \n*Once a family of kernels is defined, in machine learning, we are interested on the ability of the machine learning algorithms to indeed take benefit from this kernel and provide a good solution to the ML task. So what is missing in this paper, is a discussion and an empirical study to determine when using those kernels are interesting compared to previous methods: does the complexity of the model make the algorithm more greedy in training data. I do not think that the actual generalization error bound really help to tell us that in precise terms.\nMy advice is thus to complete the paper with comparison with other (operator-valued) kernels and vv-RKHS. this has to be done in the case of the current toy dataset but also in known functional regression data sets.\n* Applicability and relevance of the methodology: a central question that is still not enough answered at the end of the paper is the following: on which family of problems, these spectral truncation kernels are relevant ?\nFor instance, the use of function-valued kernels for inputs and outputs which are vectors should be discussed. 
I think it is important here to clarify this: images are by definition a discretisation of continuous maps (intensity of pixel in finite resomution) and then they can be seen as a set of values of a function taken on different observation points. There is a great interest at considering the functions as continuous functions.\n I do not think it is always meaningful for a vector to be encoded as a function of its coordinate index: can you comment on that ? \n* Finally I do think that the paper would have sufficient content if it was restricted to function-valued functions. However, if images are tken as examples, then more convincing and complete comparison on image completion should be givne with more involved problems than weakly inpainted images. For the results given, do not say vv-RKHS comparions in the table say clearly the name of the matrix-valued kernel you used and try other kernels including more general operator-valued kernels for function-valued functions.\n\nWeaknesses in the presentation of the paper.\nThe paper is in general not self content, too much straightforward in its statements and very not enough precise in the presentation. It seems to me that an important work of re-writing is necessary, even though it is quite obvious that some efforst ahve been made here.\nTo give a few suggestions:\n- rewrite the introduction with clear motivations and do not enter into partial details that cannot be understood at this stage (n < infty, n infty..)\n\n- line 154 we jump into a comment about C(T) but previsously functions on the real torus were just an example. Now X = A ?\nplease say it !Moreover we cannot understand the sentence \"however, by approximating ... by a Toeplitz matrix..;\", we do not know yet that Toeplitz matrix will be involved here \n\n\n- before line 174, say a word about the works of Van Suijlekom and explain the role of the Fejer function. The reader has to consult this apper to undertstand the construction. 
It is important in what follows when talking about convergence.\n\n- in general, do not give proposition under the form of the sole formula but write a sentence introducing the property and a comment on what the proposition brings.\n- generalization bound : clearly state as in the appendix that this result comes directly from previous literature (Maurer, 2016... Hasimoto et al. 2023)\n- It is crucial to introduce m when describing the observations at line 364.\n- experiments : \nwhat do you want to bring in terms of emprirical evidence: please present the experiments as an answer to the questions/motivation of the beginning of the paper"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper introduces an approach to kernel design by leveraging the mathematical framework of C*-algebras and RKHM, offering a potentially powerful way to model complex data relationships. The theoretical analysis of generalization bounds provides valuable insights into the trade-off between representation power and model complexity, guided by the kernel's truncation parameter."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new class of C*-algebra-valued positive definite kernels called spectral truncation kernels for vvRKHS. The noncommutativity, controlled by a truncation parameter n, allows for capturing interactions along the data function domain. The paper argues this enables a balance between representation power and model complexity, potentially leading to improved performance. A generalization bound is derived, highlighting the role of n in this tradeoff."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the authors claim a computational advantage over vector-valued RKHSs (vvRKHSs) due to the linear dependency on output dimension m compared to cubic dependency in vvRKHSs, this advantage is not clearly demonstrated. The computational cost analysis lacks a direct comparison with vvRKHSs employing appropriate approximation techniques. For instance, the use of Nyström methods or random Fourier features could significantly reduce the computational burden of vvRKHSs, potentially negating the claimed advantage of spectral truncation kernels.\n\n2. The deep model extension, while promising, lacks theoretical grounding. The analysis of representation power growth is based on a very specific construction and doesn't provide general insights into the behavior of deep networks with spectral truncation kernels. \n\n3. The experimental results, while suggestive, are not compelling enough to validate the claimed advantages. The experiments are limited to synthetic data and a simplified MNIST task. More complex, real-world datasets with function-valued outputs are needed to assess the practical performance and demonstrate a clear advantage over existing methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See Weakness."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors have introduced the basic properties of the proposed kernels and also investigated the generalization error. The presentation is clear."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new class of positive definite kernels based on the spectral truncation. Detailed properties and examples have been discussed, and numerical results on both synthetic data and the MNIST dataset are presented."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Though the theoretical results are promising, two main questions remain unaddressed:\n1. How can practical learning designs benefit from the new algebraic structures?\n2. How can the development facilitate the kernel choices in practice? Section 6 seems to have some discussions on deep models, but a general development shall be data/task-dependent."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In line 156, $z \\in \\mathbb{T}$ be the Fourier function. Why $z$ is a function? Based on the **Example 2.2**, $\\mathbb{T}$ is a set, and the elements of $\\mathbb{T}$ are real numbers. This puzzled me.\n2. One of the main contributions of this paper is that the authors generalize typical kernels by introducing the noncommutativity of the products appearing in the kernels and showing their advantages. This is because $\\mathcal{R}_n (x)$ and $\\mathcal{R}_n (y)$, based on the spectral truncation, is noncommutative. However, the benefits of introducing noncommutativity in terms of convergence and generalization were not found. The effect of the noncommutativity is just illustrated in the experiment part. Or am I missing some details?\n3. **Theory 3.4** gives the theoretical result of the convergence of proposed kernels. Does this mean that the proposed $k_n^{poly,q}(x, y)(z)$, $k_n^{prod,q}(x, y)(z)$, and $k_n^{seq,q}(x, y)(z)$ can approximate $k^{poly,q}(x, y)(z)$, $k^{prod,q}(x, y)(z)$, and $k^{seq,q}(x, y)(z)$, respectively? So is there any theoretical guidance for the selection of $n$, that is, how much $n$ can be well approximated?\n4. From **Theory 4.1**, we can observe that the generalization bound is related to the trace of the kernel. How is this different from the previous theoretical results?\n5. $n$ is the number of orthogonal bases. Therefore, the complexity of the model is larger if $n$ is larger, and the representation power of the model is better. This phenomenon also occurs in the general learning process or kernel function approach strategies. How is this different from them?\n6. To obtain $c(z)$, computing $(G(z)+\\lambda I)^{-1}y(z)$. Poor scalability.\n7. Some definitions are in the complex number domain, while some are in the real number domain. It is confusing for me. When to do it in the complex number domain and when to do it in the real number domain. It can include a pseudo-code to show the details."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper is well written and organized.\n2. Theoretical analysis is detailed, enabling the proposed method to have solid theoretical support.\n3. The authors consider the deep spectral truncation kernel, sharing the advantage of both deep model and spectral truncation kernel, to improve the representation power."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, authors propose a set of positive definite spectral truncation kernels, which is a class of the $C^*$-Algebra-valued kernel. The definitions of the proposed kernels involve several concepts, including $C^*$-Algebra, function-valued kernel, spectral truncation, and the torus. The authors provide a theoretical analysis of the convergence and generalization. In addition, the authors introduce a noncommutativity and further illustrate its effectiveness with numerical results."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Please see the **Questions** section."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a new class of positive definite kernels based on the spectral truncation, which address two issues regarding vector-valued RKHSs, the choice of the kernel and the computational cost."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024spectral,\ntitle={Spectral Truncation Kernels: Noncommutativity in \\$C{\\textasciicircum}*\\$-algebraic Kernel Machines},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5GZuEZDmUE},\nnote={under review}\n}"
},
"abstract": {
"value": "$C^*$-algebra-valued kernels could pave the way for the next generation of kernel machines. To further our fundamental understanding of learning with $C^*$-algebraic kernels, we propose a new class of positive definite kernels based on the spectral truncation. We focus on kernels whose inputs and outputs are vectors or functions and generalize typical kernels by introducing the noncommutativity of the products appearing in the kernels. The noncommutativity induces interactions along the data function domain. We show that it is a governing factor leading to performance enhancement: we can balance the representation power and the model complexity. We also propose a deep learning perspective to increase the representation capacity of spectral truncation kernels. The flexibility of the proposed class of kernels allows us to go beyond previous commutative kernels, addressing two of the foremost issues regarding learning in vector-valued RKHSs, namely the choice of the kernel and the computational cost."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"kernel methods",
"positive definite kernel",
"spectral truncation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/915eb5b834d8f98947967d42e8866adf36fd2d25.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/533c0bcb6cf9cbf2a3ea0086fbf51a9081600846.zip"
},
"title": {
"value": "Spectral Truncation Kernels: Noncommutativity in $C^*$-algebraic Kernel Machines"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5GauLpaNGC | Task Characteristic and Contrastive Contexts for Improving Generalization in Offline Meta-Reinforcement Learning | main | Active | Reinforcement Learning;Meta-Reinforcement Learning | reinforcement learning | 3;6;8 | 4;4;3 | 2;3;3 | 2;3;3 | 1;3;3 | 5.666667 | 3.666667 | 2.666667 | 2.666667 | 2.333333 | -0.802955 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. TCMRL brings a fresh perspective by dynamically disentangling characteristic features from the trajectories while also maximizing interrelations among tasks.\n\n2. The paper is written clearly, with a logical structure that makes it easy for readers to follow the flow of ideas. Key concepts such as task characteristic and contrastive information are well explained, with visual aids like figures and pseudocode to help illustrate the framework.\n\n3. TCMRL improves the generalization capability of context-based offline meta-RL."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a framework called Task Characteristic and Contrastive Contexts for Offline Meta-Reinforcement Learning (TCMRL), which aims to enhance the generalization ability of context-based offline meta-RL methods. TCMRL introduces two key components: a task characteristic extractor and a task contrastive loss, which work together to generate more comprehensive contexts by capturing both characteristic and contrastive task information. The task characteristic extractor uses an attention mechanism to emphasize transitions that are crucial for characterizing a task, while the task contrastive loss helps to distinguish different tasks by exploring interrelations among trajectory subsequences. Experiments demonstrate that TCMRL significantly improves adaptation to unseen tasks, outperforming existing offline meta-RL methods on multiple benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some results are reported as normalized scores, e.g. Table 1. However, there is no explanation for how normalization is processed.\n\n2. Although context shift is highlighted as one of the primary issues that TCMRL aims to solve, there is no in-depth analysis of how TCMRL reduces context shift compared to other methods, and potential limitations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Please see the weakness part."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The motivation behind learning both task characteristic and task contrastive information for better meta generalisation is reasonable.\n\nThe proposed method is evaluated on many meta RL environments and empirical results show improved performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work targets the problem of offline meta RL by learning a context of a task information from trajectories so that the learned context encoder can quickly capture characteristics of an unseen test task with limited interactions. Specifically, the authors propose to learn such context encoder by conditioning a reward neural network on a weighted aggregation of transition encodings in a trajectory. The authors also propose to train the context vector by penalising rewards prediction when conditioned on a reversed weighted version of context. This work also leverages contrastive learning to train transition encoding."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The proposed method in this work consists of many components around the context encoder training. However, It is unclear to me what is the fundamental technical reason behind these kinds of design and why these specific designs can achieve desired behaviour of the context encoder. There are many explanations in the method part in section 4 but they are not well structured in logic and look very lengthy:\n\n(1) Line 38: “as only a few key transitions within the trajectory provide the main task characteristic information…” This is to say many other transitions do not distinguish tasks. I have concern over this statement as this is only probably correct when the tasks have some property like a hierarchical structure. In general when the dynamics of a target task has a consistent shift on the entire state space, such sparsity prior would not be beneficial.\n\n(2) It is unclear to me why Eq. (7) and Eq. (8) can lead to learning a context encoder such that the task characteristic extractor q can capture task unique transitions. The neural network is probably able to capture task conditioned reward with a task-level context without learning relations in terms of tasks transitions. In my opinion, the network does not promote the correct importance score of c_i. It probably only makes c_i and c_i^neg different and that is enough to learn a conditional reward function under Eq. (7) and (8).\n\n(3) Are r and r_reverse in Eq. (7) and Eq. (8) the same neural network with same parameters? \n\n(4) It seems that Eq. (7) and Eq. (8) only capture the task shift in terms of reward function while the transition dynamics is ignored (no loss function in terms of next state prediction). Can authors please explain the reason?\n\nOverall, the proposed method consists of several modified versions of previous loss functions and is also combined with existing contrastive learning technique. 
The technical novelty is not strong and there is no theoretical analysis on why the proposed objective function can guarantee generalisation in a meta learning setting."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.\tAs illustrated in the weaknesses, why is the lack of generalization attributed to the absence of task characteristic information and task contrastive information? Could you explain this in more detail, and how do you perceive the relationship between these two types of information?\n2.\tWhen extracting task characteristic information, why not consider using a well-established architecture like the Transformer? Given that Transformers leverage self-attention mechanisms to extract key information from sequences and create unified representations while capturing internal relationships within sequences, it seems like a viable option.\n3.\tCould you provide the rationale for designing the negative reward estimation as you did? What motivated this specific design?\n4.\tHow do you determine the proportions of the various losses in the optimization process? I believe that the hyperparameters setting these ratios significantly impact the method’s performance.\n5.\tIn the ablation study, as shown in Figures 5 and 6, I noticed that, in experiments like reacher-v2, removing an individual component within TCE results in greater performance loss than fully removing TCE. How would you explain this phenomenon?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "• Clarity: The paper is well-articulated, with a clear and complete structure that makes the methodology and findings accessible to readers. Additionally, the appendix provides extensive experimental details and analyses.\n• Significance: I believe the authors have targeted a valuable goal, namely addressing the challenge of context shift in context-based offline meta-reinforcement learning methods. The paper demonstrates consistent improvements over baseline methods across a wide range of experiments, showcasing the robustness of their approach in tackling this important issue."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the limitations in generalization and adaptation of existing context-based offline meta-reinforcement learning (meta-RL) methods. The proposed framework, TCMRL, enhances context generation by incorporating both task characteristic information, which identifies key transitions within tasks, and task contrastive information, which distinguishes tasks through interrelations in trajectory subsequences. This combined approach yields a comprehensive task understanding, improving adaptability to unseen tasks. Experiments confirm TCMRL’s advantage in generating generalizable contexts and effective adaptation over previous methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Quality: The paper lacks a strong motivational foundation, particularly in explaining why task characteristic information and task contrastive information are expected to enhance context generalization. While the authors introduce a novel method based on these two types of information, the construction of the approach appears somewhat arbitrary, relying on intuition rather than solid theoretical underpinnings. An improved presentation could include theoretical justifications or empirical evidence demonstrating that capturing these specific forms of task information is indeed crucial for generalization.\n• Originality: Although the technical implementation is undoubtedly innovative in its details, the underlying concepts are relatively familiar within the field. Techniques such as implicit attention mechanisms, context encoding, and task-based contrastive learning have been explored previously, and this paper can be seen as a new combination of these existing ideas."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose TCMRL, a framework that improves the generalization in offline meta-RL by capturing both task characteristic and task contrastive information, resulting in generalizable contexts and effective adaptation to unseen target tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024task,\ntitle={Task Characteristic and Contrastive Contexts for Improving Generalization in Offline Meta-Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5GauLpaNGC},\nnote={under review}\n}"
},
"abstract": {
"value": "Context-based offline meta-reinforcement learning (meta-RL) methods typically extract contexts summarizing task information from historical trajectories to achieve adaptation to unseen target tasks. Nevertheless, previous methods may lack generalization and suffer from ineffective adaptation. Our key insight to counteract this issue is that they fail to capture both task characteristic and task contrastive information when generating contexts. In this work, we propose a framework called task characteristic and contrastive contexts for offline meta-RL (TCMRL), which consists of a task characteristic extractor and a task contrastive loss. More specifically, the task characteristic extractor aims at identifying transitions within a trajectory, that are characteristic of a task, when generating contexts. Meanwhile, the task contrastive loss favors the learning of task information that distinguishes tasks from one another by considering interrelations among transitions of trajectory subsequences. Contexts that include both task characteristic and task contrastive information provide a comprehensive understanding of the tasks themselves and implicit relationships among tasks. Experiments in meta-environments show the superiority of TCMRL over previous offline meta-RL methods in generating more generalizable contexts, and achieving efficient and effective adaptation to unseen target tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement Learning",
"Meta-Reinforcement Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/1dc37874ea7b14654997be903824521052b71435.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Task Characteristic and Contrastive Contexts for Improving Generalization in Offline Meta-Reinforcement Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5GgjiRzYp3 | Intent3D: 3D Object Detection in RGB-D Scans Based on Human Intention | main | Active | 3D Visual Grounding;3D Multimodal Learning | datasets and benchmarks | 5;5;5;6 | 2;2;3;4 | 3;2;3;3 | 3;2;3;3 | 3;2;3;2 | 5.25 | 2.75 | 2.75 | 2.75 | 2.5 | 0.870388 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weakness section. \n\nI am willing to increase my score if the author resolves my major concern with some additional baseline experimentations."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper presents a clear motivation for 3D-intention grounding, and it includes clear illustrations and presentations of dataset collection procedure.\n\n2. Soundness of each component design of the IntentNet, and thoroughly ablations on each component of the proposed pipeline design.\n\n3. Extensive experiments and discussions demonstrate the effectiveness of the proposed framework compared to different types of baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new task named 3D-intention grounding, which is 3D object-detection from direct human-intentions. The paper collects Intent3D dataset which includes 1042 scenes from ScanNet and corresponding paired human-intentions questions and 3D object detections answers. IntentNet is proposed to tackle 3D-intention grounding task by candidate box-matching, verb-object alignment and cascaded adaptively learning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major concern:\n\nI am concerned about possible baseline unfair comparison in the experiment section. Most baselines are designed to tackle nouns-types of questions instead of human-intention types of questions. What if we pass the question to a finetuned LLM and let it infers what types of nouns/objects the question is targeting at from possible objects in a scene detected by existed 3D object detectors? The possible performance of these baselines might be much higher after it is given the object it is expected to detect in a scene. \n\nMinor concern:\nIt would be interesting if the author can provide some cases where IntentNet fails but other models succeed. Particularly if other models are fed with object/noun directly."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1:There has been a few methods to combine 3D scene understanding with LLM beyond Chat3D v2, such as LL3DA, Grounded 3D-LLM, ReGround3D and so on. What are the advantages of this paper compared to theirs? More experimental evidence is needed to demonstrate the advantages of the this paper over other methods.\n\n2: Current large language models can also directly infer human intentions; what are the advantages of this paper compared to them?\n\n3:When filtering Non-trivial Objects, objects with more than six instances in fine-grained categories are directly removed, which may lead to the exclusion of commonly used objects. Could we consider adding more fine-grained descriptions for these objects instead of outright deletion?\n\n4: Figures 3 (d) and (e) indicate that the dataset lacks sufficient diversity in the types of objects included. Experimental validation is necessary to determine whether the variety of object types included in the dataset is sufficient for the model to learn effectively."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1: A new task in 3D object detection employing RGB-D, based on human intention, facilitates smoother and more natural communication between humans and intelligent agents.\n\n2:The author propose a high-quality vision-language dataset and focuses on the human’s intention for 3D object detection, which will facilitate the progress of 3D scene understanding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the task of 3D Intention Grounding (3D-IG), which aims to automate the reasoning and detection of target objects in real-world 3D scenes using human intention cues. To this end, the authors constructed the Intent3D dataset, comprising 44,990 intention texts across 209 fine-grained object categories, and developed several baseline models to evaluate various 3D object detection techniques. Finally, the authors proposed a novel method, IntentNet, which optimizes intention understanding and detection tasks through techniques such as verb-object alignment and candidate box matching, achieving state-of-the-art performance on the Intent3D benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1: There has been a few methods to combine 3D scene understanding with LLM beyond Chat3D v2, such as LL3DA, Grounded 3D-LLM, ReGround3D and so on. The paper does not highlight the advantages compared to them.\n\n2: The object selection method is too crude, as it removes some commonly used objects by humans when filtering Non-trivial Objects. Figures 3 (d) and (e) indicate that the dataset lacks sufficient diversity in the types of objects included.\n\n3: The limited variety of object category included in the dataset, fails to demonstrate the grounding effect for the missing objects in the dataset."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Will the proposed dataset and code be open-sourced?\n2. IntentNet seems to be a two-stage approach, where a pre-trained 3D detector first extracts proposals, which are then matched with text. Are there any one-stage methods that directly fuse 3D data and text to generate boxes? If so, the paper does not seem to provide comparisons with such methods.\n3. Regarding the training process, on what hardware was the method trained, and how long did the training take?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The overall quality of the paper is high, with clear writing and easy-to-understand presentation.\n2. The contribution of the dataset is significant, as it is the first to construct a 3D detection task focused on intention understanding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a 3D Intention Grounding (3D-IG) task and constructs a novel dataset called Intent3D (sourced from ScanNet data and generated using GPT). Additionally, it proposes a baseline model named IntentNet. I am not an expert in 3D-related fields, so my confidence in this review is not very high."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. More comparisons with recent works should be provided in Tables 1 and 2. Additionally, there is a minor mistake: the detector names “GroupFree” and “Group-Free” in the first two rows of Tables 1 and 2 do not match.\n2. The article gives a subtractive ablation experiment. I would like to see an additive ablation experiment, such as how the effect of verb alone works.\n3. The article does not give the performance of the proposed IntentNet in traditional 3D grounding."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the weakness part/"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduction of the 3D Intention Grounding (3D-IG) task could contribute to the 3D visual grounding community.\n2. The proposed Intent3D dataset is extensive, comprising 44,990 intention texts linked to 209 fine-grained object classes from 1042 3D scenes. This dataset provides a valuable resource for training and evaluating models in the context of human intention.\n3. The proposed modules in IntentNet are generally technically sound."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a novel framework for 3D object detection that integrates human intention into the detection process. The authors introduce the Intent3D dataset, aiming to enhance the model's understanding of human needs in real-world scenarios. The proposed method, named IntentNet, employs a multi-instance detection approach, where the model is tasked with identifying multiple instances of objects based on free-form textual descriptions of human intentions. The authors evaluate their approach against several baselines, including expert models designed for 3D visual grounding, foundation models for generic 3D understanding tasks, and Large Language Model (LLM)-based models. The evaluation demonstrates the effectiveness of IntentNet in achieving state-of-the-art performance on the Intent3D benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Object detection based on human intention is indeed a new task. However, why should we need to have a dataset and a model dedicated to this task? I think the detection based on human intention can be achieved in a modulized manner. For example, first, let the 3D detection module detect all types of objects in the scene. Then ask LLM to decide the subsets of the detected objects that can fulfill the human intention. I think this could be more flexible compared with training a dedicated detection model based on human intention prompts. \n2. In L210, it is mentioned that around six intention texts are generated per object. How do you determine this number (six)? Can it guarantee that all possible intentions can be covered and trained well?\n3. The contents around Eq (3) are hard to understand for me. What is t in Eq(3)? Although the authors claimed that the code would be released to facilitate the understanding, it would be better to add a figure to illustrate the connections in the network. \n4. Figure 4 is too abstract. Even though many connections and modules are included, it is not very informative. I suggest more detailed diagrams can be provided for the key modules further. \n5. Figure 5 shows that the verb alignment is very helpful for the prediction quality. Do you think this is caused by the limited training data? If more data is available for training, would this module still be essential?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024intentd,\ntitle={Intent3D: 3D Object Detection in {RGB}-D Scans Based on Human Intention},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5GgjiRzYp3},\nnote={under review}\n}"
},
"abstract": {
"value": "In real-life scenarios, humans seek out objects in the 3D world to fulfill their daily needs or intentions. This inspires us to introduce 3D intention grounding, a new task in 3D object detection employing RGB-D, based on human intention, such as \"I want something to support my back.\" Closely related, 3D visual grounding focuses on understanding human reference. To achieve detection based on human intention, it relies on humans to observe the scene, reason out the target that aligns with their intention (\"pillow\" in this case), and finally provide a reference to the AI system, such as \"A pillow on the couch\". Instead, 3D intention grounding challenges AI agents to automatically observe, reason and detect the desired target solely based on human intention. To tackle this challenge, we introduce the new Intent3D dataset, consisting of 44,990 intention texts associated with 209 fine-grained classes from 1,042 scenes of the ScanNet dataset. We also establish several baselines based on different language-based 3D object detection models on our benchmark. Finally, we propose IntentNet, our unique approach, designed to tackle this intention-based detection problem. It focuses on three key aspects: intention understanding, reasoning to identify object candidates, and cascaded adaptive learning that leverages the intrinsic priority logic of different losses for multiple objective optimization."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"3D Visual Grounding",
"3D Multimodal Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/33faa6e470c5c01bd252f731c42302f5b7c431fe.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Intent3D: 3D Object Detection in RGB-D Scans Based on Human Intention"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5GuhYMgaap | Inductive or Deductive? Rethinking the Fundamental Reasoning Abilities of LLMs | main | Active | Reasoning;LLM;Inductive;Deductive | foundation or frontier models, including LLMs | 3;5;5;5 | 4;3;3;4 | 2;2;3;3 | 1;2;3;2 | 3;2;4;2 | 4.5 | 3.5 | 2.5 | 2 | 2.75 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Q1. Has inductive and deductive reasoning not studied before in previous literature? I believe there are existing work or at least work on either of the categories. (for instance, for deductive reasoning, see: https://aclanthology.org/2023.findings-acl.67.pdf, https://openreview.net/forum?id=KFjCFxiGk4; for inductive reasoning, see: https://arxiv.org/abs/2309.05660). Please review existing literature and include them in your related work. Perhaps the distinction between the two has not been made explicit, which I believe is a fair contribution, but please acknowledge existing work.\n- Q2. How do you expect this to connect to benchmarking and evaluations of LLMs? How would this improve robustness of LLMs?\n- Q3. I think this work could benefit by considering the tension between memorization (as briefly discussed in the paper about models performing better on the examples seen during the pretraing phase) vs. in-context learning. What would be the connection of inductive/deductive reasoning and in-context examples in this framework? \n- Q4. Why is it important to distinguish deductive and inductive reasoning? (I believe it *is* important, but I wish the authors to consider this question. In my opinion, it could be useful particularly in the application of LLMs and improving performances of various symbolic reasoning involved generation such as code generation, scientific LLMs, or verification/formal language modeling with LLMs. Perhaps if the work was situated better and considered within the context of generation problems, the motivation behind this distinction would have been better argued in the paper.)\n- Q5. Do you intend to release the datasets and prompts used for the tasks?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "I think the main strength of this task is that it provides a new way of looking at various ways we interact with LLMs. Using a framework of inductive and deductive reasoning, we can perhaps consider in-context learning as a deductive reasoning task and code generation as an inductive reasoning task. I think it would have been more salient to frame the paper this way, and therefore, more relevant to many other communities, particularly with LLM evaluation communities. Below describes the strengths in more specifics.\n\n- S1. This paper discusses the distinction between inductive and deductive reasoning, and how we may systematically investigate this using a novel framework.\n- S2. In providing a novel framework, they provide a novel task of their own design. A cypher description task . This is a particular strength, because many available evaluative framework and benchmarks could have already been used in pretraining of many closed LLMs. With an introduction of a novel task, they can robustly test.\n- S3. This potentially adds a novel way of looking at code generation and reasoning together. \n- S4. I can see how we use in-context learning could be considered from the perspective of inductive and deductive reasoning with this framework.\n- S5. The framework effectively describes a spectrum between deductive and inductive reasoning. Inductive and deductive reasoning are not always distinctly delineated as I had previously conceptualized, so it was interesting for me to consider.\n- S6. The paper was well written with mostly clear description.\n- S7. This framework and subtasks were thoroughly experimented with many current SOTA LLMs"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies LLM capabilities in inductive and deductive reasoning, and compares the performance gap between the two poles of reasoning. They consider this by framing reasoning as a function (which connects input and output) definition task, with\n- deductive: the model is provided with the function (direct input-output mappings)\n- inductive: the model is given examples (x,y) pairs but without the function \n\nWith the framework defined, they test the reasoning processes of LLMs across 4 primary subtasks: arithmetic, basic syntax reasoning (syntactical recognition and identification), spatial reasoning, and a novel cipher decryption task of their own design. Their finding suggests that LLMs seem to be stronger inductive reasoners rather than deductive. In particular, tasks that involve counterfactual reasoning are particularly challenging even with strong inductive performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "As pointed out in the strengths, I think this paper has the potential to make us consider looking at how we interact with LLMs in a novel way. However, I think the formulation of the question and presentation of the main thesis could be significantly improved. The paper does not adequately situate itself with existing literature on reasoning, or discuss the relations between this work with respect to code generation or in-context learning.\n\n- W1. The paper and their findings were hard to connect to existing work. I think it would have made the paper stronger to consider how other work in reasoning area compare with this approach. There have been numerous work on deductive, inductive, abductive, counterfactual etc reasoning. I think there was very few discussion on the prior work, and therefore, this work was poorly situated. \n\n- W2. The framework relies on a particular case of generation, which is code generation. I think the performances could very much differ in the case of natural language generation and inductive reasoning. I think it's insufficient to generalize the findings of this paper to the broad deductive/inductive reasoning gap of LLMs\n\n- W3. \"Current methods that investigate deductive and inductive reasoning often rely on disparate datasets\" may not true: LogiGLUE (https://arxiv.org/pdf/2310.00836) for example considers both categories in their datasets.\n\n- W4. I believe there were prior work on inductive and deductive reasoning, and some of these prior work does discuss the gap between them. On the claim of novelty, I believe that the question itself is not novel enough. The framework may be novel.\n\n- W5. This work does not consider finetuned models, but it would have been interesting to consider them, particularly in discussion with deductive/inductive reasoning and seen examples. The paper does mention some probable explanation for some performance gaps on examples seen/unseen during pretraining.\n\n- W6. 
I think there could have been discussion on how this relates to the code generation performance. There are many existing benchmarks for code generation (e.g. BigCodeBench, HumanEval), and because this framework relies on code generation for an external executor, it should be discussed as what these evaluative benchmarks and testing on with respect to deductive/inductive reasoning. \n\n- W7 The writing could be improved in some parts of the paper. I found the deductive part to be a bit lacking in discussion.\n\n\nI believe this work has a lot of potential and it was very interesting to read about this framework! I hope to see this work out, but I wish it was more thoroughly considered and better presented/situated in connection with existing work in the field."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Other problems/questions:\n\nI understand why you use chat-gpt, but it does make your work non-reproducible. It would be helpful to complement results with an open-source system (it would also help in making your conclusions more general).\n\nFunction Execution: if you do it outside the LLM, is it still a LLM?\n\n8-IO w/ Mapping Function (MF): is this deductive or a mix?\n\nThe results show a noticeable improvement between chat-gpt 3.5 and 4. ANy idea why?\n\nThere are a few typos and in a few cases bad english \"\n\nWu23 Paper: why only datasets from this work. It seems highly related, why is it not discussed?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors present several contributions to a major research domain in IA:\nThey introduce a notion of several forms of reasoning going on the system,\nThey implement a system to validate their claims.\nThey experimentally obtain unexpected results \n\nThe paper is also rather easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper starts from the observation that there are two forms of reasonung in LLMs: inductive and deductive. The authors empirically study prformance of the two approaches, and conclude that LLMs do better with induction."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have two major difficulties with the paper:\n\nAs the authors themselve observed, the two forms of reasoning are very much intertwined. ALthough Fig 1 does a nice job at explaining the concepts, as i read I felt the need for a more formal definition of what is induction and what is deduction. This is especially true when I looked at the discussion, and I felt I could not understand why statements such as \n\". By\ncompletely disentangling the inductive reasoning of LLMs, our proposed SolverLearner shows the\nremarkable inductive reasoning capabilities inherent in LLMs.\"\n\nI also felt the notions of induction and deduction may take somewhat different meanings for different researchers\n\nSecond, I would have hoped for a deeper insight into these results. You mention the remarkable induct reasoning of LLMs, but it would be nice (at least for me) to understand how they appear. Also, why deduction performs worse?\n\nFunction execution: you state it takes place outside the LLM (eg, in Python). Why are the differences so big (Table 13?)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Some additional comments about the novelty of the proposed evaluation and its significance in LLMs would be useful."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Strengths\n- Disentangling the inductive and deductive capabilities of LLMs seems like an interesting problem\n- The types of benchmarks used are varied and several of the state-of-the-art LLMs have been considered in the evaluation"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The proposed approach aims to disentangle deductive from inductive capabilities of an LLM. The main contribution is a series of tasks where each task is has both an inductive as well as a corresponding deductive component. The results show that LLMs perform more poorly in deductive reasoning as compared to inductive reasoning on the tasks designed to test both."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weakness\n- The paper seems to suggest that solverlearner is a novel approach, but it is less clear why this is the case. As far I could understand, solver learner just utilizes an external code interpreter to apply the functions learned by the LLM inductively. I was not clear of the complexity involved to do this, since the approach itself is not described in detail. Further, are there other ways of decoupling the two, since there was not a lot of context in why this is the right way for disentanglement.\n- The tasks itself also seem to be from prior work (Wu et. Al 2023) apart from the cipher task. Once again, I was not sure if the contribution of the tasks was significantly different from prior work.\n- Regarding the foundational aspect as such, based on the definition of deductive/inductive inference, since the LLMs are being used a bit like black-boxes, I was not sure about the leap from observing the experimental results to concluding the “type” of inference the LLM is truly performing internally. For e.g. memorization is one aspect that could be affecting the way a LLM is solving a particular task.\n- In terms of the significance of the study, is the fact that deduction is harder than induction significant, i.e., what would be a good application use-case to motivate this study is something that was missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The paper claims that SolverLearner isolates inductive reasoning, but this separation is not convincingly demonstrated. For example, in the arithmetic task of base-8 addition, the process of identifying the base from examples is considered as inductive reasoning. I wonder if the authors could provide more convincing evidence to show that the model is truly performing inductive reasoning instead of simply pattern matching based on prior exposure to similar tasks?\n\nNote that, using Python interpreters to prevent LLM involvement in the \"deductive\" step (function execution) does not fully eliminate the possibility that LLMs leverage both types of reasoning in the previous “inductive\" step. The distinction remains unclear because the task structure could involve deductive elements when identifying input-output mappings."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "(1) The presentation is clear, fluent, and easy to follow. The task formulation is clear, and the introduction of the proposed framework is transparent.\n\n(2) The topic of reasoning ability of LLMs is crucial and in need of exploration.\n\n(3) The experiments are detailed introduced, with all settings and prompts attached in the appendix."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper evaluates the inductive and deductive reasoning capabilities of large language models using a framework named SolverLearner. Through tasks including arithmetic, syntactic reasoning, spatial reasoning, and cipher decryption, the authors claim that LLMs perform well in inductive reasoning but struggle with deductive reasoning tasks. The topic of reasoning ability of LLMs is interesting and important. The overall presentation of the paper is clear and fluent."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The task setup does not adequately reflect real-world scenarios. The tasks used in the evaluation, particularly the arithmetic tasks across different number bases and synthetic syntactic tasks, are highly artificial and contrived. These tasks do not reflect realistic reasoning challenges that LLMs would face in natural language processing or human cognition. For example, arithmetic problems in base-9 or base-11 are not commonly encountered in real-world settings. The syntactic reasoning tasks are also simplistic, relying on predefined sentence structures with fixed subject-verb-object patterns. More realistic scenarios, such as understanding context-dependent syntactic reordering or handling ambiguous language, would make the evaluation more robust and relevant to practical applications.\n\n(2) The proposed framework has limited generalizability. The proposed SolverLearner, while effective for some inductive tasks, does not generalize well to broader inductive reasoning challenges. The tasks in the study are highly structured and constrained (e.g., learning the mapping function in base-specific arithmetic), where a unique solution exists for the inductive task. In more complex scenarios, such as reasoning about abstract concepts, learning open-ended rules, or inducing general principles from noisy data, the SolverLearner framework may not be effective. The paper does not discuss how this method could scale to such more complex inductive challenges, where the learning task is not well-defined and may involve multiple plausible solutions.\n\n(3) Comparison with other reasoning frameworks is insufficient. The paper fails to adequately compare its results with alternative approaches to reasoning in LLMs, such as chain-of-thought prompting, least-to-most prompting, or retrieval-augmented generation. 
While SolverLearner is presented as a novel method for isolating inductive reasoning, the lack of comparison with existing techniques leaves its relative merits unclear. For example, chain-of-thought prompting has been shown to improve both inductive and deductive reasoning in various tasks by breaking complex problems into smaller reasoning steps. Without a direct comparison, it is difficult to assess whether SolverLearner offers any significant advantage over these established methods. Including such comparisons would have strengthened the evaluation.\n\n(4) The scope of deductive evaluation is too narrow. The deductive reasoning tasks primarily focus on counterfactual arithmetic (e.g., base-9 vs. base-10 arithmetic), which is a very specific case. Deductive reasoning encompasses more than just counterfactual logic—it includes formal logic, rule-based reasoning, and mathematical proofs. The paper does not evaluate these broader aspects of deductive reasoning, such as tasks that involve symbolic logic, proof generation, or formal theorem proving. This limited scope weakens the claim that LLMs perform poorly in deductive reasoning overall. For example, the study might have included tasks like syllogisms or multi-step logical deductions, which would provide a broader view of LLMs' deductive reasoning capabilities."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024inductive,\ntitle={Inductive or Deductive? Rethinking the Fundamental Reasoning Abilities of {LLM}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5GuhYMgaap},\nnote={under review}\n}"
},
"abstract": {
"value": "Reasoning encompasses two typical types: deductive reasoning and inductive reasoning. Despite extensive research into the reasoning capabilities of Large Language Models (LLMs), most studies have failed to rigorously differentiate between inductive and deductive reasoning, leading to a blending of the two. This raises an essential question: In LLM reasoning, which poses a greater challenge - deductive or inductive reasoning? While the deductive reasoning capabilities of LLMs, (i.e. their capacity to follow instructions in reasoning tasks), have received considerable attention, their abilities in true inductive reasoning remain largely unexplored due to the inseparability of the two types of reasoning in most of the tasks. To delve into the true inductive reasoning capabilities of LLMs, we propose a novel framework, SolverLearner. This framework enables LLMs to learn the underlying function (i.e., $y = f_w(x)$), that maps input data points $(x)$ to their corresponding output values $(y)$, using only in-context examples. By focusing on inductive reasoning and separating it from LLM-based deductive reasoning, we can isolate and investigate inductive reasoning of LLMs in its pure form via SolverLearner. Our observations reveal that LLMs demonstrate remarkable inductive reasoning capabilities through SolverLearner, achieving near-perfect performance with ACC of 1 in most cases. Surprisingly, despite their strong inductive reasoning abilities, LLMs tend to relatively lack deductive reasoning capabilities, particularly in tasks involving ``counterfactual'' reasoning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reasoning",
"LLM",
"Inductive",
"Deductive"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e51d287b1db29fa943dbd13700ed1ca4c94f5d1a.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Inductive or Deductive? Rethinking the Fundamental Reasoning Abilities of LLMs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5I39Zvlb3Y | Collu-Bench: A Benchmark for Predicting LLM Hallucinations in Code | main | Active | large language model;hallucination;code generation;automated program repair;benchmark | datasets and benchmarks | 3;3;5;5;5 | 4;4;3;3;5 | 2;2;2;3;3 | 2;1;2;2;3 | 2;2;3;2;3 | 4.2 | 3.8 | 2.4 | 2 | 2.4 | -0.218218 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide specific case studies? Do they examine whether certain types of programming tasks or problem structures are more likely to trigger hallucinations? Providing a more detailed error analysis would be helpful, especially in cases where hallucinations are misidentified or overlooked. Are there specific features or patterns that lead to these errors?\n\n3. The authors present a large evaluation dataset, which in practice may make it challenging for researchers with limited computational resources to replicate the results. For instance, the authors themselves do not use all 2,294 entries in SWE-Bench. Do the authors have any specific measures to address this issue?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Collu-Bench differs from previous benchmarks by focusing on finer-grained code hallucinations, providing a new benchmark that includes richer features such as log probabilities and execution feedback. It aims to deepen understanding and predict where hallucinations occur.\n\n2. The authors write the paper clearly, emphasizing the importance of the problem. The structure of each section is well-organized, making it easy to understand the motivation, methodology, experimental setup, and conclusions of Collu-Bench.\n\n3. The authors execute their experiments effectively, from benchmark construction to analysis and results. They offer detailed descriptions of the findings, complemented by visualizations of experimental results, which enhance the persuasiveness of the conclusions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce Collu-Bench, a benchmark designed to evaluate code hallucinations in LLMs. This benchmark includes 13,234 instances of code hallucinations from 11 different LLMs across five datasets, covering both code generation and automated program repair tasks. Collu-Bench’s innovation lies in its automated process that combines program equivalence and identifier variation to locate hallucinated tokens accurately. The benchmark provides detailed signals, including the log probability at each step, token types, and execution feedback. The authors conduct preliminary experiments using traditional machine learning and neural network methods to predict hallucinations, with prediction accuracy ranging from 22.03% to 33.15%. Overall, this benchmark aims to advance the understanding, prediction, and mitigation of hallucinations in automated code generation and program repair tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors provide an introduction in Section 3 on how Collu-Bench is constructed and how they generate the Ground Truth. However, I am concerned about the accuracy and quality of the Ground Truth generation method. Despite performing a manual review, the authors achieve only an 86% accuracy rate, which introduces potential bias during evaluation. Moreover, the sample size for manual verification (100 samples) is relatively small compared to the dataset’s scale. How do the authors address the issue of low Ground Truth quality?\n\n2. The detection of hallucinations relies on comparing the generated code with a \"standard\" solution, which may not cover all possible correct solutions, potentially leading to inaccurate hallucination detection. How do the authors address this issue to ensure more accurate hallucination detection?\n\n3. In Sections 5.1 and 5.2, the authors merely describe the experimental results without providing detailed analysis. Could they offer more specific insights into why these experimental results occur? For example, why does GPT-4o-mini exhibit the most unique hallucination patterns? Why does the predictor trained on Llama3-8B data generalize well to content generated by most other LLMs? And why do Transformer models perform with relatively low accuracy on Collu-Bench?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- How do you define hallucination on coding tasks? How does it compare to the bug localization task?\n\n- Discussions about hallucination localization in dataset creation:\n - The per-token hallucination localization method in section 3.3 still looks weak to me after canonical solution sampling. The proposed methods addresses the problem of \"identifier variability\", but how to tackle the problem of semantically identical problems? For example, how do you detect the hallucination location if the ground truth is `return all(v1 > v2 for v1, v2 in zip(tup1, tup2))` and the generation is \n ```python\n for v1, v2 in zip(tup1, tup2):\n if not v1 < v2:\n return False\n return True\n ```\n An error rate of 14% is reported in section 4.2. How does this affect the usability of the dataset? Is it possible to provide a clean subset of the dataset to train localizers to figure out the impact of wrong annotations?\n\n\n - In section 3.3,\n > As there could be multiple unique normalized canonical solutions per problem, we calculate the hallucination token indices between the LLM-generated program and every unique canonical solution and eventually take the largest hallucination token index.\n\n What is the reason and how accurate is the design of taking the largest index? Moreover, how do you handle multiple hallucinations in the code? Will keeping only one hallucination index cause false negatives training detectors?\n\n- Table 1 shows a major source of hallucinations is *keyword*. However, is it related to the process of program normalization?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper provides a dataset with rich information to analyze hallucination in coding tasks\n- The authors reveal patterns of code hallucinations across data sources and LLMs"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents Collu-Bench, a benchmark for detecting code hallucinations in outputs from large language models. With over 13,000 instances from 11 models, it helps assess hallucination localization using various data points. It highlights the challenge and need for improved LLM reliability in coding."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The method for ground truth hallucination localization is overly simplistic and may not apply to complex cases, despite the method proposed in section 3.1 (see Questions)\n- The finding of \"LLMs are less confident when hallucinating\" is not novel and has been widely used for detecting hallucinations, e.g. [1], [2], [3], to name a few. However, I appreciate the authors' experiments studying finer-grained hallucination positions in coding tasks. The authors should emphasize more on their new findings specifically on this domain.\n- The localization methods only take the probability distribution of top-100 tokens into account, without considering the semantic meanings of the tokens, nor the execution feedbacks. \n- More hallucination detection baselines should be discussed and compared.\n- Lack of discussion of the proposed \"code hallucination\" vs bug localization.\n\n[1] Xiao, Yijun, and William Yang Wang. \"On Hallucination and Predictive Uncertainty in Conditional Language Generation.\" Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. 2021.\n\n[2] Guerreiro, Nuno M., Elena Voita, and André FT Martins. \"Looking for a Needle in a Haystack: A Comprehensive Study of Hallucinations in Neural Machine Translation.\" Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics. 2023.\n\n[3] Zhang, Tianhang, et al. \"Enhancing Uncertainty-Based Hallucination Detection with Stronger Focus.\" Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How can we ensure that the code sampled from LLMs that fails to pass test cases is also seemingly reasonable to humans and likely to be misused?\n2. Does the normalization process of the code in this paper potentially destroy or lose the semantics of the original code?\n3. Why are some LLMs more prone to generating hallucinatory code, while others are not as likely to produce such code?\n4. Why do some methods perform poorly/well, and what are the reasons for their poor/good performance?\n5. How to identify and correct errors in the dataset?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper clearly defines the problem of code hallucination in LLMs and provides a comprehensive benchmark for research in this area.\n2. The inclusion of diverse LLMs and datasets is a significant contribution to the field.\n3. The paper presents a well-structured approach to collecting and analyzing code hallucination instances. The automated pipeline for handling program equivalency and identifier variability is innovative and adds value to the benchmark.\n4. The experiments conducted using traditional machine learning techniques and neural networks are thorough and provide valuable insights into the patterns of code hallucination. The findings highlight the challenges and potential areas for future research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper successfully introduces Collu-Bench, a challenging benchmark for code hallucination localization. It includes 13,234 hallucination instances generated by 11 diverse LLMs across five datasets, offering a comprehensive evaluation of hallucination localization across multiple models. Furthermore, Collu-Bench provides additional information such as per-step log probabilities produced by LLMs, types of generated tokens, and execution feedback, which are useful signals for predicting code hallucinations. Through extensive experiments using traditional machine learning techniques and neural network models as hallucination predictors, the paper provides an in-depth study of hallucination localization using Collu-Bench. Preliminary results indicate that traditional ML methods and neural networks can only achieve an accuracy of up to 33.15%, highlighting the complexity of this task and emphasizing the need for further research to improve the trustworthiness and reliability of LLMs in code-related applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Code models generate hallucinatory code, what kind of code can be referred to as hallucinatory code? The definitions of hallucinatory code and hallucinatory tokens in the text are inaccurate. In the abstract section, the authors mention \"content that sounds plausible but is actually incorrect\", this definition is too vague. In the construction of Collu-Bench, the authors consider samples that fail to pass test cases as hallucinatory code and the first token that differs from the standard solution as the hallucinatory token. This is clearly not accurate enough. Failing to pass test cases indicates that the code is incorrect, but it does not necessarily mean it is hallucinatory code.\n\n2. While less attention has been given to hallucinations in source code as mentioned in the abstract, there are still several works that address this issue. The paper needs to compare the Collu-Bench dataset with other efforts, such as CodeMirage and CoderEval, to highlight their differences.\n- CodeMirage: Hallucinations in Code Generated by Large Language Models. https://arxiv.org/abs/2408.08333\n- CoderEval: A Benchmark of Pragmatic Code Generation with Generative Pre-trained Models. https://arxiv.org/abs/2302.00288\n\n3. Hallucinatory code should be deceptive code that appears reasonable to humans but is actually incorrect. How can we ensure that the code sampled from LLMs that fails to pass test cases is also seemingly reasonable to humans and likely to be misused?\n\n4. The purpose of the dataset is to reduce the likelihood of LLMs generating hallucinatory code. However, the dataset is primarily used to enhance the model's ability to predict hallucinatory code and hallucinatory tokens. Enhancing the model's predictive capabilities for hallucinatory code and tokens does not necessarily reduce the probability of LLMs generating hallucinatory code.\n\n5. Does the normalization process of the code in this paper potentially destroy or lose the semantics of the original code?\n\n6. In the process of constructing the dataset, it is taken for granted that code that fails to pass test cases is considered hallucinatory code. In reality, such code is not equivalent to hallucinatory code. The dataset constructed in this way contains both \"hallucinatory code\" and \"code with obvious errors that do not cause hallucinations.\" If \"code with obvious errors that do not cause hallucinations\" is not excluded, then the dataset itself has issues, and all subsequent results lack a solid foundation.\n\n7. The extent to which LLMs produce hallucinatory code in the dataset construction lacks explanation. Why are some LLMs more prone to generating hallucinatory code, while others are not as likely to produce such code?\n\n8. The article mentions and briefly compares CodeHalu and HalluCode, both of which classify and define code hallucinations. However, the concept of hallucinatory code in this paper is vague. The authors should also provide a detailed definition of the concept of hallucinatory code and categorize them.\n\n9. The results of various experimental models on the Collu-Bench dataset lack detailed explanations. Why do some methods perform poorly/well, and what are the reasons for their poor/good performance?\n\n10. The Collu-Bench dataset currently covers only Java and Python languages. It would be beneficial to construct a dataset that includes more mainstream programming languages, such as C, C++, and Go.\n\n11. Consider conducting a more overall human evaluation of the dataset's quality and the accuracy of annotations.\n\n12. The dataset relies on LLMs for annotation, but LLMs are not fully reliable, this may lead to incorrect token locations. How to identify and correct errors in the dataset?\n\n13. Despite the reduction, the error rate remains relatively high, with 14 out of 100 randomly sampled instances flagged as questionable. How can the error rate be further lowered?\n\n14. The paper could benefit from a more detailed discussion of the implications of the findings and how they relate to existing work in the field."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How effective is the automated sampling process in capturing a comprehensive set of canonical solutions, especially for more complex tasks in Defects4J and SWE-bench datasets?\n\nWhat are the limitations of the program normalization technique in accurately detecting hallucinations? Are there instances where the normalization process might incorrectly standardize genuinely distinct solutions?\n\nIn cases where the generated code subtly deviates from the canonical solutions, how does Collu-Bench ensure that the hallucination token is accurately identified without oversimplifying or introducing false positives?\n\nWhat criteria were used to select the five specific datasets, and how might additional datasets impact Collu-Bench’s robustness and versatility?\n\nThis paper includes 11 LLMs of various sizes and types. What is the reasoning behind selecting these specific models, and how might the inclusion of more recent or specialized models impact the benchmark’s findings?\n\nWhy do certain token types, like Keywords and Identifiers, appear more susceptible to hallucinations? Could this be influenced by the specific training data or architecture of the LLMs?\n\nThe analysis highlights different hallucination patterns across datasets, such as Defects4J showing a high hallucination rate for Operators and Identifiers. What underlying factors in these datasets contribute to these distinct hallucination profiles?\n\nHow does the per-token prediction approach compare with a per-example prediction regarding interpretability and practical application? Are there scenarios where one approach is more advantageous?\n\nTraditional ML models like Random Forest perform better in specific setups, while neural networks excel in others. What characteristics of hallucination prediction tasks make certain model types more suitable, and could a hybrid model improve results?\n\nThe highest accuracy achieved was around 33.15%. What are the main barriers to achieving higher accuracy, and are there known model improvements or alternative feature sets that could be integrated to boost predictive performance?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Collu-Bench includes a comprehensive set of 13,234 instances across diverse LLM models and coding tasks.\n\nProvides valuable, fine-grained data such as log probabilities, token types, and execution feedback to support hallucination analysis.\n\nExperiments reveal key patterns, like low confidence during hallucinations and higher hallucination rates for specific tokens."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study introduces Collu-Bench, a benchmark specifically designed to identify and analyze hallucinations in code generated by large language models (LLMs), addressing gaps in current research on code hallucinations. Collu-Bench includes 13,234 instances from five datasets produced by 11 different LLMs, focusing on two key tasks: code generation (CG) and automated program repair (APR). It provides detailed features such as per-step log probabilities, token types, and execution feedback for fine-grained analysis and prediction. Experiments using traditional machine learning and neural network models achieve a maximum accuracy of 33.15%, underscoring the challenge of this task. Findings reveal that LLMs show lower confidence in hallucinated outputs and are more prone to hallucinations with specific token types, highlighting the need to improve LLM reliability and accuracy in code generation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Achieved accuracy limits immediate applicability in practical settings.\n\nExcludes state-of-the-art models, potentially reducing relevance to newer LLM architectures.\n\nFocuses only on code generation and repair, missing other critical coding applications affected by hallucinations.\n\nIdentifies patterns but lacks actionable approaches to reduce hallucinations in practice."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Before comparing the hallucinated code generated by the model with canonical solutions, do you use methods such as unit tests or program analysis to determine whether the code does not meet the intended generation?\n\n2. Did you remove comments when processing model-generated data, as many models, such as GPT-4, may include annotations for the generated statements?\n\n3. refer to weakness 3, what are the differences between this benchmark and tasks like program review or bug localization?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is easy to follow.\n\nFrom the perspective of model confidence, the paper identifies distinct patterns between hallucinated tokens and correctly generated tokens."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper constructs a benchmark containing hallucinated code generated by different LLMs. It also annotates the positions of hallucination tokens, aiming to identify where the model starts exhibiting hallucination behavior. The authors analyze from the perspective of model confidence and the types of hallucinated tokens, discovering corresponding patterns—for instance, models generally have lower confidence when dealing with hallucinated tokens. Additionally, they use some basic machine learning and deep learning models to identify model hallucinations and evaluate these predictors in different settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I believe that \"hallucination in code\" is fundamentally an ill-defined term, and it is inherently challenging to define. Specifically, in this work, it seems that hallucinated code and incorrect/buggy code are treated as entirely equivalent. Therefore, I think using this term without a rigorous definition is neither precise nor reliable.\n\n2. The finding that models exhibit low confidence on hallucinated tokens is very interesting. However, relying solely on token confidence to achieve high identification accuracy is insufficient. Currently, the performance of per-token prediction and per-sample prediction is quite similar, which indicates that the model heavily depends on the confidence feature for identification. However, I believe that this task should be analyzed more from the semantic perspective of the code, which might achieve higher accuracy. For instance, a naive approach, such as having the model review its own generated code, might yield decent identification accuracy.\n\n3. This task does not seem fundamentally different from bug localization or program review. The objective in all cases is to identify parts that do not meet the code generation requirements. Program review, in particular, is even more challenging as it involves not only identifying but also correcting these parts.\n\n4. Even though the authors considered diverse canonical solutions, I believe that using text-based comparisons for data annotation remains imprecise, as there is no guarantee that the range of canonical solutions covers all possible solutions adequately."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024collubench,\ntitle={Collu-Bench: A Benchmark for Predicting {LLM} Hallucinations in Code},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5I39Zvlb3Y},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite their success, large language models (LLMs) face the critical challenge of hallucinations, generating plausible but incorrect content. While much research has focused on hallucinations in multiple modalities including images and natural language text, less attention has been given to hallucinations in source code, which leads to incorrect and vulnerable code that causes significant financial loss. To pave the way for research in LLMs' hallucinations in code, we introduce Collu-Bench, a benchmark for predicting code hallucinations of LLMs across code generation (CG) and automated program repair (APR) tasks. Collu-Bench includes 13,234 code hallucination instances collected from five datasets and 11 diverse LLMs, ranging from open-source models to commercial ones. To better understand and predict code hallucinations, Collu-Bench provides detailed features such as the per-step log probabilities of LLMs' output, token types, and the execution feedback of LLMs' generated code for in-depth analysis. In addition, we conduct experiments to predict hallucination on Collu-Bench, using both traditional machine learning techniques and neural networks, which achieves 22.03 - 33.15% accuracy. Our experiments draw insightful findings of code hallucination patterns, reveal the challenge of accurately localizing LLMs' hallucinations, and highlight the need for more sophisticated techniques."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large language model",
"hallucination",
"code generation",
"automated program repair",
"benchmark"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c7222d13e2c3448fdf7b7853831b88d9959d80b6.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Collu-Bench: A Benchmark for Predicting LLM Hallucinations in Code"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5IBrWCeZtl | Co-Evolution Learning | main | Active | Generative Models;Representation Learning | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;5 | 3;4;4 | 2;3;2 | 2;1;3 | 2;3;3 | 3.666667 | 3.666667 | 2.333333 | 2 | 2.666667 | 0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please address my concern about the novelty, and justify why the proposed model is significantly different from autoencoders with latent reconstruction loss."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The proposed approach is easy to understand, and provides moderate performance improvement.\n- The paper is well structured and presented.\n- Some experiments provide some useful insights."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, the authors propose to learn simultaneously a representation model and a generative model following a mutual feedback loop. One path (R2G) uses the embeddings provided by the representation model to guide the learning of the generative model. The other path (G2R) leverages the generated images as augmented data to train the representation model. The combination of both is referred to as co-evolution (CORE). The experiments show that this setting improves the performance in both generative and representation models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- In my opinion the novelty is very limited. R2G is equivalent to an autoencoder with a pretrained and fixed encoder, and G2R is equivalent to an autoencoder with reconstruction loss in the latent space, i.e. $l_{rec}\\left(\\hat{z},z\\right)$ with $z=f_1\\left(x\\right)$ and $\\hat{z}=f\\left(g\\left(z\\right)\\right)$, where the first encoder and the decoder are pretrained and fixed. These settings and their combination (i.e. CORE) have been extensively used in the context of autoencoders and image-to-image translation models (and cross-modal translation models). [A-D] are some early examples that come to mind with similar setting. The main difference is the use of more modern generative models (diffusion), but that is not novel in my view.\n\n[A] Unsupervised cross-domain image generation, ICLR 2017\n[B] MUNIT: Multimodal Unsupervised Image-to-Image Translation, ECCV 2018\n[C] Perceptual Generative Autoencoders, ICML 2020\n[D] Mix and match networks: encoder-decoder alignment for zero-pair image translation, CVPR 2018"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "-- How practical it is to implement this framework as the learning is iterative instead of end-to-end?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "-- The proposed method empirically found that co-training can boost the performance of generative models training efficiency by 30%\n\n-- The proposed Co-evolution of Representation models and Generative models (CORE) framework is novel and interesting"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a co-learning framework called CORE to jointly learn the representation and generative models. Specifically, it has two components, R2G framework which uses pretrained representation vision encoder to project data into latent space z, and learn a generative models by maximizing the log-likelihood conditioned on the z. The second component is G2R, which can sample diverse data points and can be used to learn a better latent representation. Experiments show that co-evolving these two components can facilitate the task performance for representation/generative tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-- The paper is a bit hard to follow, for example, it is not clear what the main contribution of this framework after reading the introduction\n\n-- Experiments only conducted on small-scale dataset, CIFAR10/100 etc, where both SoTA generative models or representation learning methods already mastered and hard to tell if the performance come from parameter tuning or joint learning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The idea of co-evolution is interesting. It combines the two tasks in a unified framework and tries to help their corresponding model to improve each other in the mutual enhancement process.\n2. The paper is well-organized, starting with a clear introduction of the current limitations and a detailed breakdown of the design of the proposed framework."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper tackles a key challenge in advancing generative and representation models: the dependence on high-quality, diverse data for training. To address these limitations, the authors introduce a co-evolution framework that enables generative and representation models to improve each other. Both representation and generation models progressively strengthen their performance by iterating through this mutual enhancement process."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The use of a milder data augmentation strategy may have a limited impact on enhancing dataset diversity. Additionally, there is no ablation study to verify the effectiveness of this approach, even in Table 8, leaving its actual contribution to performance unclear.\n2. An interesting observation in Table 2 is that using a weak generation model leads to a decline in the performance of the trained representation model. However, there is no analysis provided on this phenomenon or its potential risks, which would be valuable for understanding the limitations and stability of the proposed framework.\n3. In the experiments across different datasets in Section 4.3, the generation model implementations vary, yet there is no clear explanations provided for these choices. \n4. In the co-evolution experiments, it is unclear whether the generation model is trained from scratch or utilizes pre-trained generative capabilities. This lack of clarification makes it difficult to discern the true source of the observed training benefits."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024coevolution,\ntitle={Co-Evolution Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5IBrWCeZtl},\nnote={under review}\n}"
},
"abstract": {
"value": "Generative and representation models, whether trained independently or evolved separately, require high-quality, diverse training data, imposing limitations on their advancement.\nSpecifically, self-supervised learning, as a popular paradigm for representation learning, decreases the reliance on labeled data in representation models.\nHowever, it still necessitates large datasets, specialized data augmentation techniques, and tailored training strategies.\nWhile generative models have shown promise in generating diverse data, ensuring semantic consistency is still a challenge.\nThis paper introduces a novel co-evolution framework (referred to as CORE) designed to address these challenges through the mutual enhancement of generative and representation models.\nWithout incurring additional, unacceptable training overhead compared to independent training, the generative model utilizes semantic information from the representation model to enhance the quality and semantic consistency of generated data.\nSimultaneously, the representation model gains from the diverse data produced by the generative model, leading to richer and more generalized representations.\nBy iteratively applying this co-evolution framework, both models can be continuously enhanced.\nExperiments demonstrate the effectiveness of the co-evolution framework across datasets of varying scales and resolutions.\nFor example, implementing our framework in LDM can reduce the FID from $43.40$ to $20.13$ in unconditional generation tasks over the ImageNet-1K dataset.\nIn more challenging scenarios, such as tasks with limited data, this framework significantly outperforms independent training of generative or representation model.\nFurthermore, employing the framework in a self-consuming loop effectively mitigates model collapse.\nOur code will be publicly released."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Generative Models",
"Representation Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/69307fa8dcfb22726951008b69b4bed9867ebb50.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Co-Evolution Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5IWJBStfU7 | Everything, Everywhere, All at Once: Is Mechanistic Interpretability Identifiable? | main | Active | AI interpretability;mechanistic interpretability;causal consistency;explanatory algorithms;circuits | interpretability and explainable AI | 5;6;6;8 | 3;4;3;5 | 2;2;2;3 | 2;3;3;4 | 3;3;4;3 | 6.25 | 3.75 | 2.25 | 3 | 3.25 | 0.899229 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "If the training set is drawn from a distribution with certain biases, there may be correlations that essentially encourage multiple “conflicting” interpretations of a network. Can we resolve some of the issues that arise by putting conditions on the training distributions?\n\nWhat would (be expected to) happen if, in a simple toy example, an experiment was repeated with a perfect training error, with or without overfitting? Would we see a qualitatively different distribution of explanations?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The manuscript is clearly written, with a good explanation of the context the work is placed in. The toy examples provided are illustrative. The overarching conclusion is that uniqueness of an explanation should generally not be expected within the context of mechanistic interpretability, and while a similar analysis cannot be conducted on large-scale models, it is likely that the same behavior could be expected. This insight is important in many practical applications where network interpretations are required (post-training) by a practitioner."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "A summary of mechanistic interpretability is provided. Two approaches are proposed that attempt to interpret a “simpler” algorithm that emulates the behavior of a trained neural network in terms of a circuit. One approach focuses on modelling the behavior of the full network before finding a subset of related nodes, while the second approach focuses on finding an “important” sub-network of the full network, whose behavior is then interpreted. Both approaches are showcased in simple toy examples."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The issue of uniqueness of an explanation is addressed in the context of mechanistic interpretability. However, the “incompatibility” of different explanations is not substantially addressed. A more formal framework in which incompatibility can be “measured” would be very interesting, along with analyzing questions on differentiating between equivalence classes of explanations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could you elaborate on the meaning of “incompatible” in line 355? The paper would benefit from a clear example of two incompatible explanations, ideally in the main body.\n\n2. Please include in the appendix random examples of some of the circuits found so that they can be qualitatively assessed by readers.\n\n3.Could you comment on how these results relate to the discussion of Makelov et al. (2023) and Wu et al. (2024), cited above in section “Strengths”?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "A rare case of compelling and relevant deconfusion experiments which doesn’t devolve into over-claims. The authors do well in the discussion highlighting that identifiability may not be needed for all applications of mech interp, and that the non-identifiability observed here is on toy-models, so might not extend to larger models trained on multiple tasks.\n\nThe “what-then-where” strategy implemented here appears to solely utilize an approach based on Distributed Alignment Search (DAS), which makes the results about non-identifiability quite relevant to recent discussion:\n\nMakelov, A., Lange, G., & Nanda, N. (2023). Is this the subspace you are looking for? An interpretability illusion for subspace activation patching. arXiv. https://arxiv.org/abs/2311.17030\n\nWu, Z., Geiger, A., Huang, J., Arora, A., Icard, T., Potts, C., & Goodman, N. D. (2024). A reply to Makelov et al. (2023)'s \"Interpretability Illusion\" arguments. arXiv. https://arxiv.org/abs/2401.12631"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the \\emph{identifiability} of mechanistic explanations from interventional evaluations on toy neural networks. The authors find clear non-identifiability at multiple stages of the interpretability pipeline: multiple interpretations can exist for a circuit, multiple circuits can generate the same behavior, each algorithm can be aligned to multiple activation subspaces, etc.\nThis non-identifiability persists regardless of whether MI explanations are generated first by localizing a subset of the network, then deriving an interpretation, or first generating a candidate algorithm and trying to then find an activation subspace corresponding to that algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Primary weakness: The most novel contribution of this paper are its experimental results, which don’t have enough description: Appendix B only contains aggregated results about the number of circuits and average interpretations per circuit found, but e.g. lacks examples of said circuits for qualitative validation. This significantly undercuts my ability to validate the correctness of the experiments.\n\nIn addition, the authors acknowledge appropriately that identifiability may not be necessary if the goal of MI is merely to steer a model. However, much MI work is driven by a desire to simply further scientific understanding of language models. What types of scientific inquiries require computational identifiability, and which do not? The paper could be strengthened by further discussing how much identifiability matters if the goal is scientific understanding, rather than just the model steering mentioned in lines 520-527."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Minor comments, questions, and suggestions:\n\nLine 403 states that “larger architecture…could also lead to greater overparameterization”. This could benefit from elaboration; in particular, how larger architecture could lead to a reduction in the number of valid abstractions.\n\nOn Line 065, “Given the near impossibility of exhaustively searching all possible algorithms across all subsets of a neural network”, I might suggest to reframe this not as impossibility but as intractability, infeasibility or implausibility. Certain interpretability queries might have large search spaces that could nevertheless be searched efficiently. The relevant property is the complexity of the interpretability query, not merely the size of the search space. For computational complexity analyses of circuit discovery, see Adolfi et al 2024.\n\nOn Line 067, the authors state “researchers have developed approximation methods with different assumptions and trade-offs”. It seems to me that the circuit discovery methods that are typically developed are heuristics for circuit finding, not approximation algorithms with any proven guarantees. In any, case it would be useful if the authors can distinguish between these two categories in their descriptions.\n\nCitation to Van Rooij on Line 046 does not seem to fit with the corresponding sentence, as that paper does not deal at all with interpretability, as opposed to Lindsay, 2024, which is indeed an approapriate citation. For examples of studying the fundamental properties of (inner) interpretability queries see Adolfi et al., 2024.\n\nSection 2.1 mentions interpretability work on transformer models but only in language. An example from vision transformers can be found in Vilas et al. 2023.\n\nPlease clarify the notation in Definition 4.\n\nLine 229 makes an implicit statement about computational complexity but provides no citation. See Adolfi et al. 2024 for relevant complexity analyses. 
This is also relevant to the statement on Line 257. Here it would also be useful to clarify how uniform random sampling “approximates” the desired measure, as this seems non-obvious. Perhaps the authors mean random sampling is a heuristic with unknown properties?\n\nLine 494 states that current MI methods can only approximate their targets because exhaustive enumeration is impossible for large models. This is technically incorrect, as even for some problems with exponential serach spaces, efficient search algorithms that find optimal solutions are possible. The relevant notion is the computational complexity of the interpretabililty queries, not simply the size of their search space (see Adolfi et al., 2024).\n\nSection 2.1 describes a parallel between AI interpretability and neuroscience. A framework that draws from lessons grounded in this parallel is described in Vilas et al. 2024. This framework provides a nice embedding for the what-where distinction, corresponding to the algorithmic and implementational levels, respectively.\n\nThe problem of identifiability interacts in interesting ways with the computational complexity of circuit finding. Adolfi et al. 2024 analyses circuit queries that are relevant to the authors’ points on identifiability. See, for instance, counting problems which ask for the number of circuits in a train neural network that have a certain property (e.g., they are sufficient for a behavior). Furthermore, if the number of sufficient circuits is typically large, heuristics for otherwise intractable problems (e.g., sufficient circuit) could seemingly find their targets in a feasible amount of time. In this scenario, non-identifiability is an important catch."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* Well written\n* Well referenced\n* Adresses an issue of interest to the interpretability community\n* Provides exhaustive experiments with well-understood ground truth.\n* Investigates the effects of various variables (architecture size, number of tasks, noise) on the identifiability problem.\n\nThis kind of study is very much needed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors investigate the potential issue of identifiability in mechanistic interpretability through experiemnts on small MLPs where (isolated) circuits are ennumerated and assessed fairly exahustively. They find identifiability is an issue at all levels: in the number of subcircuits functionally aligned with the full network, in the number of algorithms consistent with the behavior, and in the mappings between algorithms and circuits. This problem gets worse as architecture size increases, and training on a greater number of tasks only mitigates this issue to some extent."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I first summarize the most important points and then elaborate.\n\n* It is unclear whether the conclusions about identifiability problems can be stated generally or only in particular for circuits as isolated objects (circuit discovery through zero-ablation), which might mischaracterize the functioning of the network as a whole.\n* The possible improvements advertised in the abstract/introduction are rather only sketched in section 5.1.\n\n\nIs the large number of circuits/possible explanations due to looking for circuits in isolation (e.g., via zero ablation) rather than working in context with the rest of the network (e.g., via activation patching)?\n\nLine 320 describes the circuit isolation procedure. This is equivalent to zero-ablation and the criterion is equivalent to the definition of suficcient circuit.\nHow would identifiability look like if we chose to define circuits as they function in the context of the full network? See for example the definition of circuit via activation patching in Adolfi et al., 2024.\n\nIsn’t it possible that many of the isolated circuits discovered through zero-ablation are mischaracterizations of the in-context functioning of the circuits as they are embedded in the full network?\n\n\nLine 080: “a model’s behavior should have a single, well-defined explanation”. There is no citation here and it is unclear where this intuition is coming from, what is it’s theoretical support, etc. To offer a counter-intuition: consider a circuit that is sufficient on its own to mimic the behavior of the full network over some input domain; such a circuit need not be unique. Trivially, the circuit plus additional neurons to form the full network is another such circuit. But there is no contradiction in intuiting that multiple such circuits of different sizes, with partial or no overlap exist in the network and, in principle, offer alternative (perhaps incompatible?) ‘explanations’ (see Adolfi et al. 
2024 for theoretical analyses).\n\nOn Line 091: the authors mention “the identifiability properties of current MI criteria”. The criteria of interest that define circuits leave open the possibility that these circuits are not unique. So the definition of these circuits does not preclude non-identifiability unless the uniqueness property is trivially appended to the definition. This leads one to suppose that uniqueness under the typical definition of circuits is a property left to be determined empirically. It could, in principle, be motivated theoretically, but I see nothing in that direction here. Is it possible to provide some theoretical motivation for uniqueness that is not trivially stipulated but justified from first principles?\n\nIf a network implements the same functionality for different inputs through different circuits and algorithms, does this really make mechanistic interpretation hopeless? (i.e., in this case, is only a functional explanation capable of unifying all the existing ‘explanations’?). It would be useful to have any assumptions about satisfactory explanations made explicit in the manuscript.\n\nLine 489: “the challenge lies in defining criteria that distinguish valid explanations from misleading ones.”\nIt seems to me that, conceptually, identifiability does not pose a problem for distinguishing misleading from valid explanations. The problem arises only if an explanation is presented as unique or valid for a full input domain when this is not so. This issue might warrant some clarification.\n\nLine 490: “According to MI, the explanatory algorithm should be unique, meaning multiple competing explanations should not exist.” But this statement is made without citation. This assumption seems ill-founded to begin with, for the reasons mentioned above. Where does the criterion come from?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "(From the weakness section)\n- What is in the case of the MNIST NN the \"valid circuit\"?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The authors introduce their own taxonomy and formalization (e.g. where-then-what/what-then-where, Circuits, Mappings, etc) for important concepts discussed in the paper. While I haven’t fully wrapped my head around the usefulness of the taxonomy, I appreciate the effort to deconfuse different interpretability methods. I think this is the strongest suit of the paper and I wish they had focused the paper on the taxonomy and less on the experiments. \n- I enjoyed the writing style, and it was easy for me to follow, particularly Sections 2 and 3. I also found Figure 2 to be helpful. For the _what-then-where_/_where-then-what_ split. \n- The paper touches on an important topic within interpretability – a lack of quality in the discourse around helpful metrics for interpretations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "- The objective of the paper is to answer the question, do current criteria in mechanistic interpretability (MI) guarantee the identifiability of the explanation?\n- Authors sort MI methods into two broad strategies: \n - _where-then-what_ focuses on finding a subset of the network – a circuit – that captures most of the information flow from in- to outputs. Once this circuit is identified, the next step is to interpret its components (features) to derive the explanatory algorithm.\n - _what-then-where_ starts by identifying candidate algorithms and then searches subspaces in the neural network where the algorithm may be implemented, using causal alignment between the explanatory algorithm’s states and the network’s internal states. \n- They stress testing both methods with toy models: small MLPs trained on logic gates. They performed three main types of searches to test different interpretability criteria: \n - Circuits search: Looking for subnetworks that perfectly replicate the model's behavior\n - Interpretations search: Trying to map neurons to logical gates in a way that's consistent with their activations\n - Mappings search: Testing different ways to map logical gates to groups of neurons\n- Key findings in toy models: they found multiple valid interpretations for the same network.\n - 85 different circuits that achieved perfect accuracy\n - An average of ~536 possible logic gate interpretations per circuit\n - 159 perfect minimal mappings between algorithms and neurons\n - In total, over 45,000 different possible computational abstractions\n- Experiment larger NN trained on a subset of MNIST revealed similar dynamics\n - The circuit search found over 3000 valid circuits"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While I think the taxonomy could bear some relevant fruit, for experimentally testing the metrics using NN this small, it seems challenging to generate insights that are relevant to interpretability as a total. From my understanding, we are not close to having high-confidence circuit interpretations (“perfect circuits”) in the first place, so working under this assumption might be several steps ahead.\n- Continuing on the larger NN experiments: I struggled to understand the points in line 479 ff. What is in this case the \"valid circuit\"? I hoped this section could have bridged a gap toward showing how this metric could be used in the future but failed to show its limitations in this instance. (But maybe I simply oversaw that.)\n- Lastly, I think there is also too little emphasis on existing literature that clearly touches on the underlying problem: disentanglement. I would consider modeling the paper around the taxonomy and then focusing on existing research and problems in relation to known circuits, such as IOI."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024everything,\ntitle={Everything, Everywhere, All at Once: Is Mechanistic Interpretability Identifiable?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5IWJBStfU7},\nnote={under review}\n}"
},
"abstract": {
"value": "As AI systems are increasingly deployed in high-stakes real-world applications, ensuring their interpretability has become critical. Mechanistic Interpretability (MI) is a promising approach that aims to reverse-engineer neural networks to extract simple, human-understandable algorithms embedded in the neural structure that explain the model’s behavior. \nIn this work, we investigate a fundamental concern with concrete formalizations of MI: do current criteria guarantee the identifiability of the explanation? We borrow the concept of identifiability from statistics to express the intuition that an explanation should be unique, meaning that the criteria for selecting explanations should not allow for multiple, incompatible solutions.\n\nWe identify two broad strategies to produce MI explanations: (i) \"where-then-what\", which first identifies a subset of the network (a circuit) that replicates the model's behavior before deriving its interpretation, and (ii) \"what-then-where\", which begins with candidate explanatory algorithms and searches in the activation subspaces of the neural model where the candidate algorithm may be implemented, relying on notions of causal alignment between the states of the candidate algorithm and the neural network. We systematically test the identifiability of both strategies using simple tasks (learning Boolean functions) and multi-layer perceptrons that are small enough to allow for the complete enumeration of candidate explanations. Our experiments reveal that current criteria suffer from identifiability issues at every stage: multiple circuits can replicate model behavior, multiple interpretations can exist for a circuit, several algorithms can be causally aligned with the neural network, and each algorithm can be aligned to multiple, different, subspaces of the neural network.\n\nThese findings suggest that current criteria are too permissive and need refinement to ensure identifiability. 
\nWe discuss the generalization of our results to larger models and potential fixes based on stricter criteria. Our work aims to contribute constructively to the ongoing effort to develop rigorous formalizations of MI's assumptions."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"AI interpretability",
"mechanistic interpretability",
"causal consistency",
"explanatory algorithms",
"circuits"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/175d1e637536c70315e38a151f8f4c27d67b1afd.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Everything, Everywhere, All at Once: Is Mechanistic Interpretability Identifiable?"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5IZfo98rqr | Decomposing The Dark Matter of Sparse Autoencoders | main | Active | Sparse Autoencoders;Dictionary Learning;Language Model Features;Scaling Laws;Mechanistic Interpretability | interpretability and explainable AI | 3;3;3;5 | 2;2;4;2 | 2;2;2;2 | 3;2;3;3 | 2;1;4;1 | 3.5 | 2.5 | 2 | 2.75 | 2 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Why was Gemma analyzed? Why was Gemma Scope chosen? What is special about these choices? (Availability as an open source model with a published SAE is an acceptable answer, but it that should be stated in the paper that the choice was made out of convenience.)\n- Did you train the SAE from scratch, or are you using a the published checkpoint?\n- What type of sparse autoencoder is considered? How was it trained? (As mentioned above, briefly describe the technique if you are using a published checkpoint.)\n- Figure 1:\n - What are the equations in the labels supposed to mean?\n - Where did the data come from?\n - Exactly which part of the figure is “dark matter”?\n- Equation 1:\n - is the vector w “random” or is it actually a coordinate of x in the y basis?\n - Is ||w||_1 << d the right equation? Couldn’t one of the components be much larger than one?\n- Line 196: What is the set up of the synthetic set up?\n- What is L_0?\n- Line 242 --The exact set up of the random vectors here was unclear.\n- What is Figure 3b supposed to show?\n- Line 240: Exactly what is “likely” supposed to mean?\n- Line 264: Bibliography: Gemma reference has a broken last name: “Team”\n- Figure 4: The caption is not a full sentence and not clear. Expand the caption.\n- What does the last equation on Line 490 say?\n- Appendix A:\n - This section needs a little more introduction: What is the proof trying to show, and what does it imply?\n - Line 697: Why is the case \\lambda=1 special, and what do we conclude from rho=0.73 ?\n - Is d > m?\n - Line 662: define WLOG\n- Appendix B: report the results in the appendix, instead of just qualitatively describing them.\n- Does Appendix C have a reference in the main text?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper’s analysis is very thorough and a very compelling decomposition of the scaling of sparse autoencoders for feature extraction. The observations are very well described, and the theory and methodology is sound. I think it would be an impactful work, however, it does have one fatal shortcoming: (see below)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is an analysis into the error of sparse autoencoders applied to LLM interpredability. They address the shortcoming of the ability to reconstruct the hidden state, and observe that scaling laws show that an SAE would not be able to fully represent the hidden state, even in the limit. The theoretical framework is applied to decomposing the error of one layer of the Gemma 2 open source LLM, using the open source SAE published in a separate work, which agrees with the breakdown."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses\nThe main weakness is that the method is only applied in one model, and even worse, only one layer in the model. The methodology is self described as empirical, but because only one context is analyzed, the findings are not sufficiently proven because the paper essentially shows only one data point. That is, is it possible that this only happens in this one layer for this one model? I would not think that the findings are exclusive to just this one context, but would it be possible to construct a LLM architecture for which this result does not hold? It should be easy to repeat the analysis for various open source models and different layers, and to thus illustrate that the observations and theory hold more widely. With just more demonstrations of the same trends across more models, the paper would be strong.\n\n\nAnother easy to correct shortcoming in the presentation is that details of the autoencoder training process are left out. There is a citation to GemmaScope, but given that this paper is trying to make a broader theoretical claim, a short description of the SAE training process would be apt. Similar to applying the method to more models and layers, it would also be interesting to verify if different SAE algorithms resulted in features that change the results."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Why is it necessary to sanity check that estimated linear errors and estimated non-linear errors are correlated in Section 4.1? I found this experiment technical and in-the-weeds and couldn't tell why it needed be done (the interesting parts of the paper are with the actual LLM and actual error).\n\n2. Why are all the linear extractors single linear matrices, or another SAE? Shouldn't an SAE with no sparsity penalty be applied somewhere so it's possible to learn features in superposition without interference (not possible with a plain linear matrix) and features that are dense (not possible with a sparsity penalty)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* I think that the biggest two problems with Sparse Autoencoders and Mechanistic Intepretability research is i) faithfulness of decomposition of models and ii) real-world application of insights. This paper is solid progress on i) because they ask why SAEs are currently limited.\n\n* The paper describes a sensible theoretical decomposition of model activations into SAE learned features, dense features and non-linear features, and also measures these terms in practice. \n\n* The paper does a wide range of analyses: automated interpretability, FVU and loss measurements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper analyzes sparse autoencoder (SAE) errors in language models and found they could decompose these errors into three parts: unlearned sparse linear features, a dense linear term and a \"nonlinear error\" term that persists even as SAEs get larger. The paper studies the nonlinear error across different token posistions and SAE widths, with experiments on Gemma-2 9B. The paper attempts to reduce this nonlinear error through two methods: using gradient pursuit during inference (which only slightly helped) and leveraging SAE reconstructions from adjacent model components.\n\nI found this paper complex, and while I think it is flawed in the current state I am happy to revise my opinion if my concerns are addressed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Large sections of the paper are difficult to understand**\n\na. Some notation is defined in strange ways that require me to reread them many times. E.g. `SaeError(x) := x - Sae(x)` is defined with a -1 coefficient for `Sae(x)`, but `NonlinearError(x) := Sae(x) - Wx - \\sum_{i=0}^{m} w_i \\vec{y}_i` is defined with a +1 coefficient for `Sae(x)`. I *think* that most of these problems are downstream of defining some parts of the notation with an end state in mind, e.g. the weak linear representation hypothesis suggests that there exists sum ideal dictionary decomposition of the vector, but other parts of the notation is defined with the current state in mind, e.g. you slot `SaeError(x)` into the sum with the existing `Sae(x)` and `x` terms and nothing else. However, I'm not confident that this consistently considering solely the end state or current state is either necessary or sufficient for making the notation clearer.\n\nb. Some statements do not make sense after several times re-reading. E.g. \"The intuition behind this test is that if ... its existence is not guaranteed\".\n\nc. \"If this test is accurate, we can use it to estimate the linear component of the error, `Wx + Dense(x)`\" but `Dense(x)` was introduced as possibly non-linear, and `Wx` is non-linear, so how is this the **linear** part of the error?\n\n2. **I think the conclusions are too strong**.\n\nThe paper states \"We also find that the norm of the `NonlinearError(x)` is constant on a per token level as\nwe scale SAE width\", and while this section changes the definition of `NonlinearError` to be the error as determined by their method, in the conclusion the paper states \"... the presence of constant nonlinear error ...\" with no hedging.\n\nI am not convinced that the methods in the paper are capturing *true* linear error and non-linear error. 
One reason this may be happening is that all the training on the error term `x - Sae(x)` may involve some vestigual `x` due to shrinkage (which could be boosted by 10x by the various predictors, since the appendix suggests there is 10% shrinkage). This would mean the various methods that predict linear or nonlinear error may be cheating and picking up on this shrinkage. Is this addressed in the paper? Why is there no hedging in the conclusion that the methods presented may not capture true (non-)linear errors, or even that the weak linear representation hypothesis may not be true. For example, in the extreme case where the SAE was entirely dead, we can predict `SaeError(x) = I * x` and so the SAE solely has linear error, which suggests there are some assumptions that need to be stated (that the SAE is sufficiently good at reconstructing I think)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Check the weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper focuses on the commonly ignored detail—SAE error—to provide more insights for thoroughly understanding a research subject related to the representation of language. \n2. It examines SAE error from multiple aspects, such as scaling law, norm prediction test, etc.\n3. It investigates ways to reduce NonlinearError in detail."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper tries to decompose the error components of sparse autoencoders (SAEs) to help better interpret language models. It uncovers that the SAE error can be predicted and analyzed, and provides insights for reducing it."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "# Major\n\nTo be honest, I had a very hard time understanding this paper. Specifically,\n\n1. How do you define the term “most common” (features..) on line 146? It is ambiguous. \n2. Section 4.1 is not well written, which makes me hard to understand the later sections of the paper. Specifically:\n 1. Line 193 and 194 do not make sense. For instance, Dense is nonlinear, how can the sum between Wx and Dense be linear component of the error?\n 2. What leads you to make the statement on line 200 to 201? You don’t even know what the “true features” are. Please elucidate. \n 3. It is questionable to claim “the percent of variance left unexplained by the regression will be an upper bound on the true variance explained by NonlinearError(x).” First, please provide a clear definition of “variance explained or unexplained”, perhaps in the appendix. Second, if this is a concept similar to the explained variance in PCA, I’m not sure if you can readily extrapolate that to nonlinear components. Some rigorous derivation is needed. If you are not sure, you need to stress this is an assumption.\n 4. Here the so called synthetic setup is just for confirming that the *SAE is approximately an identity function on its image, i.e., the linear subspace of* $\\mathbf{x}'$. There is no need for this long verbosity.\n 1. Also, what is the motivation for this? I think there should be more clarification.\n 5. Linear transformation $\\mathbf{a}$ (line 161) should be denoted by $A$ for consistency (since you have $W\\mathbf{x}$). You current notation makes it look like evaluating inner product. I don’t think this is the same as the one in Section 4.2, right? Also, clarify its output dimension.\n 6. The Gaussian noise simulation on line 204 to 207 is kinda questionable. \n 1. The set of $\\mathbf{x}$ could be a curved low dimensional manifold. In this case the Gaussian noise—whose support is a practically a ball of the same dimension as $X$—cannot accurately simulate Dense(x). \n 2. 
The output of Dense and NonlinearError could be correlated in reality. \n 3. The functions Dense and NonlinearError could be continuous, so Gaussian noise may not accurately simulate them.\n 7. Since SAE is approximately an identity function according to you (line 204 to 205), we can safely assume that $\\mathbf{x}’$ and SAE($\\mathbf{x}’$) are identical. Then adding Gaussian noises to each of them simply makes the simulated SaeError (i.e., $\\mathbf{x}$ - SAE($\\mathbf{x}$)) an Gaussian noise. I’m not sure how using a linear map $\\mathbf{a}^\\top \\mathbf{x}$ can derive the results in Fig. 2, or maybe I just got it completely wrong since I can hardly understand your writing. I think you might need to elaborate on line 202 to 207.\n\nI’d like to stop here since Section 4.1 has already baffled me enough, making me impossible to review the later sections. I think some revision is needed to streamline the narrative. \n\n# Minor\n\n1. On line 135, I think you mean $\\|\\mathbf{w}\\|_0\\ll d$.\n2. What is called the “linear subspace of $\\mathbf{x}$” (line 187)? $\\mathbf{x}$ is just a point in the activation space $X\\ni\\mathbf{x}$. If you mean a linear subspace of $X$, then is it *proper*, i.e., it cannot be $X$ itself?\n3. Use the correct citation format \\citep and \\citet. For instance, line 35."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Combined with previous section for clarity."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The core idea to decompose the SAE reconstruction area into various parts is very interesting and, as far as I am aware, novel. Having such a decomposition could potentially be very useful for training better SAEs and informing the debate around the extent to which the linear representation hypothesis holds.\n- The authors attempt to measure these quantities using a wide range of experiments. They also study the downstream effects of each component of their decomposition, and investigate methods for reducing the nonlinear error. Overall, I think the breadth of their empirical work is good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper attempts to decompose the reconstruction error of sparse autoencoders into different interpetable components: (1) unlearned linear features, (2) residual dense features, and (3) nonlinear error introduced by the SAE. The authors explain their decomposition and then proceed to try to measure various components of the errors. They attempt to measure the linear and nonlinear portions of the error by training a linear regression on the activation vectors to predict the SAE recontruction error. They claim to find some irreducible nonlinear error arising from the SAE which does not disappear as the number of parameters in the SAE increases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main weakness of this paper is that the central quantities are not defined clearly enough for me to be able to properly understand what is going on, or the adequacy of their empirical experiments. My current understanding, rephrased in my own language is that the authors are making the following decomposition:\n- Activation vector $x$\n - The sparse autoencoder reconstruction: $\\textup{Sae}(x)$\n - The error of the SAE reconstruction: $\\textup{SaeError}(x)$\n - Unlearned sparse features: $\\sum_{i=m}^n w_i y_i$\n - Dense feaures which will not be learned by the SAE: $\\textup{Dense}(x)$.\n - Additional error: (I don't know if the authors have a term for this)\n - Linearly predictable additional error: $Wx$\n - Nonlinear error: $\\textup{NonlinearError}(x)$\n(where my notation is that each bullet point is the sum of the bullet points nested at one level below it). This was my understanding after having spent quite some time re-reading Section 3, so I'll proceed as if this is correct, but I'm not confident and I'd appreciate clarification from the authors.\n\nAssuming the above, I have several comments:\n- The exposition in Section 3 needs to be substantially clearer for readers to be able to understand your definitions. For example, you should name the error mentioned in \"the SAE introduces some error when making this approximation\" and give it a consistent name. I also think equations (3) and (4) are misleading. According to my understanding, (4) is the central definition decomposing the SAE error and (3) is a consequence of (4) when you consider subtracting from $x$.\n - As an aside, the introduction of $W$ is poorly motivated here. Also, later on you're going to consider another sense in which the error can be linearly predicted from $x$ - namely using $a^T x$ to predict $\\textup{SaeError}(x)$. 
I believe that these are not the same, but I was initially confused here and I don't understand the difference in intuition between them.\n- In Figure 1, I don't see why Dense Features and Linear Error should be grouped together. They seem like quite different quantities with different interpretations to me.\n- The changes of notation and setup in the first paragraph on page 6 are very unhelpful given there's already a lack of clarity. I'd strongly recommend that the authors stick to consistent notation throughout the paper, and choose a single - clearly distinct - term and corresponding notation for each component of their decomposition and stick with it throughout the paper.\n - As an aside, if we're dropping $Wx$ in this paragraph, why did we have it in the first place? I'm not sure I ever understood the intuition for having it.\n- Since you are agnostic to the SAE architecture, you don't introduce any notation for the features that the SAE learns. This lead to me getting confused and originally not understanding that the SAE reconstruction error comes from essentially four places: learning wrong features, learning wrong feature weights, unlearned linear features, and (unlearned) dense features. My understanding is that the nonlinear error is effectively measuring the first two. Is that right? If so, a clarifying comment in this direction might be helpful.\n\nSecondly, I did not understand the description of and intuition behind the experiment in the first paragraph of page 4. I think this might be downstream of the fact that I haven't completely understood the authors' decomposition of the SAE error. 
But in any case, either the authors need to offer a clearer definition of the quantities they are working with, or more intuition for why this experiment claims to measure what they are hoping - and probably both.\n\nGiven this lack of clarity in a couple of key places in the manuscript, it was hard for me to engage with the more detailed experimental results in Sections 5 onwards, since I couldn't understand what the authors were actually hoping to measure. The problem that the authors are trying to study seems fundamentally very interesting, and I'm optimistic that some version of this paper could be very solid, but as it stands it's not possible to appreciate the authors' contributions.\n\nMore minor points:\n- I think the synthetic experiment setup in Section 4 is unlikely to be particularly realistic - particularly the assumption of Gaussian noise. (My read of Gurnee (2024) which the authors cite regarding pathological reconstruction errors makes the Gaussian assumption likely incorrect.) But, this is not a central concern.\n- I suggest that the authors read over their manuscript for minor typographical errors. Ones that I caught include: issues with citation formatting in several places, summation indices in equations (3) and (4) being incorrect, and several stray commas."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We study SAE scaling by breaking SAE error into that which is linearly explainable from activations and that which is not."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024decomposing,\ntitle={Decomposing The Dark Matter of Sparse Autoencoders},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5IZfo98rqr},\nnote={under review}\n}"
},
"abstract": {
"value": "Sparse autoencoders (SAEs) are a promising technique for decomposing language model activations into interpretable linear features. However, current SAEs fall short of completely explaining model performance, resulting in ``dark matter''—unexplained variance in activations. In this work, we predict and verify that much of SAE dark matter can be linearly predicted from the activation vector. We exploit this fact to deconstruct dark matter into three top-level components: 1) unlearned linear features, 2) unlearned dense features, and 3) nonlinear errors introduced by the SAE. Through a scaling laws analysis, we estimate that nonlinear SAE errors stay constant as SAEs scale and serve as a lower bound of SAE performance on both an average and per-token level. We next empirically analyze the nonlinear SAE error term and show that it is not entirely a sparse sum of unlearned linear features, but that it is still responsible for some of the downstream reduction in cross entropy loss when SAE activations are inserted back into the model. Finally, we examine two methods to reduce nonlinear error: inference time gradient pursuit, which leads to a very slight decrease in nonlinear error, and linear transformations from earlier layer SAE dictionaries, which leads to a larger reduction."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Sparse Autoencoders",
"Dictionary Learning",
"Language Model Features",
"Scaling Laws",
"Mechanistic Interpretability"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8a38715a500c27761e0617182c5a4ae4fd49617d.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Decomposing The Dark Matter of Sparse Autoencoders"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5IkDAfabuo | Prioritized Generative Replay | main | Active | online learning;model-based reinforcement learning;generative modeling;synthetic data;continual learning | reinforcement learning | 5;5;6;8 | 3;4;4;3 | 3;3;4;4 | 3;2;3;4 | 3;3;3;3 | 6 | 3.5 | 3.5 | 3 | 3 | -0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I'll rephrase my above concerns as questions. \n\n1. How is this method novel with respect to prior work that uses intrinsic rewards on rollouts from a learned dynamics model? It seems like a very similar approach to acquiring data that scores well under a given guidance function F, where F can be ICM or another intrinsic reward. \n\n2. How does this method handle noisy-tvs?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* This work proposes a scalable method for training model-free or model-based agents in a variety of domains. I believe the formulation is simple enough to be integrated into and improve other approaches. \n\n* I also found the presentation clear and easy to read. \n\n* I found the scaling experiments to be very compelling, I'm a little concerned about the general thrust of driving up the syn-real data ratio as high as possible, since we do need to ground the generations in real experience. But I still think insights here are valuable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work proposes a form of sample based experience replays that leverages a generative model to provide and augment samples drawn from the replay buffer. To avoid overfitting, a set of guidance functions are used to steer the generative process toward diverse and useful samples. The generative replay mechanism is a diffusion model that is conditioned on some auxiliary information. The authors propose a few different versions of this conditioning such as intrinsic curiosity, TD error, or Q values. The idea is that using these scores, the generative model can be steered towards generating high quality samples. Given such a replay mechanism, this work evaluates model free and model-based RL agents trained via this generative replay on gym and dmc.The results show improvement on both pixel based and state based tasks. There are also ablations with larger policy networks and higher generative data rations, which show further improvements."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have two points of contention with this work. \n1. From a paradigm perspective, I don't understand how this is different from prior work in model-based RL that apples intrinsic rewards to a learned dynamics model [1] or world-model [2]. These methods also utilize a generative model as a copy of the environment, then train the agent in simulation to acquire interesting data (under the intrinsic reward). It seems that this method does the same, except that instances, rather than full trajectories are generated. I do see how this is different than just applying an intrinsic bonus during training, since here the synthetic data has a chance to be more diverse. \n\n2. I thank the authors for providing numerous experiments, but I am not at all convinced that this method is robust to the choice of guiding function F. ICM is known to be susceptible to the noisy TV problem, where difficult-to-model environmental factors score arbitrarily high under ICM. The chosen tasks are too simple perceptually to see this problem. This in and of itself is not a problem, but it means that we need to search for another F that works for our task which is hard in practice. In the meantime, there are other intrinsic rewards that do not suffer from this pathology [3]. \n\n\n\n[1] Shyam, Pranav, Wojciech Jaśkowski, and Faustino Gomez. \"Model-based active exploration.\" International conference on machine learning. PMLR, 2019.\n\n[2] Hafner, Danijar, et al. \"Dream to control: Learning behaviors by latent imagination.\" arXiv preprint arXiv:1912.01603 (2019).\n\n[3] Savinov, Nikolay, et al. \"Episodic curiosity through reachability.\" arXiv preprint arXiv:1810.02274 (2018)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What exploration method does the agent use?\nCould the exploration method be improved instead of the sample generation to improve diversity of samples?\nWould a combination of both a better exploration and this method be the optimal and a possible solution?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "One of the strength of this paper are the clear and concise language as well as good structured presentation of the proposed method.\nIt is quite logical to improve on the already existing prioritized experience replay method and implement it in the generative domain. The method is explained well and should be quite easily reproducable.\nOverall the research could be a valuable contribution to the reinforcement learning community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a conditional diffusion model as a synthetic replay buffer to combat early overfitting in online reinforcement learning and to diversify experiences for the learning model. This is achieved with a relevance function that selects samples that are rare but important for learning based on the temporal difference error, the value of a learned Q-function and a modified intrinsic curiosity module."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "A topic i feel like missed somewhat are the different ways to approach generative replay such as mentions of other generative models (e.g. variational auto encoders, gaussian mixture models) and why they were not used.\nOne thing i found rather off putting and this is very nitpicky is that the Tables 1, 2 and 3 are a bit crammed and slightly off from each other."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is the method compatible with different kinds of exploration bonuses? If so, how do you think they would compare?\n2. How do you think the method would do when simply having diverse samples does not imply usefulness? An example is the noisy tv problem.\n3. How sensitive is the algo towards the frequency of the inner loop in Algo 1?\n4. Can multiple relevance functions be combined?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well written and provides a clear explanation of their method.\n2. The research problem addressed in the paper is well laid out and is an important one to improve the performance of RL methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes to use conditional diffusion models to improve the experience replay for an RL learning agent. The method proposed improves performance by improving the diversity of the samples in the experience replay buffer and reducing overfitting."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the method shows improved performance, it is a bit simple as it combines existing elements in diffusion models and RL to propose the solution.\n2. It would be useful to compare the effect of different kinds of exploration bonuses."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) How robust is PGR to errors in the learned dynamics model? Are there ways to mitigate the impact of inaccurate dynamics predictions on the curiosity-based relevance function?\n2) Could PGR be extended to offline RL settings? If so, what modifications would be necessary?\n3) How does PGR's performance compare against PER baselines which use approximate parametric models of prior experience?\n4) Are there any other relevance functions thats been tried out? As thats core to the working of PGR."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1) PGR offers a fresh perspective on replay buffers by combining generative modeling with guided replay. Framing this problem as a conditional generation problem with diffusion models is novel.\n2) Diffusion model typically uses one single set of HPs requires no additional tuning I'd assume. This works well for PGR\n3) Empirical results on various benchmarks demonstrate that PGR consistently outperforms existing model-free and model-based RL algorithms, as well as a generative replay baseline without guidance. Also has been shown to work in both state-based and pixel-based environments. \n4) PGR is shown to scale well with larger policy networks and higher synthetic-to-real data ratios (important ablation that I wanted to see), potentially enabling more data-efficient training of large-scale RL agents. Really important result for scaling to many real use cases.\n5) The authors also provide insights into why PGR works, particularly highlighting the role of curiosity in promoting diversity and reducing overfitting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a framework called Prioritized Generative Replay (PGR), a novel approach to enhance sample efficiency in online reinforcement learning (RL). Traditionally, replay buffers store experienced transitions and replay them uniformly or with prioritization based on metrics like TD-error. However, the authors point out that uniform replay can be inefficient, and prioritization can lead to overfitting. PGR addresses these issues by using a conditional generative model to create a parametric replay buffer. \n\nThe paper claims that this allows for two key advantages:\n1) Densification: The generative model can create new, plausible transitions beyond those directly experienced, enriching the training data, especially in sparsely explored regions.\n2) Guidance: By conditioning the generative model on \"relevance functions,\" the generated transitions can be steered towards areas more critical for learning, such as states with high novelty or uncertainty.\n\nThe authors also explore various relevance functions, including return, TD-error, and curiosity. They find that curiosity, based on the prediction error of a learned dynamics model, performs best. This is attributed to its ability to promote diversity in the generated transitions, thus reducing overfitting. They also show that their approach consistently improves performance and sample efficiency in both state- and pixel based domains."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The curiosity-based relevance function relies on a learned dynamics model, which might be challenging to train accurately in complex environments.\n2) Increasing Synthetic Data ratio does not benefit PGR and the unconditional baseline (SynthER) equally. PGR scales better at r=0.75 than SYNTHER but neither benefits from 0.875. We would think the trend would be consistent? whats the intution behind this? Also this figure 7 could be improved with the variation in r being shown\n3) (Minor) writing issues throughout the paper with some missing words etc. Please re-read the paper and make the necessary changes."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We construct a conditional generative model of an agent's online memory, allowing us to replay high-priority data at large quantities to accelerate training of online RL agents."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024prioritized,\ntitle={Prioritized Generative Replay},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5IkDAfabuo},\nnote={under review}\n}"
},
"abstract": {
"value": "Sample-efficient online reinforcement learning often uses replay buffers to store experience for reuse when updating the value function. \nHowever, uniform replay is inefficient, since certain classes of transitions can be more relevant to learning. While prioritization of more useful samples is helpful, this strategy can also lead to overfitting, as useful samples are likely to be more rare. \nIn this work, we instead propose a prioritized, parametric version of an agent's memory, using generative models to capture online experience. This paradigm enables (1) densification of past experience, with new generations that benefit from the generative model's generalization capacity and (2) guidance via a family of ``relevance functions'' that push these generations towards more useful parts of an agent's acquired history. We show this recipe can be instantiated using conditional diffusion models and simple relevance functions such as curiosity- or value-based metrics. \nOur approach consistently improves performance and sample efficiency in both state- and pixel-based domains. We expose the mechanisms underlying these gains, showing how guidance promotes diversity in our generated transitions and reduces overfitting. We also showcase how our approach can train policies with even higher update-to-data ratios than before, opening up avenues to better scale online RL agents."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"online learning",
"model-based reinforcement learning",
"generative modeling",
"synthetic data",
"continual learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/aee09a932dc3dd1a89d09a2723d86f42adaf0d7e.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Prioritized Generative Replay"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5IvTw0qMKj | C$^{2}$INet: Realizing Incremental Trajectory Prediction with Prior-Aware Continual Causal Intervention | main | Active | Trajectory Prediction;Causal Intervention;Variational Inference;Continual Learning | causal reasoning | 3;5;6 | 3;3;3 | 2;2;2 | 2;2;3 | 1;3;3 | 4.666667 | 3 | 2 | 2.333333 | 2.333333 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could the authors elaborate on how the proposed C2INet model could be implemented in practical real-world systems, especially where computational resources may be limited (e.g., embedded or low-power devices)?\n\nCan the authors provide a more simplified explanation of the theoretical basis, particularly the optimization of KL divergence and the adjustment of priors in multi-task scenarios? This would help make the paper more accessible to a broader audience.\n\nIs there any plan to test the generalization ability of C2INet on larger or more diverse datasets, including scenarios with real-time prediction in live driving conditions? How might the model perform in unseen or significantly different environments?\n\nWhile the paper discusses mitigating environmental bias, how does C2INet handle other types of data biases, such as sample distribution imbalances or long-tail effects? Would these impact the model's performance or lead to biased predictions?\n\nCould the authors provide more details about the computational complexity of C2INet, such as the number of parameters and resource usage? This would clarify the scalability and practicality of the model for large-scale or real-time applications."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper presents a novel method combining causal intervention with a continual learning framework, effectively addressing the problem of bias and forgetting in multi-task learning.\n\nThe experimental analysis demonstrates that C2INet performs robustly on multiple datasets, achieving significant improvements in trajectory prediction accuracy compared to existing methods.\n\nThe integration of the memory module and prior queue is well-motivated and effectively implemented to handle changing environments, showcasing the potential for real-world applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces C2INet, a novel model designed to enhance multi-agent trajectory prediction in dynamic environments by addressing key challenges like environmental bias and catastrophic forgetting. C2INet incorporates a prior-aware memory module that stores optimal priors across tasks, enabling it to adapt incrementally to new scenarios while retaining performance on past tasks. A core component of the model is the continual causal intervention mechanism, which aims to disentangle latent confounding factors that could negatively impact prediction accuracy.\n\nThe model's design emphasizes a balance between computational efficiency and performance, employing variational inference to align environment-dependent priors with posterior estimates, thus maintaining robustness against latent biases. The inclusion of a pseudo-feature-based pruning strategy ensures the memory module remains efficient and manageable even as task volume increases. C2INet's framework is evaluated on datasets such as ETH-UCY and Stanford Drone, showcasing its strong adaptability and significant improvements in prediction metrics like Average Displacement Error (ADE) and Final Displacement Error (FDE) when compared to traditional trajectory prediction models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the paper introduces causal intervention comprehensively, some theoretical explanations, particularly regarding the optimization of KL divergence and multi-task prior adjustments, could benefit from simplification. This would make the paper more accessible to a broader audience.\n\nThe experiments are thorough but somewhat limited in terms of application diversity. The paper could be strengthened by including analyses on more varied or complex real-world scenarios, such as real-time predictions in live driving conditions.\n\nCertain sections, such as the derivation of equations and framework details, are presented in a complex manner that may challenge the reader's understanding. A clearer and more concise explanation would enhance readability.\n\nWhile the paper mentions addressing environmental biases, there is insufficient analysis of other types of potential biases in the dataset, such as sample distribution imbalance and long-tail effects. The paper does not delve deeply into how the model performs in broader contexts."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "You have not provided detailed mathematical derivations for the causal intervention section. Could you elaborate further or reference additional causal inference theories to support the method? Specifically, how do you ensure that the intervention effectively removes confounding factors, and have you considered potential causal relationships between different tasks?\n\nWhat are the computational complexity and storage requirements of C2INet? How do you ensure real-time performance in hardware-constrained environments (e.g., embedded systems or mobile devices)? Is there a quantitative analysis of resource consumption, or have you tried to optimize the algorithm to reduce computational load?\n\nAs the number of tasks increases, will the storage requirements for the prior memory module become unmanageable? How do you manage priorities or compress the memory module effectively when storage space is limited? Have you considered the issue of memory overflow as the number of tasks continues to grow?\n\nC2INet involves several key hyperparameters (e.g., the KL divergence adjustment coefficient, weights in the memory module). Could you provide insights on how to tune these hyperparameters? Have you considered an adaptive hyperparameter optimization mechanism to reduce the reliance on manual tuning?\n\nThe datasets used are mainly focused on pedestrian and vehicle trajectory prediction, which is somewhat limited in scope. Do you plan to test C2INet in more diverse and complex scenarios to evaluate its applicability and generalizability?\n\nGiven that C2INet includes multiple complex modules (e.g., memory module and causal intervention), how interpretable is the model? 
Do you plan to provide more intuitive visualizations or explanations to demonstrate the model’s decision-making process?\n\nSince the paper has already discussed the situation of hardware resource constraints, has it considered other optimization methods such as hardware acceleration or knowledge distillation? What advantages does the proposed optimization method have compared to these alternative approaches?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Adaptability to Multiple Scenarios:\nBy leveraging continual causal intervention, C2INet effectively handles confounding factors in diverse scenarios and retains information from previous tasks. This design enhances the model's adaptability to dynamic environments, making it suitable for multi-agent trajectory prediction in complex settings, such as autonomous driving and crowd monitoring.\n\n2. Comprehensive Experimental Validation:\nThe paper provides extensive validation across multiple datasets (ETH-UCY, Stanford Drone, synthetic datasets) and compares C2INet with various baseline methods, including common causal intervention and continual learning approaches. The results demonstrate that C2INet outperforms traditional methods in key metrics (e.g., ADE and FDE), proving its effectiveness in handling catastrophic forgetting and improving prediction accuracy.\n\n3. Modular Design:\nC2INet’s design is modular, making it compatible with multiple baseline models (e.g., STGAT, SocialSTGCNN). This plug-and-play characteristic increases the flexibility of the approach, allowing it to be used in various model architectures and promoting wider applicability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The study introduces C2INet, an innovative method for multi-agent trajectory prediction in intricate settings that utilizes ongoing causal intervention. C2INet integrates causal intervention and continuous learning inside a memory-enhanced framework to tackle challenges such as environmental bias, catastrophic forgetting, and hardware limitations in real-time multi-agent prediction. This method use variational inference to synchronize environment-related priors with a posterior estimator, guaranteeing precise trajectory representation by addressing confounding variables in the latent space.\n\nC2INet's principal innovation is its ongoing learning process, which progressively adapts to new circumstances while maintaining performance on previously encountered ones. This is accomplished via a memory module that retains ideal priors across situations, therefore safeguarding essential knowledge and reducing overfitting via a pruning process. Comprehensive assessments of real-world datasets, including ETH-UCY and Stanford Drone, alongside synthetic datasets, reveal that C2INet surpasses conventional methods in predictive accuracy and resistance to task interference, attaining notable enhancements in metrics such as ADE and FDE across multiple tasks.\n\nC2INet effectively mitigates significant shortcomings in current trajectory prediction models by integrating causal intervention with a continuous memory framework, hence guaranteeing strong performance in dynamic, multi-agent settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Innovative Design:\nThe paper introduces a novel model, C2INet, which combines causal intervention with continual learning, specifically using a memory module to retain optimal priors across different scenarios to mitigate catastrophic forgetting. This approach is relatively rare in multi-task learning and offers a degree of originality.\n\n2. Adaptability to Multiple Scenarios:\nBy leveraging continual causal intervention, C2INet effectively handles confounding factors in diverse scenarios and retains information from previous tasks. This design enhances the model's adaptability to dynamic environments, making it suitable for multi-agent trajectory prediction in complex settings, such as autonomous driving and crowd monitoring.\n\n3. Potential Limitations of the Prior Memory Module:\nAlthough the prior memory module helps alleviate catastrophic forgetting, it heavily relies on storing priors for different tasks, which may lead to challenges in memory management and capacity. As the number of tasks grows, the memory module might struggle to scale efficiently. Additionally, the paper does not discuss how to effectively manage priority or memory compression when storage space is limited.\n\n4. Limitations of the Experimental Datasets:\nAlthough the paper uses multiple datasets, these are mainly focused on specific domains (e.g., pedestrian and vehicle trajectory prediction) and lack diversity. The generalizability of the experimental results to more complex or diverse dynamic environments is unclear, limiting the method’s applicability to real-world scenarios.\n\n5. Complex Hyperparameter Tuning with Limited Guidance:\nC2INet involves multiple key hyperparameters (e.g., the KL divergence adjustment coefficient, weights in the memory module) that significantly affect model performance, but the paper does not provide detailed guidance on tuning them. 
The complexity of hyperparameter tuning, combined with a lack of explicit guidelines, may hinder other researchers from reproducing and applying the method in different settings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "I’m not familiar with causal intervention, so I may not provide professional reviews on the technical part.\nQuestions for other parts, see weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper researches an interesting and critical problem in trajectory prediction.\n2. The organization of the paper is somewhat reasonable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes $C^2INet$ for trajectory prediction, introducing causal intervention to enable continuous trajectory prediction across evolving domains. To address the issue of catastrophic forgetting, they introduce a Continuity Memory Module. Experiments on three datasets demonstrate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Some format issue should be considered:\n1. The citation format is wrongly used. \n2. the vertical space is not proper set, especially on Page 4\n3. Font size of Table 1 is too small, which is weird and does not match the main text\n\nSome Weaknesses about the contents:\n1. I’m so confused about the motivation of the work, especially the necessity of introducing causal intervention into the trajectory prediction task.\n\n2. Existing continual learning methods also take into account the catastrophic forgetting problem by utilizing the experience replay mechanism. What are the differences between those CL methods and $C^2INet$? Can your method solve some critical problems that traditional CL methods cannot?\n\n3. The authors use STGAT and STGCNN as backbones. They were proposed in 2016 and 2019, which are now outdated. The method should be integrated with cutting-edge backbones proposed in 2023 and 2024.\n\n4. The results in terms of ADE/FDE on the SDD dataset are too high and seem strange. All results are above 50.0. Do you use a particular coordinate system instead of standard pixel coordinates?\n\n5. Related works are not sufficient. More recent works from the past two years should be incorporated, such as diffusion models [1][2][3] and works with new settings [4][5].\n\n[1]Stochastic Trajectory Prediction via Motion Indeterminacy Diffusion\n\n[2]BCDiff: Bidirectional Consistent Diffusion for Instantaneous Trajectory Prediction\n\n[3]Universal Trajectory Predictor Using Diffusion Model\n\n[4]ITPNet: Towards Instantaneous Trajectory Prediction for Autonomous Driving\n\n[5]Adapting to Length Shift: FlexiLength Network for Trajectory Prediction"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper analyzes the spurious correlations and proposes a plug-and-play Continual Causal Intervention framework for trajectory prediction. Meanwhile, we innovatively solve the problem of catastrophic forgetting in practical applications."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024cinet,\ntitle={C\\${\\textasciicircum}\\{2\\}\\${IN}et: Realizing Incremental Trajectory Prediction with Prior-Aware Continual Causal Intervention},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5IvTw0qMKj},\nnote={under review}\n}"
},
"abstract": {
"value": "Trajectory prediction for multi-agents in complex scenarios is crucial for applications like autonomous driving. However, existing methods often overlook environmental biases, which leads to poor generalization. Additionally, hardware constraints limit the use of large-scale data across environments, and continual learning settings exacerbate the challenge of catastrophic forgetting. To address these issues, we propose the Continual Causal Intervention (C$^{2}$INet) method for generalizable multi-agent trajectory prediction within a continual learning framework. Using variational inference, we align environment-related prior with posterior estimator of confounding factors in the latent space, thereby intervening in causal correlations that affect trajectory representation. Furthermore, we store optimal variational priors across various scenarios using a memory queue, ensuring continuous debiasing during incremental task training. The proposed C$^{2}$INet enhances adaptability to diverse tasks while preserving previous task information to prevent catastrophic forgetting. It also incorporates pruning strategies to mitigate overfitting.\nComparative evaluations on three real and synthetic complex datasets against state-of-the-art methods demonstrate that our proposed method consistently achieves reliable prediction performance, effectively mitigating confounding factors unique to different scenarios. This highlights the practical value of our method for real-world applications."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Trajectory Prediction",
"Causal Intervention",
"Variational Inference",
"Continual Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/58b363361c38ca87c37a17ad26e04fa3d7ac4459.pdf"
},
"presentation": null,
"primary_area": {
"value": "causal reasoning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/6f57a3aa929b070aae01efea2c3e96bdac133eff.zip"
},
"title": {
"value": "C$^{2}$INet: Realizing Incremental Trajectory Prediction with Prior-Aware Continual Causal Intervention"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5J9B7Sb8rO | Quantized Spike-driven Transformer | main | Active | Spiking Neural Network+Spike-driven+Quantized Spiking Transformer+ Neuromorphic Computing | applications to neuroscience & cognitive science | 3;5;5;6;6 | 4;5;5;4;5 | 3;3;3;3;3 | 3;2;2;3;3 | 3;3;2;3;3 | 5 | 4.6 | 3 | 2.6 | 2.8 | 0.372678 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper aims to address the problem of performance degradation of the Spike Transformer after quantization, attributing it to the spiking information distortion (SID) problem. The authors presents a loss function based on mutual information maximization to tackle the problem.\n2. The authors conduct experiments across various tasks to demonstrate the effectiveness of the proposed method.\n3. The paper is well organized and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a Quantized Spike-Driven Transformer, addressing the spike information distortion during quantization caused by the bimodal distribution of Quantized Spike-Driven Self-Attention (Q-SDSA). A bi-level optimization strategy is introduced, incorporating information-enhanced LIF and fine-grained distillation to rectify the distribution of Q-SDSA."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The primary reason for the improved quantization performance of the proposed method is the use of multi-bit spikes instead of traditional 0-1 spikes. Specifically, the implementation extends to 4 virtual timesteps, which inevitably reduces the training and inference efficiency. However, the manuscript does not provide a detailed explanation or analysis of this trade-off, which would be beneficial for understanding the overall impact on efficiency.\n2. The empirical comparison can be done in a more thoroughly by comparing with other latest state-of-the-art methods.\n3. Some content in the paper seems unnecessary, such as Appendix A, which does not contribute significantly to the main arguments or findings and could be omitted for conciseness."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. To make the ImageNet results table solid, authors can add additional results such as QKFormer [1] and previous QAT-ViT models [2].\n2. I just wondered why only small sizes of architecture (1.8M, 3.9M, 6.8M) are used for training the ImageNet dataset. Is there any scalability issue with this method?\n3. This work uses multi-bit spikes during training and knowledge distillation with ANN architecture, which causes huge training overhead in training time and memory. Can you present any analysis of this overhead?\n4. In the transfer learning section, the overall information is insufficient. Which bit-width did you use? and could you provide us the accuracy of CIFAR10/100, and CIFAR10-DVS without transfer learning?\n5. Can the authors provide the firing rate information? Compared to the original Spike-driven Transformer-V2, how has the firing rate changed in the self-attention part?\n\n[1] Zhou, Chenlin, et al. \"QKFormer: Hierarchical Spiking Transformer using QK Attention.\" arXiv preprint arXiv:2403.16552 (2024).\n[2] Li, Yanjing, et al. \"Q-vit: Accurate and fully quantized low-bit vision transformer.\" Advances in neural information processing systems 35 (2022): 34451-34463."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- First approach to quantizing spike-based transformers with SID analysis\n- Solid theoretical analysis and proofs for the proposed methods\n- Competitive accuracy on ImageNet (80.3%) with low energy\n- Extensive experiments on multiple tasks (classification, object detection, segmentation)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents QSD-Transformer, a quantized spike-driven transformer that addresses the challenge of implementing efficient spiking neural networks (SNNs) while maintaining performance. The work shows three key points: (1) a lightweight quantized baseline for spike-driven transformers, (2) a bi-level optimization strategy to address spike information distortion (SID), and (3) information-enhanced LIF (IE-LIF) neurons with fine-grained distillation for improved performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Lack of comparison with the previous SNN and quantized ANN transformer models (Connected to question #1)\n- Limited scalability in ImageNet experiments (Connected to question #2)\n- Huge training overhead compared to the conventional spike-based transformer due to multi-bit spike and knowledge distillation (Connected to question #3)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See Weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Based on Information entropy, this paper proposes a bi-level optimization strategy, which mitigates quantization-induced performance drops in baseline quantized spiking Transformers. \n\n2. The proposed IE-LIF spike neuron is hardware-friendly and converts to a binary-spiking LIF neuron during inference to maintain spike-driven nature.\n\n3. Experimental results on ImageNet and a large number of vision tasks (detection, segmentation, and transfer learning) show that the method is effective and energy-efficient on various spike-based transformers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "To tackle the issues of high-parameter spike-based Transformer in resource-constrained applications and the low performance of directly quantized spike-based Transformers, this paper introduces a Quantized Spike-driven Transformer. It uses a bi-level optimization strategy, including an Information-Enhanced LIF neuron and fine-grained distillation, to counteract quantization-induced performance degradation. The comparative and ablation experiments demonstrate the effectiveness of the proposed methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors describe the implementation method of surrogate gradients for binary spikes in the appendix. However, the proposed IE-LIF in the main text is multi-bit. Could the authors explain how the aforementioned surrogate gradients are deployed in the proposed neurons?\n\n2. Fast-SNN [1] converts quantized ANNs to SNNs and is a competitive ANN-to-SNN method. Like this paper, it aims to reduce quantization error and employs quantization techniques to enhance energy efficiency. The basic idea of Fast-SNN is to reduce both quantization error and accumulating error, achieving excellent performance on many vision tasks (classification, detection, and segmentation). Could you include comparative experimental results with Fast-SNN?\n\n3. There are some typos in the text, such as the equation (1) on line 164 and on line 292, it seems you intended to reference Fig 1(b). \n\n[1] Hu, Yangfan, et al. \"Fast-SNN: fast spiking neural network by converting quantized ANN.\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 12, pp. 14546-14562, Dec. 2023, doi: 10.1109/TPAMI.2023.3275769."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why do the IE-LIF neuron and fine-grained distillation that reduce the energy consumption? Do these technologies reduce the number of synaptic operations?\n2. Why the energy reduction in the COCO2017 dataset is not significant?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written and easy to follow.\n2. The IE-LIF neuron combines the conversion algorithm and training algorithm, which is novel.\n3. The experimental results are significant."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a quantized spike-driven transformer and increases the accuracy of the SNN by proposing an information-enhanced LIF model and fine-grained distillation from the lower level and upper level respectively. Experiments show that these technologies reduce energy consumption significantly while increasing the accuracy of SNN."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The training of IE-LIF neurons does not utilize temporal information, which is not suitable for temporal benchmarks.\n2. The reason why the IE-LIF neuron and fine-grained distillation that reduces the energy consumption is not provided."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. I wonder whether you quantized the membrane potential or not. If you didn't quantize the membrane potential, it seems hard to implement your method on hardware."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Extensive experiments and ablation studies on Image Classification, Object Detection and Semantic Segmentation.\n2. The proposed Information-Enhanced LIF(IE-LIF) neuron is effective to rectify the information distribution in Q-SDSA through Information Theory.\n3. Clear writing and methodology."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The author proposed Quantized Spike-Driven Transformer(QSD-Transformer) to tackle with the spike information distortion (SID) challenge resulted from quantized spike-driven self-attention (Q-SDSA). The author addressed the problems through two levels: 1) Information-Enhanced LIF(IE-LIF) neuron to rectify the information distribution in Q-SDSA at the lower level. 2) A fine- grained distillation scheme for the QSD-Transformer to align the distribution in Q-SDSA with that in the counterpart ANN at the upper level. QSD-Transformer achieved promising results on multiple computer vision tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The comparison between ANN2SNN and Direct Training methods is limited. Currently, MST is no longer the SOTA method on ANN2SNN method. SpikeZIP-TF (ICML 2024) [1] and ECMT (ACM MM 2024) achieve better performance on Image Classification tasks. The performance of SpikeZIP-TF and ECMT on ImageNet surpass QSD-Transformer by a large margin. In addition, ANN2SNN methods has advantage on saving computational resources compared with Direct Training methods. It is recommended that the author should conduct a more comprehensive comparison between those two methods.\n2. The method proposed by the author is somewhat cumbersome, did the training time provided by the authors in appendix include the training time of FGD?\n3. It is recommended that the author should extend the method to NLP tasks to verify the transferability of QSD-Transformer.\n\n[1] Kang You*, Zekai Xu* et al. SpikeZIP-TF: Conversion is All You Need for Transformer-based SNN. International Conference on Machine Learning 2024\n[2] Zihan Huang, Xinyu Shi et al. Towards High-performance Spiking Transformers from ANN to SNN Conversion. ACM Multimedia 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We proposed a quantized spike-driven Transformer that achieves state-of-the-art results on various vision tasks and maintains a tiny model size."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024quantized,\ntitle={Quantized Spike-driven Transformer},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5J9B7Sb8rO},\nnote={under review}\n}"
},
"abstract": {
"value": "Spiking neural networks (SNNs) are emerging as a promising energy-efficient alternative to traditional artificial neural networks (ANNs) due to their spike-driven paradigm.\nHowever, recent research in the SNN domain has mainly focused on enhancing accuracy by designing large-scale Transformer structures, which typically rely on substantial computational resources, limiting their deployment on resource-constrained devices.\nTo overcome this challenge, we propose a quantized spike-driven Transformer baseline (QSD-Transformer), which achieves reduced resource demands by utilizing a low bit-width parameter. \nRegrettably, the QSD-Transformer often suffers from severe performance degradation.\nIn this paper, we first conduct empirical analysis and find that the bimodal distribution of quantized spike-driven self-attention (Q-SDSA) leads to spike information distortion (SID) during quantization, causing significant performance degradation. To mitigate this issue, we take inspiration from mutual information entropy and propose a bi-level optimization strategy to rectify the information distribution in Q-SDSA.\nSpecifically, at the lower level, we introduce an information-enhanced LIF to rectify the information distribution in Q-SDSA.\nAt the upper level, we propose a fine-grained distillation scheme for the QSD-Transformer to align the distribution in Q-SDSA with that in the counterpart ANN.\nBy integrating the bi-level optimization strategy, the QSD-Transformer can attain enhanced energy efficiency without sacrificing its high-performance advantage.\nWe validate the QSD-Transformer on various visual tasks, and experimental results indicate that our method achieves state-of-the-art results in the SNN domain.\nFor instance, when compared to the prior SNN benchmark on ImageNet, the QSD-Transformer achieves 80.3\\% top-1 accuracy, accompanied by significant reductions of 6.0$\\times$ and 8.1$\\times$ in power consumption and model size, respectively."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Spiking Neural Network+Spike-driven+Quantized Spiking Transformer+ Neuromorphic Computing"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/34ff5753c3ec4facb93aae5102b21f818315cb93.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to neuroscience & cognitive science"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/49394c33d008add36d803dec0cc0b23381734098.zip"
},
"title": {
"value": "Quantized Spike-driven Transformer"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
5JOxazmj8b | From Link Prediction to Forecasting: Information Loss in Batch-based Temporal Graph Learning | main | Active | Graph Neural Network;GNN;Temporal Graph;Dynamic Link Prediction;Dynamic Graph;Temporal Graph Learning;Dynamic Graph Learning;Temporal Graph Neural Network;TGNN;DyGNN;Dynamic Graph Neural Network | learning on graphs and other geometries & topologies | 3;5;5;5 | 5;4;2;3 | 2;3;3;3 | 1;2;2;2 | 3;3;2;3 | 4.5 | 3.5 | 2.75 | 1.75 | 2.75 | -0.774597 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the \"Weaknesses\" section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "*S1* This paper addresses an important yet overlooked issue in temporal link prediction task, i.e., fixed batch size evaluation can distort the task itself by losing or introducing extra information.\n\n*S2* The authors provide extensive data illustrations and quantitative results to facilitate understanding, demonstrating that each dataset has a distinct interaction distribution and how fixed batch-size evaluation can alter the task characteristics.\n\n*S3* This paper formulates a new task setting, *link forecasting*, and offers implementation and reproduction of existing methods to provide valuable insights."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper highlights an overlooked issue in the evaluation of dynamic link prediction task: fixed batch-size evaluation alters the task properties, as for continuous-time temporal graphs, leading to inconsistent duration evaluations across batches; for discrete-time temporal graphs, leading to possible data leakage due to additional introduced temporal dependencies\n\nTo explore this issue in depth, the paper first defines a quantitative metric, NMI, to measure information loss, and conducts extensive empirical analysis to demonstrate how fixed batch sizes distort the task setting. It then formulates a fairer setting, *link forecasting*, enabling consistent time durations for each evaluation batch. Finally, the authors reproduce experiments on existing methods within this reformulated task to reveal their true performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I appreciate the issue raised in this paper and the extensive empirical analysis that clarify the motivation behind the study. However, for the experiments on existing methods within the formulated link forecasting task, I think further discussion is required, e.g., **the reasons for performance changes across diverse settings can be addressed.**\n\n The authors explain why memory-based methods tend to experience performance degradation on discrete-time graphs, which is appreciated. However, it would be beneficial to discuss why other methods might improve in this setting. Additionally, the performance trends for continuous-time graphs appear mixed, potentially due to specific dataset characteristics. I think a more in-depth discussion on these points would enhance the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See **W1**-**W4**."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**S1**. The authors have conducted a great amount of experiments to illustrate the limitations of existing techniques as well as the effectiveness of the proposed method.\n\n**S2**. The limitations of the evaluation of dynamic link prediction are significant but seldom considered and addressed in most existing studies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors considered the evaluation of dynamic link prediction on both discrete-time dynamic graphs (DTDGs) and continuous-time dynamic graphs (CTDGs). They first provided a series of empirical analysis results to demonstrate the limitations of existing batch-based evaluation strategies. A novel time-window-based approach was further proposed to address these limitations. The authors have also validated the effectiveness of the proposed evaluation approach on various public DTDGs and CTDGs datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**W1**. Some statements regarding the research gaps of existing techniques and motivations of this paper are weak, unclear, or confusing.\n\n According to my understanding, this study focuses on the evaluation of dynamic link prediction. However, the title of this paper uses the terminology 'temporal graph learning', which may not be consistent with the major topic of this study. From my perspective, learning may also include the training procedure (e.g., training algorithms, training losses, etc.), addition to evaluation of the inference procedure.\n \n The author claimed that 'within each batch, edges are typically treated as if they occurred simultaneously, thus discarding temporal information within a batch'. To some extent, I do not agree with this statement. According to my understanding, nodes or edges in most TGNNs are encoded as embeddings (i.e., low-dimensional vector representations), which usually involves the **temporal encodings**. In this sense, the temporal information has not been discarded. It is recommended to give some more toy examples about why and how is the temporal information discarded, especially for the case with temporal encodings.\n \n I also respectfully disagree with the definition of dynamic link prediction in Section 2, which was claimed to 'predict whether $(u, v) \\in B_i^+$ or $B_i^-$ in terms of batches $B_i^+$ and $B_i^-$. According to my understanding, a dynamic link prediction model should be able to predict all the possible edges at a specific timestamp but not within a batch.\n \n The authors claimed that existing techniques may hinder the fair comparison of methods. However, it is unclear for me how to define and measure the fairness of comparison. 
A formal definition regarding this point is also recommended.\n\n From my perspective, using 'temporal link prediction' and 'temporal link forecasting' as two terminologies with different definitions may not be a good presentation, which may result in potential ambiguity issues, since they are more likely to be synonyms in natural languages. It is recommended to use a clearer terminology to replace 'dynamic link forecasting' (e.g., batch-based and window-based evaluation) that can help better distinguish between 'temporal link prediction' and 'temporal link forecasting'.\n\n***\n\n**W2**. There seems to be some flaws for the proposed evaluation approach.\n\n In Section 3.2, the authors discussed one possible limitation that the proposed approach 'cannot preclude memory overflows entirely'. A possible solution was then discussed, which 'splits large time windows into smaller batches for GPU-based gradient computation'. In this sense, the proposed method still used the old bath-based technique, which may still 'ignore the temporal information within each batch', as claimed at the very beginning of this paper.\n \n According to my understanding, the proposed method may also suffer from the empty window issue, where there are no edges in a 'pre-defined' window, due to the heterogeneous distribution of temporal edges. However, there are no discussions regarding this limitation and possible solutions.\n\n***\n\n**W3**. Some details of experiments need further clarification. Some additional experiments are also recommended.\n\n In the empirical analysis of this paper, NMI is a significant metric to 'measure the temporal information that is retained after dividing edges into batches'. However, the formal definition regarding how to compute such a measurement is not given. 
As a result, it is still hard for me to understand the physical meaning of NMI using in the empirical analysis.\n \n According to my understanding, the proposed time-window-based approach introduces another hyper-parameter $h$. However, there seems no parameter analysis regarding different settings of $h$ (like the empirical analysis shown in Fig. 3).\n\n\n***\n\n**W4**. Although the authors have provided a great amount of empirical results to demonstrate the limitations of existing techniques and validate the effectiveness of the proposed approach, it is better to provide some theoretical guarantees w.r.t. the evaluation of dynamic link prediction."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. It's unclear to me what the authors mean when they assert that DLP yields different amounts of information loss for different models. It seems to me that, if the batch size is fixed across all models, then it seems to me that the information loss (or really, the inter-batch leakage) will be equivalent. In what ways are they not? Or is the assertion that because batch size is often treated as a (often implicit) hyperparameter, so model comparisons can be implicitly unfair?\n2. Does LF suffer from the same temporal training-leakage that DLP does when h is larger than infinitesimal?\n3. Which is more significant? Batches containing variable time gaps, or the identified temporal leakage?\n4. The primary motivation to use larger batch sizes is the reduction of wallclock time during training. How does the training time for LF scale as a function of h?\n5. How do the results reported in table 2/3 change if the model was trained using a DLP pipeline and then evaluated on both DLP/LF?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The problem with variable-time batch construction for DLP is made quite clear in section 3. The examples are easy to understand\n2. The experiments in section 4 are nearly exhaustive. The models cover the big TGNNs out there, and the datasets cover many of the major datasets out there.\n3. The problem statement is clearly articulated and easy to understand. I had no problem implementing the discrete time version of the data loader in an afternoon of work. This clarity is commendable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces a reformulation of the dynamic link prediction (DLP) task as link forecasting (LF). LF differs from DLP in that LF ensures that each batch corresponds to a fixed time resolution. This small change to the problem statement yields relatively significant changes to the observed accuracies of these models on benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the problem with DLP seems clear, it is hard for me to see why this requires the definition of a new task given that it's a straightforward modification of an existing one. I would recommend that the authors expand on this in the work to further draw the distinction, _or_ to assert that this is the way that DLP should be done in the future.\n3. It appears that the models were not hyperparameter tuned for this new task. It stands to reason that because the new involves training on potentially quite different batch structures than the batches those hyperparameters were tuned for, that the reported model performance might be an underestimate. I would suggest that the authors investigate the hyperparameter sensitivity of the models training using LF.\n4. The experimental details are a bit hard to follow in section 4. It appears that what was done is the authors trained a model using the horizon-based strategy, and then used this model to evaluate on both the horizon and batch based evaluation strategies. They then reported the performance differences. I would ask that the authors clarify these experimental details."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I currently lean towards a rejection of the manuscript but am very open to further discussion and increasing the based on how well my concerns get addressed. I encourage the authors to refer to the identified weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper presents a study that provides a novel perspective on the internal issues with the batch-oriented evaluation protocol. The overall presentation is clear, and it includes adequate background information. The experiments related to the new evaluation protocol are relatively comprehensive, with re-evaluation of numerous existing methods. However, there are some missing aspects (as noted in the identified weaknesses) that should be addressed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work tackles the issue of information loss and leakage in the batch-oriented evaluation protocol for temporal graph learning. It first validates the existence of such loss and leakage through experiments that compute the NMI between batches and timestamps. To address this, the authors propose a new evaluation protocol for link forecasting and re-evaluate numerous existing methods within this framework. The provided code in the supplementary material appears comprehensive and sufficient."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. Figure 2 and Figure 3 offer trivial or intuitive results, which seem to be redundant in the main text. I would suggest to just put part of them in the main text and the remaining in the appendix. Otherwise, these two figures would attract too much attention from the readers and make them lost before reading your point (Figure 4).\n\nW2. The NMI may not be a trivial concept for common readers, therefore it would be helpful to include some technical details of it in the appendix and link to it at line 263. The current presentation of this paragraph is quite confusing as I cannot understand what lines 266-268 mean (how the NMI was computed). Maybe some formulation would help improve the clarification here.\n\nW3. While the issue with the batch-oriented evaluation protocol is effectively identified using NMI, the paper does not adequately explain how this impacts previous evaluations and benchmarks that use this protocol. For instance, could you provide experimental validation to demonstrate the biases or flaws in past evaluations? The experiments in the current manuscript are associated with the new link forecasting protocol. Still, I am confused about how to experimentally validate that this protocol is better than the previous ones. \n\nW4. One advantage of the batch-oriented evaluation is the efficiency, but there seems to be no comparison or comment regarding the training/evaluation efficiency of the link forecasting protocol. I am aware of line 393, Computational cost, but I think such a not in-depth analysis is not enough. Some experimental results could be included.\n\nW5. This is a relatively minor point, as is included in the limitation. For the frameworks that consider the temporal link prediction problem as a ranking task, is it possible to have more discussions about the pros/cons of time-window-based approaches and ranking approaches?\n\nMinor: \n1. line 143, should be {$t_{b\\cdot i}\\, ..., t_{b\\cdot (i+1)-1}$}? 
Also, line 244 seems to have the same mistake.\n2. line 146, 147. if there is no constraint on (u,v), then it's possible that (u,v) is not in the B+ or B- batch. Is something like $(u, v) \\in B^+ \\cup B^-$ missing? I think similar issue exists for you link forecasting definition, line 379.\n3. line 346. \"negative edges that do not occur in time window [i · h,(i + 1) · h)\". I feel it ambiguous. is those negative edges not in this time window, or in this time window but do not occur (interact)? Should be the latter?\n4. Actually it could be an independent section for related work. I would recommend commenting on how existing TGNN benchmarks flow to validate that the widely-used batch-oriented training/evaluation does have issues that this work is trying to fix.\n5. The code implementation largely builds upon an existing framework, DyGLib, and it would be helpful to include comments or documentation clarifying where the extensions and modifications occur at the code level (code snippets) in the appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Current dynamic link prediction evaluation practices do not properly account for temporal graph data. We propose an alternative evaluation strategy fixing these issues."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024from,\ntitle={From Link Prediction to Forecasting: Information Loss in Batch-based Temporal Graph Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=5JOxazmj8b},\nnote={under review}\n}"
},
"abstract": {
"value": "Dynamic link prediction is an important problem considered by many recent works\nproposing various approaches for learning temporal edge patterns. To assess their\nefficacy, models are evaluated on publicly available benchmark datasets involving\ncontinuous-time and discrete-time temporal graphs. However, as we show in this\nwork, the suitability of common batch-oriented evaluation depends on the datasets’\ncharacteristics, which can cause multiple issues: For continuous-time temporal\ngraphs, fixed-size batches create time windows with different durations, resulting in\nan inconsistent dynamic link prediction task. For discrete-time temporal graphs, the\nsequence of batches can additionally introduce temporal dependencies that are not\npresent in the data. In this work, we empirically show that this common evaluation\napproach leads to skewed model performance and hinders the fair comparison of\nmethods. We mitigate this problem by reformulating dynamic link prediction as a\nlink forecasting task that better accounts for temporal information present in the\ndata. We provide implementations of our new evaluation method for commonly\nused graph learning frameworks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Network",
"GNN",
"Temporal Graph",
"Dynamic Link Prediction",
"Dynamic Graph",
"Temporal Graph Learning",
"Dynamic Graph Learning",
"Temporal Graph Neural Network",
"TGNN",
"DyGNN",
"Dynamic Graph Neural Network"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5197c0eda54252c4c3e0cc14fbfdacc18e9c2884.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/8e042f84d4e56f70fda79957ece6872e3f79f5a8.zip"
},
"title": {
"value": "From Link Prediction to Forecasting: Information Loss in Batch-based Temporal Graph Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |