| column | type |
|---|---|
| id | string (length 10) |
| title | string (length 3 to 179) |
| track | string (1 class) |
| status | string (3 classes) |
| keywords | string (length 2 to 2.39k) |
| primary_area | string (21 classes) |
| author | string (501 classes) |
| authorids | string (501 classes) |
| aff | string (1 class) |
| aff_domain | string (1 class) |
| position | string (1 class) |
| rating | string (355 classes) |
| confidence | string (length 0 to 19) |
| soundness | string (642 classes) |
| contribution | string (596 classes) |
| presentation | string (782 classes) |
| rating_avg | float64 (0 to 9) |
| confidence_avg | float64 (0 to 5) |
| soundness_avg | float64 (0 to 4) |
| contribution_avg | float64 (0 to 4) |
| presentation_avg | float64 (0 to 4) |
| corr_rating_confidence | float64 (-1 to 1) |
| project | string (1 class) |
| github | string (1 class) |
| Review | list (length 2 to 10) |
id: y59zhBNKGZ | title: Towards Making Linear Attention Usable | track: main | status: Active | keywords: Linear Attention;Kernel Separation;Transformers;Memory Reduction | primary_area: infrastructure, software libraries, hardware, systems, etc. | rating: 3;3;3;5 | confidence: 4;4;4;3 | soundness: 2;2;2;2 | contribution: 2;2;1;2 | presentation: 2;2;2;2 | rating_avg: 3.5 | confidence_avg: 3.75 | soundness_avg: 2 | contribution_avg: 1.75 | presentation_avg: 2 | corr_rating_confidence: -1 | Review: [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I’d like to rate the current submission reject due to limited technical contributions and lack of convincing experiments. The paper needs significant changes including new experiments and possibly methodological improvements in justifying the practical use behind the proposed method."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well written and easy to follow.\n- Interesting topic -- making linear attention efficient is of great interest to the research community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper attempts to make linear attention efficient and useable by reducing the memory consumption and introducing an alternative mechanism to dropout. Results show the usefulness of the proposed approaches, while maintaining the linear scaling in N in both wall-clock time and memory usage."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The results are somewhat unsatisfactory, with only limited to a very small model. More experiments and analysis are needed to justify the effectiveness of the proposed method.\n- What about practical usage? Can this method start from the pre-trained LLMs and directly convert them to efficient linear attention? Also, can it start from a LLM with linear attention and make it efficient through little amount of training or finetuning?\n- Can this method scale to large models? Say, billion scale models. Authors at least perform experiments using 1-3B models.\n- What about hardware efficiency of the proposed changes to the linear attention?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Same Weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "In terms of practicality, reducing memory cost is very crucial. If this work maintains comparable performance or minimizes the performance drop, it has impactful potential."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses two major limitations in current linear attention mechanisms. First, although existing approaches aim to reduce computational complexity to linear time, they still practically require O(ND²). To address this, this work proposes a method to lower the memory usage to O(ND). By deriving and computing the attention layer gradient analytically, this paper achieves this efficiency without relying on a differentiable programming library, thus avoiding the need to store variables for backpropagation. This work uses Factorization in both forward and backward passes and validates the reduced memory and time complexity in the context of attention layers and large language model training. Additionally, because linear attention doesn’t inherently compute an attention matrix, dropout cannot be directly applied. To overcome this, this work introduces an alternative mechanism that emulates the effect of dropout, with its effectiveness confirmed through an ablation study."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### Lacks of benchmarks\nAlthough it shows memory and latency experiments by increasing token length, it didn't show common LLM benchmarks such as MMLU and GLUE or long-context LLM benchmarks such as n streaming books (PG19), Long Context Understanding (LongBench), and book summarization (Booksum)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How does the proposed method compare with those RNN-like methods such as Mamba and RetNet?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The Motivation is clear.\n\n- The theoretical derivation is detailed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This manuscript proposes two techniques to improve the practical efficiency of linear attention. One is the factorization of matrix multiplication, and the other is a modified dropout technique. Toy experiments show the effectiveness of the two techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, this manuscript is not ready for publication. My concerns are listed as follows:\n\n- The technical novelty is limited. The first proposed method, matrix multiplication multiplication, is a standard operation in linear attention methods [1]. I see some novelty in the second dropout technique, but its effectiveness is validated only in small-scale experiments.\n\n- The experiments are not convincing enough. One small model (Pythia-14M) is adopted, and only a training curve is presented. To make the results more convincing, it is recommended to conduct experiments on larger-scale models (both language and vision models should be included) and well-known benchmarks. The comparison should be the about the trade-off between efficiency and performance.\n\n- The presentation needs substantial improvements. Some examples are listed as follows:.\n - In the introduction, the authors categorize linear attention methods into two lines of work: Sparsified/localized Attention, and Kernel Separation. However, no references are cited here.\n - In the equations (e.g. eq1), the index subscriptions $i,n,j$ should not have bold fonts.\n - In Sec. 3.1, the FLOPs calculation example could be given by normal matrix presentations instead of a 3x3 toy example.\n - The derivaton in Sections 3&4 are too detailed and might distract readers from the main method. It is recommended to put detailed derivation process in the appendix, and present the major method in the main text.\n\n\n[1] Flatten transformer: Vision transformer using focused linear attention. ICCV, 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In section 3.1, the final operation we need to calculate is \n\n[a_1, a_2, ..., a_n]^T \\times [b_1, b_2, ..., b_n] \\times [d_1, d_2, ..., d_n]^T\n\nIf we compute the first \\times first, there will be n^2+n^2 multiplications and n*(n-1) accumulations. If we compute the second \\times first, there will be (n+n) multiplications and (n-1) accumulateions.\n\nSo the point sec 3.1 made it that, change the operation order will save FLOPs. Do I miss something?\n\n2. The new dropout alternative seems complicated. It seems no experiments discuss its complexity. Will it take much inference latency?\n\n3. Overall, I give my initial rating as \"marginally below the acceptance threshold\". I hope the authors can solve my concerns as I mentioned in weakness and questions section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The topic of saving the memory footprint of linear attention is interesting. This is important while it seems few researcher have noticed.\n\n2. The forward and backward derivation is also very clean.\n\n3. An alternative for dropout is proposed, and the experiments in LRA and Tiny Stories datasets verifies its effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper focus on reduce the space complexity of vanilla linear attention. To be specific, the authors change the order of some matrix multiplication in the forward pass of linear attention when the kernel is linear. The authors also provide a detailed derivation of the forward pass and backward pass. Because linear attention does not have a explicit attention map in the forward pass, the authors propose an alternative for the dropout regularization mechanism. Some testing experiments shows this method are indeed save the memory footprint, but the experiment is not abundant."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) After the forward and backward modification, althought the computational seems equivalent compared with linear attention, will the proposed method achieve the same performance is still unclear. The authors can check the RepVGG paper, which shows that equivalent computational may not produce the same performance.\n\n(2) The paper only change is computing order of Linear Attn, and proposed a new dropout alternative. I think the novelty is limited.\n\n(3) Lack of experiment.\n\n (3.1) for the time and memory scaling experiments (sec. 5.1). The authors only show one fixed number of head H, fixed head dim D, varying token length result experiment, and, one fixed token length N, varying head dim D experiment. I think the author should show more results on the H, D choices, since this experiment is training free, and whether the experiment results would show the same trend. Additionally, the only chosen one is weird. Since in ViT-Base H=12 and D=64, in DiT-S H=6 and D=64, I cannot find a popular model with H=16 and D=32.\n\n (3.2) For LLM experiment in Sec. 5.2, the author does not express the reason why they choose this model and this dataset. And they only show the loss curve (also the orange curve is not trained as long as the blue one is). No evaluation metric provided.\n\n (3.3) For the ablation study in Sec. 5.3, I do not get the point why the author do not provide these accuracy results in main results, but put them in ablation. The training hypermeter is also not clear. I have also check the LRA results in other papers (e.g. S4, by Albert Gu in ICLR 2022, https://arxiv.org/pdf/2111.00396). The results of LRA in the submission are far lower than this 2 years ago paper. And both the Softmax result and Linear(alt. drop.) results have different numerical range compared with the results in S4 paper.\n\n(4) The experiment hyper-parameter is not described. That may be the reason why it cannot match the results in S4 paper. I think the authors should follow others setting, or explain why they choose a different setting.\n\n(5)Minors:\n\n (5.1) wrong formula in line 90: f(x) = exp(q \\cdot k / root(D)), while there is no x in the expression.\n\n (5.2) a latex bug in appendix D.1 (line 860) have not be fixed."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce a method to reduce the memory complexity of Kernel Separation in Transformer attention from $O(ND^2)$ to $O(ND)$, where $N$ is the number of tokens and $D$ dimension per attention head. We also introduce an alternative dropout mechanism."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Making Linear Attention Usable},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y59zhBNKGZ},\nnote={under review}\n}"
},
"abstract": {
"value": "The original Transformer attention mechanism, based on Softmax, has time and memory complexities of $O(N^2D)$ and $O(N^2)$, where $N$ is the number of tokens and $D$ the dimension per attention head. As current LLM applications trend towards processing larger token sequences, and Transformers gain popularity in image, video, and audio processing, addressing this quadratic cost becomes imperative. Since the introduction of Transformers, numerous approaches have been proposed to linearize this scaling. One such method is Linear Attention, which captures all-to-all token pair attention in $O(ND^2)$ time. However, its drawback lies in its high memory footprint of $O(ND^2)$. While Linear Attention has shown promise in small-scale benchmarks, the high memory demand has prevented Linear Attention to be studied in context of large benchmarks and practical use cases. In this work, we demonstrate how to reduce the memory complexity to $O(ND)$ by approaching calculations from a novel perspective. Additionally, since Linear Attention does not compute the attention matrix directly, it precludes the use of traditional dropout. To address this, we introduce an alternative dropout mechanism. Our study confirms linear scaling in both wall-clock time and memory usage. We also compare our method with Flash Attention and conduct an ablation study on our proposed dropout alternative."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Linear Attention",
"Kernel Separation",
"Transformers",
"Memory Reduction"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d8f7f3dd6f560ff14c6886d10f6246651f21ea96.pdf"
},
"presentation": null,
"primary_area": {
"value": "infrastructure, software libraries, hardware, systems, etc."
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Making Linear Attention Usable"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
]
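The record above discusses kernel-separated linear attention, where reordering the matrix products avoids ever forming the N x N attention matrix and brings the time cost from O(N^2 D) down to O(N D^2). The sketch below is a minimal NumPy illustration of that general reordering idea, not the submission's actual method; the feature map `phi` (elu + 1) and the tensor shapes are assumptions made for illustration.

```python
import numpy as np

def phi(x):
    # A common positive feature map used in linear attention (an assumption here,
    # not necessarily what the submission uses): elu(x) + 1.
    return np.where(x > 0, x + 1.0, np.exp(x))

def softmax_attention(Q, K, V):
    # Standard attention: forming the N x N matrix costs O(N^2 D) time, O(N^2) memory.
    S = Q @ K.T / np.sqrt(Q.shape[1])
    A = np.exp(S - S.max(axis=1, keepdims=True))
    A /= A.sum(axis=1, keepdims=True)
    return A @ V

def linear_attention(Q, K, V):
    # Kernel separation: compute phi(K)^T V (a D x D matrix) first, so the N x N
    # attention matrix is never formed -- O(N D^2) time, O(D^2) extra memory.
    Qf, Kf = phi(Q), phi(K)
    KV = Kf.T @ V                      # (D, D)
    Z = Qf @ Kf.sum(axis=0)            # (N,) normalizer
    return (Qf @ KV) / Z[:, None]

N, D = 1024, 32
rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((N, D)) for _ in range(3))
print(softmax_attention(Q, K, V).shape, linear_attention(Q, K, V).shape)  # (1024, 32) (1024, 32)
```

The same associativity argument underlies the FLOP comparison raised in one of the review questions above: multiplying the small D x D (or rank-1) factor first is what saves the quadratic cost.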
id: y5B0ca4mjt | title: PIG: Physics-Informed Gaussians as Adaptive Parametric Mesh Representations | track: main | status: Active | keywords: Gaussians;Physics-informed Deep Learning | primary_area: applications to physical sciences (physics, chemistry, biology, etc.) | rating: 5;6;6;8 | confidence: 5;2;4;4 | soundness: 2;3;3;4 | contribution: 2;3;3;3 | presentation: 4;3;3;4 | rating_avg: 6.25 | confidence_avg: 3.75 | soundness_avg: 3 | contribution_avg: 2.75 | presentation_avg: 3.5 | corr_rating_confidence: -0.157895 | Review: [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- With the work SIREN in mind, I wonder if the Gaussian basis is the best choice?\n- The idea is quite similar to 3D Gaussian splatting in rendering. However, there are differences. One easy way to explain is that, we can think of the volume rendering equation as a specific type of governing physics equation. Then the success of applying parametric mixed Gaussian in one specific type of equations doesn't mean it is the best idea for others. For example, there can be long-range interactions for certain PDEs, while Gaussian functions are local and lack such representation capability. I would like to have the authors' opinions on such comments."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- A theoretical proof of the universal approximation capability for the proposed method is provided.\n- The experiments are diverse, the baselines are sufficient, and the ablation study is detailed.\n- The writing is overall clear and detailed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors use deep Gaussian mixture models as parametric mesh representations to replace MLP in PINN, so that it performs better for high-frequency and non-linear components. Experiments for PDEs such as Allen-Cahn, Helmholtz, Nonlinear diffusion, etc., are conducted compared with several previous PINN derivatives, showing that the proposed method achieves sota solution accuracy."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Though diverse and rich, the experimental settings are all relatively simple, and lack comparisons to traditional methods.\n- Correct me if I am wrong, but I think only the solution accuracy is provided, without the computation time?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- I would be interested to see how the authors address the weaknesses above. Could authors provide additional evidence or context to support the novelty? \n- Have the authors conducted any experiments to demonstrate how the method alleviates spectral bias?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is well-written and easy to follow. The code and experiments are of good quality. It provides a more concise and more parameter efficient feature embedding method compared to previous parametric grid representations method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose the Physics-Informed Gaussian (PIG) that learns feature embeddings of input coordinates, using a mixture of Gaussian functions. Compared to PINN, this paper applies Gaussians to input coordinates and makes the mean and covariance matrix learnable. The locality of Gaussians encourages the model to capture high-frequency details by effectively alleviating spectral bias."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The primary concern lies in the originality of this method. Though it’s considered as a feature embedding, mathematically, it is in the same spirit of a KAN layer with RBF. Also, it is not uncommon to consider Gaussian embedding in ML. \n\n- The paper states that the approach \"dynamically adjusts the positions and shapes of the Gaussians during training\" with parameters \"iteratively updated throughout the training process.\" The claims in the paper give an impression of adaptive sampling method based on the training. However, the approach does not actually take into account the residual or the loss function landscape. It would be better to provide a more rigid description on this. Also, in the experiments, the method is compared with different PINN approaches for each PDE, and the authors report the results without conducting experiments by themselves again. Though “the sensitivity of PINN variants to hyperparameters complicates fair comparisons”, it’s still necessary to conduct experiments with same setup and number of parameters to ensure a fair comparison. The average error of PIG is generally higher, but the authors only highlight the best of PIG. PIG is claimed to “enable more accurate and efficient approximations”, but the efficiency is not clear. Hence, it seems a lot of conclusions in this paper are overstated.\n\n- I would like to see how the spectral bias can be alleviated, either through experimental evidence or analytical justification.\n\n- For the 2D and 3D cases, what is the mathematical formulation for the Gaussian embedding? How is the parameter being initialized? Additionally, how are the boundary conditions (BC) being constrained?\n\n- The paper mentions that \"computational costs per iteration of our method are significantly lower than JAX-PI.\" A detailed report of these computational costs would be beneficial."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- (Line 245-) \"Similar to the previous parametric grid methods, which obtain feature embeddings by interpolating only neighboring vertices, this locality encourages the model to capture high-frequency details by effectively alleviating spectral bias\": Is it possible to mathematically prove that \"this locality encourages the model to capture high-frequency details by effectively alleviating spectral bias\" in some sense? This is just a question out of curiosity.\n\n- Is the title of Section 3.2.2 relevant?\n\n- Why is tanh used in the architecture (it is fine if there is no specific reason)? What about sin or swish activations?\n\n- Is there any issues about mode collapses ($\\mu_i \\rightarrow \\mu_j$ for all $i$ and $j$ and/or $\\sigma_i \\rightarrow 0$ or $\\infty$)?\n\n- I think the update scales of $W$ and $\\theta$ differ significantly from those of $\\mu_i$ and $\\sigma_i$. The optimal updates for $\\mu_i$ and $\\sigma_i$ likely depend on the scale of the simulation. Did the authors encounter any issues related to this?\n\n- It may be off-topic, but is the proposed method applicable to high-dimensional PDEs, such as $d\\gtrsim100$? How significant is the computational cost?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- The paper is well-written and easy to follow. I enjoyed reading it.\n\n- Motivation is clear.\n\n- Comparisons with previous works are well-discussed.\n\n- The proposed model is supported by both theoretical and empirical evidence.\n\n- Error bars are provided, enhancing the reliability of the experimental results.\n\n- Code is provided for reproducibility."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Physics-Informed Gaussians (PIG) is proposed, an efficient and accurate PDE solver that utilizes\nlearnable Gaussian feature embeddings and a lightweight neural network, counteracting the problems inherent in previous static parametric grid approaches. PIG model achieves competitive accuracy and faster convergence with fewer parameters compared to state-of-the-art methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- (Minor) Figures should be vector images to avoid pixelization.\n\n- While I found no significant weaknesses, there may be room to further expand the discussion on the novelty and its potential impact on future research."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I may miss something but, is there any reason why choose Gaussian functions as feature embedding? Would other embedding functions fail in the proposed framework?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The strength of this paper is to overcomes the limitations of traditional PINNs. The proposed method maintains the benefits of PINNs, improves performance in PDE approximation, and can be applied to various PDEs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Physics-Informed Gaussians (PIGs), which incorporates Gaussian embeddings in the PINNs framework, to eliminate the inductive biases of neural networks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "As mentioned in Discussion and Limitations, theoretical understanding of the convergence properties is lack, although the universality of PIG is discussed in Section 3.3."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024pig,\ntitle={{PIG}: Physics-Informed Gaussians as Adaptive Parametric Mesh Representations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y5B0ca4mjt},\nnote={under review}\n}"
},
"abstract": {
"value": "The approximation of Partial Differential Equations (PDEs) using neural networks has seen significant advancements through Physics-Informed Neural Networks (PINNs). Despite their straightforward optimization framework and flexibility in implementing various PDEs, PINNs often suffer from low accuracy due to the spectral bias of Multi-Layer Perceptrons (MLPs), which struggle to effectively learn high-frequency and non-linear components. Recently, the parametric mesh representations have been investigated as a promising approach to effectively eliminating the inductive biases of neural networks. However, they often require very high-resolution grids and a large number of collocation points to achieve high accuracy while avoiding overfitting issues. In addition, these are limited by the fixed positions of the mesh parameters, hindering their ability to approximate complex PDEs. To overcome these limitations, we propose Physics-Informed Gaussians (PIGs), which combine feature embeddings using Gaussian functions with a lightweight neural network. Our approach uses trainable parameters for the mean and variance of each Gaussian, allowing for dynamic adjustment of their positions and shapes during training. This adaptability enables our model to optimally approximate PDE solutions, unlike models with fixed parameter positions. Furthermore, the proposed approach maintains the same optimization framework used in PINNs, allowing us to benefit from their excellent properties. Experimental results show the competitive performance of our model across various PDEs, demonstrating its potential as a robust tool for solving complex PDEs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Gaussians",
"Physics-informed Deep Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ca455a58a9fa1df1bea38e54158e4710bf6f2244.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/a4ae8d6a4f529c34d8e978c55c597eaff9f4ad8e.zip"
},
"title": {
"value": "PIG: Physics-Informed Gaussians as Adaptive Parametric Mesh Representations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
]
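The PIG record above describes embedding input coordinates with Gaussian functions whose means and variances are trainable, followed by a lightweight network (one review notes tanh activations). Below is a rough PyTorch sketch of such an embedding layer; the isotropic Gaussians, the initialization, and the layer sizes are assumptions made for illustration, not the paper's exact formulation.

```python
import torch
import torch.nn as nn

class GaussianFeatureEmbedding(nn.Module):
    """Embed input coordinates with a mixture of Gaussians whose means and
    (log-)scales are trainable, in the spirit of adaptive parametric-mesh features."""
    def __init__(self, in_dim=2, num_gaussians=64):
        super().__init__()
        # Means initialized uniformly in [0, 1]^in_dim (assumption).
        self.mu = nn.Parameter(torch.rand(num_gaussians, in_dim))
        # Log standard deviations, isotropic per Gaussian (assumption).
        self.log_sigma = nn.Parameter(torch.full((num_gaussians,), -2.0))

    def forward(self, x):                                # x: (batch, in_dim)
        diff = x[:, None, :] - self.mu[None, :, :]       # (batch, K, in_dim)
        sq_dist = (diff ** 2).sum(-1)                    # (batch, K)
        sigma2 = torch.exp(2.0 * self.log_sigma)         # (K,)
        return torch.exp(-0.5 * sq_dist / sigma2)        # (batch, K) features

# A small PINN-style head on top of the embedding (sizes are illustrative).
model = nn.Sequential(GaussianFeatureEmbedding(2, 64),
                      nn.Linear(64, 64), nn.Tanh(),
                      nn.Linear(64, 1))
xy = torch.rand(128, 2, requires_grad=True)              # collocation points
u = model(xy)                                            # candidate PDE solution u(x, y)
print(u.shape)                                           # torch.Size([128, 1])
```

Because the means and scales are `nn.Parameter`s, they receive gradients from the PDE residual loss just like the network weights, which is the "adaptive" aspect the reviews discuss.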
id: y5G1BfV7Am | title: X-VILA: Cross-Modality Alignment for Large Language Models | track: main | status: Withdraw | keywords: Multi-task learning;vision-language models;generative models | primary_area: foundation or frontier models, including LLMs | author: Hanrong Ye;De-An Huang;Yao Lu;Zhiding Yu;Wei Ping;Andrew Tao;Jan Kautz;Song Han;Dan Xu;Pavlo Molchanov;Hongxu Yin | authorids: ~Hanrong_Ye1;~De-An_Huang1;~Yao_Lu13;~Zhiding_Yu1;~Wei_Ping1;~Andrew_Tao1;~Jan_Kautz1;~Song_Han5;~Dan_Xu4;~Pavlo_Molchanov1;~Hongxu_Yin2 | rating: 3;3;5;8 | confidence: 5;4;4;3 | soundness: 2;2;2;3 | contribution: 2;2;2;3 | presentation: 2;2;3;3 | rating_avg: 4.75 | confidence_avg: 4 | soundness_avg: 2.25 | contribution_avg: 2.25 | presentation_avg: 2.5 | corr_rating_confidence: -0.863868 | Review: [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "We would like to sincerely thank all reviewers for their valuable feedback, and we have decided to withdraw the submission for further improvement."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* I personally appreciate the architecture that supports multimodal perception and generation. However, it is true that vision and audio alone do not fully encompass the concept of Any-to-Any; other modalities are also prevalent. It appears that the current architecture lacks sufficient scalability for additional modalities. Introducing new modalities often requires re-training the LLM, which poses a limitation that may not be conducive to building a truly Any-to-Any architecture.\n\n* I would like to understand the nature of the interactions between modalities. For instance, how does modeling and learning within the audio modality affect that of the image modality?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper represents a commendable attempt in the field of multimodal integration. The proposed Vehicle for Enhancing Harmony (VEH) offers valuable insights into facilitating the interaction between perception modality encoding and decoding. Overall, the manuscript is well-written and provides a thorough performance evaluation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduc X-VILA, an omni-modality model designed to extend the capabilities of large language models by incorporating image, video, and audio modalities. X-VILA achieves cross-modality understanding, reasoning, and generation within one model. And related capabilities are evaluated on extensive benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **Confusion in Structural Design**: According to the authors, the Embedding Highway design is intended to be applicable across various modalities. However, it is only applied to the visual modality, which appears to contradict the authors' objective of constructing a truly Any-to-Any multimodal model.\n\n* **Novelty of the Model**: To my knowledge, the architecture that employs modality-independent encoding and decoding, integrated through a LLM, has been extensively explored in several prior works (e.g., Next-GPT [1]). This architecture does not seem to demonstrate clear novelty. It would be beneficial for the authors to systematically compare their work with these existing studies to highlight its innovative aspects.\n\n[1] Wu S, Fei H, Qu L, et al. NExT-GPT: Any-to-Any Multimodal LLM[C]//Forty-first International Conference on Machine Learning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why use Imagebind as all modality encoders? It seems all tasks performs bad, If we change \"unbind encoders\", such as clip for vision, whisper/beats for audio, the performance could definitely be better. Imagebind only sounds better instead of performance better.\n2. Number of compared methods are too small. Only NextGPT? \n3. \" As depicted in Figure 4, we have identified two key capabilities that have surfaced Long-context cross-modality generation. X-VILA exhibits an impressive capacity for comprehending and combining diverse concepts from multiple iterations of input. Consequently, it produces natural and coherent output, as suggested by the users.\" I don't think the context is long enough to be called \"long-context\" in the example of Figure 4.\n4. The first stage training, so-called \"encoder-LLM-Decoder alignment training\" utilizes the \"X-text pairs\" from academic datasets as in prior work, so what does the decoder train? It is so confused."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.The instructional tuning dataset may be helpful for multi-modality generation research.\n2. Proposed method surpasses Next-GPT on compared benchmarks \n3. Proposed Visual Embedding Highway sounds make sense."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper propose a multi-modality large language model which can model the understanding and generation between audio, image, video and text. It also propose a X-to-X generation instructional tuning dataset. The method is evaluated on multimodal generation benchmark and some general visual/audio QA/caption benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Novelty is limited. Unifing text and visual generation have been studied for a lot, such as Cogview or Emu series. Adding audio modality also have been researched by AnyGPT or NextGPT. So this paper seems more like some performance incresements istead of some new insights, which is not excited enough to me.\n2. Architecture design does not have new insights. Multimodal encoders + projectors + LLM + diffusion decoders are a common way for unifying multimodal understanding and generation. Even though authors emphasize that diffusion models are tuned together which make difference from earlier works, but the architecture still seem not novel and elegant enough. In addition, the visual high way design seems serving for edit-like tasks targetedly instead of a general design. \n3. Performances on visual QA or audio caption benchmarks are too weak."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Is there a planned timeline for releasing the training code and datasets for X-VILA?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduction of the Visual Embedding Highway (VEH) to preserve and enhance visual features represents a significant advancement over existing methods in visual alignment.\n2. The paper provides a large X-to-X dataset for multi-modal instruction, which is a valuable resource for advancing research in multi-modal modeling.\n3. The authors plan to open-source X-VILA and the dataset, promoting academic and engineering research in multi-modal foundation models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces X-VILA, a model capable of handling various input and output modalities, including video, image, language, and audio. X-VILA employs a two-step alignment mechanism involving textual alignment and a novel visual alignment component called the Visual Embedding Highway (VEH). This mechanism addresses common challenges in multi-modality alignment, such as loss of visual information. The authors contribute a large-scale X-to-X multi-modal instruction dataset to support further research in multi-modal models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Missing Example Component: The example in Figure 1 lacks the \"video\" input component, which disrupts the clarity and completeness of the illustration.\n2. High Computational Requirements: The model’s training process is resource-intensive, potentially limiting accessibility and reproducibility, particularly for smaller research teams."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The introduction of the Visual Embedding Highway (VEH) module to preserve visual details is an original contribution that enhances the model's performance in visual tasks. \n\nThe paper is of high quality, as evidenced by its thorough methodology, comprehensive experiments, and rigorous evaluation. \n\nThe paper is well-structured and clearly written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces X-VILA, an omni-modality model designed to extend the capabilities of large language models (LLMs) by incorporating image, video, and audio modalities. X-VILA achieves cross-modality understanding, reasoning, and generation by aligning modality-specific encoders with LLM inputs and diffusion decoders with LLM outputs. The key contributions of the paper are:\n\n- It presents a new family of any-to-any modality chat LLMs capable of conducting multi-modality conversations, understanding signals from different modalities, and generating content in various formats, including video, audio, image, and text.\n- The paper introduces a new X-to-X multi-modality instruction tuning dataset, which has proven effective for cross-modality alignment. \n- To address the issue of visual information loss, a visual alignment mechanism with a visual embedding highway module is introduced, which allows visual features to bypass the LLM, enhancing the correspondence of visual content between input and output stages.\n- X-VILA demonstrates emergent properties across modalities even in the absence of similar training data, showcasing abilities like long-context cross-modality generation and new types of cross-modality ability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- How do the authors ensure the data quality of the proposed X-to-X dataset, and what is the specific process for its creation?\n\n- Why is the additional module VEH implemented solely in the visual component?\n\n- Compared to Next-GPT, the method in this paper utilizes a larger instruction tuning dataset. Could the authors provide experimental results demonstrating the impact of scaling up the instruction data?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nye2024xvila,\ntitle={X-{VILA}: Cross-Modality Alignment for Large Language Models},\nauthor={Hanrong Ye and De-An Huang and Yao Lu and Zhiding Yu and Wei Ping and Andrew Tao and Jan Kautz and Song Han and Dan Xu and Pavlo Molchanov and Hongxu Yin},\nyear={2024},\nurl={https://openreview.net/forum?id=y5G1BfV7Am}\n}"
},
"abstract": {
"value": "We introduce X-VILA, an omni-modality model designed to extend the capabilities of large language models (LLMs) by incorporating image, video, and audio modalities. By aligning modality-specific encoders with LLM inputs and diffusion decoders with LLM outputs, X-VILA achieves cross-modality understanding, reasoning, and generation. To facilitate this cross-modality alignment, we curate an effective interleaved any-to-any modality instruction-following dataset. Furthermore, we identify a significant problem with the current cross-modality alignment method, which results in visual information loss. To address the issue, we propose a visual alignment mechanism with a visual embedding highway module. We then introduce a resource-efficient recipe for training X-VILA, that exhibits proficiency in any-to-any modality conversation, surpassing previous approaches by large margins. X-VILA also showcases emergent properties across modalities even in the absence of similar training data. The project will be made open-source."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Hanrong_Ye1",
"~De-An_Huang1",
"~Yao_Lu13",
"~Zhiding_Yu1",
"~Wei_Ping1",
"~Andrew_Tao1",
"~Jan_Kautz1",
"~Song_Han5",
"~Dan_Xu4",
"~Pavlo_Molchanov1",
"~Hongxu_Yin2"
]
},
"authors": {
"value": [
"Hanrong Ye",
"De-An Huang",
"Yao Lu",
"Zhiding Yu",
"Wei Ping",
"Andrew Tao",
"Jan Kautz",
"Song Han",
"Dan Xu",
"Pavlo Molchanov",
"Hongxu Yin"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multi-task learning",
"vision-language models",
"generative models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "ye|xvila_crossmodality_alignment_for_large_language_models"
},
"pdf": {
"value": "/pdf/30af5cc7feabdffec617f6dc7e5d1af6366c4472.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "X-VILA: Cross-Modality Alignment for Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
]
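The X-VILA record above describes aligning modality-specific encoders with the LLM input, aligning diffusion decoders with the LLM output, and adding a visual embedding highway that lets visual features bypass the LLM on their way to the decoder. The following is a purely schematic sketch of that data flow with stand-in modules (plain linear layers and a single transformer block); none of the component names, sizes, or shapes come from the paper.

```python
import torch
import torch.nn as nn

class XToXSketch(nn.Module):
    """Schematic any-to-any pipeline: encoder -> input projector -> LLM ->
    output projector -> diffusion-decoder conditioning, plus a visual-embedding
    'highway' that routes encoder features around the LLM. All components here
    are stand-ins for illustration, not the actual X-VILA modules."""
    def __init__(self, d_enc=512, d_llm=1024, d_cond=768):
        super().__init__()
        self.vision_encoder = nn.Linear(3 * 32 * 32, d_enc)   # stand-in for a frozen multimodal encoder
        self.in_proj = nn.Linear(d_enc, d_llm)                 # align encoder output with LLM input space
        self.llm = nn.TransformerEncoderLayer(d_llm, nhead=8, batch_first=True)  # stand-in LLM block
        self.out_proj = nn.Linear(d_llm, d_cond)               # align LLM output with decoder conditioning
        self.highway = nn.Linear(d_enc, d_cond)                # visual features bypass the LLM

    def forward(self, image_pixels):
        v = self.vision_encoder(image_pixels)                  # (B, d_enc)
        h = self.llm(self.in_proj(v).unsqueeze(1)).squeeze(1)  # (B, d_llm)
        cond = self.out_proj(h) + self.highway(v)              # conditioning for a diffusion decoder
        return cond

sketch = XToXSketch()
print(sketch(torch.rand(2, 3 * 32 * 32)).shape)                # torch.Size([2, 768])
```

The skip path from `v` to `cond` is the point of the highway idea the reviews debate: visual detail reaches the decoder without being squeezed entirely through the LLM's textual bottleneck.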
id: y5einmJ0Yx | title: GOLD: Graph Out-of-Distribution Detection via Implicit Adversarial Latent Generation | track: main | status: Active | keywords: Graph Neural Network;Out-of-Distribution Detection | primary_area: learning on graphs and other geometries & topologies | rating: 6;6;8;8 | confidence: 3;2;3;3 | soundness: 3;3;4;3 | contribution: 3;3;4;3 | presentation: 3;2;4;3 | rating_avg: 7 | confidence_avg: 2.75 | soundness_avg: 3.25 | contribution_avg: 3.25 | presentation_avg: 3 | corr_rating_confidence: 0.57735 | Review: [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Could the author better clarify the key novelty of GOLD compared to existing works? What distinguishes the usage of the generator in GOLD from previous works?\n- The training procedure has two stages: one stage involves fixing the GNN and training the LGM, and the second stage involves fixing the LGM and training the GNN. Is it possible to combine these two stages using a gradient reversal layer?\n- Further ablative studies on $L_{DReg}, L_{Unc}, L_{EReg}$ (removing each of them or applying each individually) may help to better demonstrate the effectiveness of the proposed new divergence regularization (Eq. 12).\n- What modifications would be needed to extend the framework beyond node-level detection, such as to graph-level OOD detection tasks?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- *No OOD Data Required*: Synthesizes pseudo-OOD data through adversarial training, alleviating the need for real OOD samples.\n- *Implementation Flexibility*: Supports both LDM and VAE variants, offering trade-offs between performance and computational efficiency.\n- *Strong Empirical Performance*: Outperforms non-OOD methods and matches/exceeds methods using real OOD data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a framework designed to detect out-of-distribution data without requiring pre-existing OOD datasets or pre-trained models. The proposed GOLD framework includes:\n- A latent generative model to generate synthetic embeddings that imitate in-distribution embeddings from a GNN.\n- A GNN encoder and OOD detector to classify in-distribution data and maximize energy divergence between in-distribution and synthetic embeddings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Using generative models to generate samples for downstream tasks has been widely adopted in previous methods. For example, generated samples are commonly used in continual learning for experience replay. It seems that the use of generators in GOLD applies the same concept to different tasks. Energy-based detection is also widely used. The major contribution seems to lie in a new divergence regularisation (Eq.12).\n- Further ablative studies on divergence regularisations may be needed to better reflect their effectiveness.\n- The presentation needs to be improved."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Regarding the generation process of pseudo-OOD samples based on ID data, if these generated samples are overly close to the ID distribution, it could lead to confusion in the OOD detector, potentially causing ID samples to be misclassified as pseudo-OOD. Could the authors elaborate on whether any specific techniques were applied during training to control this distributional proximity?\n2. Additionally, I noticed that the model exhibits substantial variance in the FPR95 metric on the Amazon and Coauthor datasets. Could the authors clarify whether this variance is linked to the aforementioned distributional control during the generation process?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper presents a method with notable novelty, particularly in how it handles the generation of OOD (Out-of-Distribution) samples based on ID (In-Distribution) data. This approach demonstrates creative problem-solving and provides a potentially valuable contribution to OOD detection research.\n2. The experimental section is comprehensive, covering multiple datasets and providing a range of performance metrics. This thorough evaluation supports the robustness of the method and suggests that it may perform effectively across diverse scenarios."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents the GOLD framework, an innovative implicit adversarial learning approach for graph OOD detection that bypasses the need for additional OOD data or pretrained generative models. GOLD employs an alternating optimization scheme, utilizing a latent generative model and a GNN encoder to produce pseudo-OOD embeddings for training, enhancing robustness against OOD instances. Extensive experiments across five benchmark graph datasets demonstrate the framework’s competitive performance compared to state-of-the-art OOD detection methods, even without true OOD data. Minor issues, such as a blank figure and lack of code availability, detract slightly from the work, but overall, the paper makes a valuable contribution to graph-based OOD detection."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Certain issues in the manuscript reduce its overall clarity and precision. For example, Figure 1(d) appears to be blank, which may hinder understanding and interpretation of the paper’s content."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What does subscript (i.e., [0], [1]) in Eq(10) mean?\n\n2. Have the authors tried other backbone models?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- GOLD’s adversarial latent generation is a novel approach that synthesizes pseudo-OOD data without auxiliary datasets, making it efficient and broadly applicable.\n\n- GOLD’s effectiveness is demonstrated through comprehensive experiments on five benchmark datasets, showing its robustness across various graph types and OOD scenarios.\n\n- The implicit adversarial objective and energy-based detection approach lead to a clear divergence between ID and OOD embeddings, validated by experimental visualizations.\n\n- This paper is well-structured."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents GOLD, a novel framework aimed at enhancing Out-of-Distribution (OOD) detection in graph neural networks (GNNs) without relying on external OOD data. GOLD introduces an implicit adversarial training pipeline where a latent generative model (LGM) is trained to generate embeddings that mimic in-distribution (ID) data, while an OOD detector is optimized to increase divergence between ID embeddings and these synthetic pseudo-OOD embeddings. It effectively simulates OOD exposure, helping the model distinguish OOD nodes in graph data. Extensive experiments demonstrate that GOLD outperforms state-of-the-art methods in OOD detection across several benchmark datasets without the need for real OOD data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This paper has no obvious weaknesses except for the training computational cost induced by the pseudo-OOD data. However, this cost increase is acceptable. And the authors have also discusses this issue in POTENTIAL LIMITATIONS section."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. The idea of implicit adversarial learning is novel.\n2. The experiments are solid and comprehensive, especially the improvements achieved on the FPR95 dataset is impressive.\n3. The paper is easy to follow and well-structured. The writing is good. The theoritical proofs are provided."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes the GOLD, a novel framework for graph OOD detection that employs implicit adversarial learning to generate synthetic OOD instances without the need for pre-trained models or additional OOD data. By utilizing a latent generative model to produce embeddings mimicking in-distribution data, which are then differentiated from actual in-distribution embeddings by a graph neural network encoder and an OOD detector, the method is able to effectively simulate the OOD exposure. The framework is evaluated on five benchmark graph datasets, demonstrating superior OOD detection performance without using real OOD data compared with the state-of-the-art OOD exposure and non-exposure baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I didn't see obvious weaknesses. Here I provide some suggestions:\n\nCould you provide a more in-depth analysis of the performance achievements on the FPR95 dataset, including an analysis from the perspective of the dataset's characteristics? Additionally, could you explain why similar significant improvements were not observed on other datasets (although the improvements on other datasets are also quite good)?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024gold,\ntitle={{GOLD}: Graph Out-of-Distribution Detection via Implicit Adversarial Latent Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y5einmJ0Yx},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite graph neural networks' (GNNs) great success in modelling graph-structured data, out-of-distribution (OOD) test instances still pose a great challenge for current GNNs. One of the most effective techniques to detect OOD nodes is to expose the detector model with an additional OOD node-set, yet the extra OOD instances are often difficult to obtain in practice. Recent methods for image data address this problem using OOD data synthesis, typically relying on pre-trained generative models like Stable Diffusion. However, these approaches require vast amounts of additional data, as well as one-for-all pre-trained generative models, which are not available for graph data. Therefore, we propose the GOLD framework for graph OOD detection, an implicit adversarial learning pipeline with synthetic OOD exposure without pre-trained models. The implicit adversarial training process employs a novel alternating optimisation framework by training: (1) a latent generative model to regularly imitate the in-distribution (ID) embeddings from an evolving GNN, and (2) a GNN encoder and an OOD detector to accurately classify ID data while increasing the energy divergence between the ID embeddings and the generative model's synthetic embeddings. This novel approach implicitly transforms the synthetic embeddings into pseudo-OOD instances relative to the ID data, effectively simulating exposure to OOD scenarios without auxiliary data. Extensive OOD detection experiments are conducted on five benchmark graph datasets, verifying the superior performance of GOLD without using real OOD data compared with the state-of-the-art OOD exposure and non-exposure baselines. The code will be released upon acceptance."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Network",
"Out-of-Distribution Detection"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d6f9e37d478eeacddcdbc2225a3f39eb5b4158a6.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "GOLD: Graph Out-of-Distribution Detection via Implicit Adversarial Latent Generation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y5tkxH7kxQ | Towards Efficient LLM Grounding for Embodied Multi-Agent Collaboration | main | Active | LLM planning;Large Language Models;Multi-Agent Collaboration | foundation or frontier models, including LLMs | 3;5;6;6 | 4;4;4;3 | 3;2;3;3 | 2;2;3;3 | 3;2;3;4 | 5 | 3.75 | 2.75 | 2.5 | 3 | -0.471405 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Q1. Can this approach work reliably in the Zero-shot coordination (unseen agents) setting? \n\nQ2. Does the success rate on Overcooked increase with more time steps? \n\nQ3: Is it possible to utilize a different LLM with a lower cost to analyze the ReAd-S of this method in the Overcooked-AI setting? Are LLMs other than GPT-4 able to perform low-level control in Overcooked where the action space involves only navigation and interaction? \n\nQ4: Why isn't ReAd-S compared with other methods using the Llama-3.1-70B model?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "S1: The method introduces a method to eliminate the need for direct physical interaction with the environment, instead utilizing a pre-trained critic to approximate an advantage score and internally replan the action. \n\nS2: The authors provide an extensive theoretical justification and guarantee for advantage decomposition in the multi-agent setting and empirically demonstrate the success of their method on two different benchmarks in multiple settings \n\nS3: Their method is able to generate direct low-level actions without the need for intermediate high-level action plans which is a significant advantage over previous methods\n\nS4: The method does not require access to exact probability of the sampled action from LLMs and can be used with closed-source LLMs"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new method for grounding LLMs for collaborative embodied multi-agent applications that utilizes a critic to score advantage values of actions based on the actions of partner agents. The critic provides advantage score for action predictions of the LLM to eliminate the need for direct physical interaction with the environment at inference time. They provide a detailed theoretical justification for the method and demonstrate superior grounding and coordination in the RoCo-Bench and Overcooked-AI environments compared to LLM-based baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1: Lack of discussion about generalization to unseen partners. In practical coordination scenarios, partner agents might not employ the same algorithms as the ego-agent. It is essential for coordination methods to be robust to unseen partners.\n\nW2: Limited test coverage on Overcooked AI benchmark - it looks like Reflexion and React have 0 % task completion rate. Experiments should consider more time steps as it might turn out that although Reflexion or React take more time, they could have a higher success rate. Additionally, Overcooked also has a multiple delivery objective, as performance in the second delivery might be improved by better time management during the first delivery. Testing only one delivery is insufficient.\n\nW3: The method utilizes a critic model that is trained in the environment. Whereas the baselines are all training-free approaches which might be an unfair comparison.\n\nW4: ““Choosing w at the current state s signifies all agents take no actions, then the next state s′ = s and the agents receive shared reward r(s, w) = 0 since w bring no changes to the environment.” is a strong assumption as wait states can lead to negative rewards in the environment depending on the task configuration.\n\nW5: The Overcooked-AI environment should be evaluated with ReAd-S to ensure the reliability of the method in the decentralized setting"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the Weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper presents an innovative approach by leveraging MARL theories to enhance collaborative behavior in LLM-based agents within multi-agent environments. The theoretical foundation is well-developed, allowing readers to gain a deep understanding of the approach. Furthermore, the experimental results and their detailed analysis contribute valuable insights to the research community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel feedback mechanism, Reinforced Advantage feedback (ReAd), which combines the self-improvement abilities of large language models (LLMs) with advantage decomposition techniques from multi-agent reinforcement learning (MARL). This closed-loop feedback strategy with the introduce of a critic module enhances cooperation in multi-agent settings by training LLMs to learn a sequential advantage function using critic regression. Experiments on two multi-agent benchmarks indicate that ReAd outperforms existing feedback-based baselines and state-of-the-art methods, highlighting its effectiveness in promoting collaborative strategies among LLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Since the paper takes insights from marl theories, including some marl methods as a baseline will further strengthen the robustness of the results\n\n- Even with the _Difficult Variants_ of RoCoBench, the proposed method has achieved a nearly 100 percent success rate, so it seems the task may still be too easy or oversimplified.\n\n- The experimental results showing a 0% success rate for all baselines on the overcooked-ai benchmark need further explanation. The original overcooked-ai paper reported RL baselines with non-zero performance—why aren’t they included here?\n\n- As noted by the authors, there is a simplification of the visual perception and the generalizability of ReAd to more complex scenarios remains uncertain."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The authors argued that multimodal VLMs such as GPT4o don't always parse the scene reliably. Have you tried query GPT4o with both the agent prompt (shown in Fig 1) and image input and use GPT4o as the planner instead of the image parser? \n\n2. Have you thought about using another LLM as a critic?\n\n3. How does ReAd-J compare to existing RL baselines on Overcooked?\n\n4. It looks like none of the other LLM baselines succeeded in a single episode in Overcooked. This looks strange. Could you explain why they failed? Is it because they didn't complete the task within a fixed number of timesteps or they actually never completed the task? How was the timestep limit set? Does the results change if you increase the limit?\n\n5. Did you try the models on the original RoCoBench? Why create a new dataset instead of using the original one, which has been tested/used by many existing studies?\n\n6. Do other LLM baselines (MindAgent, React) have agent communication in your DV-RoCoBench experiment?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea of incorporating some critic in the LLM planner is promising and can mitigate hallucination among the LLMs. It's an interesting and important problem for the AI/LLM community.\n\nThe paper is well written and easy to understand. The experiment is well done and the analysis is thorough and detailed. \n\nI particularly appreciate the extensive discussions and analyses presented in the appendix, as they openly address the model's robustness and limitations. This level of detail not only strengthens the credibility of the research but also provides valuable insights for future work in this area.\n\nOverall, I applaud the authors for this well-written work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel algorithm Reinforced Advantage feedback (ReAd), which leverages LLM for multiagent planning. The paper tests the algorithm on two domains, RoCoBench and Overcooked. The results show that ReAd outperformed existing LLM-based baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Model: The main weakness of the proposed algorithm is its limited generalizability. It seems like the critic NN is trained over hundreds of plans/trajectories in each domain. If this is the case, then this model becomes very domain-specific and would need a new critic for each new domain. That defeats the purpose of using LLM as a planner, which is meant to be fast and easily adaptable to different domains so you don't have to build any domain-specific model. I wonder if you can use LLM (maybe with few-shot prompting) as a critic?\n\n2. Baseline choice: The paper only considered LLM baselines. Since the model is essentially doing RL, why not include RL baselines as well? If you are already training the critic to estimate some reward functions, then why do you need the LLM planner at all? Or at least you can try a version where you use RL for planning and LLM for communication among agents. There're also a lot of existing work on Overcooked AI. Does your model outperform those baselines?\n\n3. Baseline implementation: If I recall correctly, LLM agents such as MindAgent and React don't have communication modules. I encourage the authors to include more details on how these baselines are implemented. If these baselines don't actually allow communication among agents in DV-RoCoBench, then it's not a fair comparison.\n\n4. Evaluation: The paper would be much stronger if it would include a human experiment/evaluation, which was used in MindAgent and RoCo.\n\n5. Presentation: It would make the results more readable if you can put ReAd-J and ReAd-S next to each other in Fig 3."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In Line 38, I wonder about the author's mentions of \"lack of task-specific knowledge\" instead of domain-specific or environment-specific knowledge. LLMs possess commonsense actionable knowledge (i.e., knowledge to perform tasks) but have never learned the characteristics of the actual environment where this knowledge should be applied.\n\n2. What is the size of dataset \\mathcal{D}?\n\n3. Rather than directly fine-tuning the LLM, it uses prompting to employ the LLM as an advantage function optimizer. How is the policy iteration implemented where (i) the LLM (behavior policy) is fine-tuned using the advantage function learned from dataset \\mathcal{D}, and (ii) this fine-tuned policy becomes the behavior policy to collect new data? Does dataset collection by the behavior policy occur only once?\n\n4. What is the neural network architecture of the advantage function?\n\n5. Are environment observations converted to text before being passed to the LLM agent?\n\n6. Does using the Advantage function (or Q-function) directly as a policy significantly decrease performance? I'm curious about how a conventional discrete action space RL approach would perform, where Q-values are calculated for all actions and the action with the highest Q-value is selected."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Grounding LLM's reasoning prowess in physical environments is a highly promising and crucial field that can help leverage continuously evolving LLMs for real-world problem solving.\n\n2. Addressing interaction costs in embodied environments is a challenging yet practical problem that can resolve various factors, including safety issues and LLM inference costs.\n\n3. The paper is well-organized and written clearly without confusion, making it easily accessible to readers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work discusses grounding LLM's common sense reasoning capability in physical environments. Specifically, it explores methods to not only generate appropriate actions but also effectively increase interaction efficiency when using Multi-Agent LLM agents in embodied environments. Based on the theoretical background of multi-agent systems, the authors utilize a learned advantage function as immediate feedback for LLM agents. The LLM is prompted to generate actions with high advantage values, and if the generated action plan has low advantage values, it is designed to perform replanning autonomously. The superiority of this framework is demonstrated through experiments across multiple environments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main concern is the novelty of the contribution. Applying MARL's theoretical aspects to embodied multi-agent collaboration appears quite straightforward, and seems independent from the authors' stated goal (as written in Line 77) of enhancing LLMs' reasoning capabilities.\n\n2. While the introduction addresses interaction efficiency as a major issue in prior work, the data collection efficiency of the proposed learning method should also be discussed. This TRPO-style on-policy learning approach is generally known to be less data-efficient than off-policy RL algorithms (even in single-agent settings).\n\n3. I am concerned if the comparison between baselines is fair. For instance, was the same amount of environmental interaction required to create ReAd's training dataset also provided to ReAct and Reflexion?\n\n4. If data collection through interaction is possible, I think the current environmental tasks are relatively simple. For example, using sparse rewards (success/fail) attached to the end of collected dataset trajectories for retrieval-augmented task planning could enhance LLM reasoning more simply and efficiently than the proposed method. Please refer to paper [1]. Could the authors include [1] as a baseline in their experimental comparisons?\n\n[1] LLM-Planner: Few-Shot Grounded Planning for Embodied Agents with Large Language Models, CVPR 2023"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present a novel and theoretically supported feedback for closed-loop LLM planning in multi-agent collaboration."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Efficient {LLM} Grounding for Embodied Multi-Agent Collaboration},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y5tkxH7kxQ},\nnote={under review}\n}"
},
"abstract": {
"value": "Grounding the reasoning ability of large language models (LLMs) for embodied tasks is challenging due to the complexity of the physical world. Especially, LLM planning for multi-agent collaboration requires communication of agents or credit assignment as the feedback to re-adjust the proposed plans and achieve effective coordination. However, existing methods that overly rely on physical verification or self-reflection suffer from excessive and inefficient querying of LLMs. In this paper, we propose a novel framework for multi-agent collaboration that introduces Reinforced Advantage feedback (ReAd) for efficient self-refinement of plans. Specifically, we perform critic regression to learn a sequential advantage function from LLM-planned data, and then treat the LLM planner as an optimizer to generate actions that maximize the advantage function. It endows the LLM with the foresight to discern whether the action contributes to accomplishing the final task. We provide theoretical analysis by extending advantage-weighted regression in reinforcement learning to multi-agent systems. Experiments on Overcooked-AI and a difficult variant of RoCoBench show that ReAd surpasses baselines in success rate, and also significantly decreases the interaction steps of agents and query rounds of LLMs, demonstrating its high efficiency for grounding LLMs. More results are given at \\url{https://read-llm.github.io/}."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM planning",
"Large Language Models",
"Multi-Agent Collaboration"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a30f6c123365d3adf6a7eecad21feaf4d53e6503.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Efficient LLM Grounding for Embodied Multi-Agent Collaboration"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y6wVRmPwDu | QuantBench: Benchmarking AI Modeling for Quantitative Investment | main | Active | deep learning;quantitative investment | datasets and benchmarks | 3;3;3;8 | 4;2;4;4 | 2;2;3;3 | 2;2;2;3 | 2;2;3;4 | 4.25 | 3.5 | 2.5 | 2.25 | 2.75 | 0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Given the increasing use of Large Language Models (LLMs) and Deep Reinforcement Learning (DRL) in quantitative finance, does the team have plans to integrate these techniques into QuantBench in future versions?\n\n2. Would the authors consider expanding the platform to include risk-evaluation metrics? Could the author provide insights into the technical feasibility of incorporating stress testing in QuantBench, perhaps by simulating market shocks based on historical crisis periods?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Originality: The paper tried to provide a standardized, open-source environment specifically designed for quantitative finance, which supports quant trading tasks from factor mining to order execution, representing an integration of existing modeling methods with advanced data processing and evaluation mechanisms.\n\n2. Quality: The quality of the work is OK, providing comparison across multiple dimensions, including model architecture, training objectives, and validation strategies.\n\n3. Clarity: The paper is well-organized and explains the design and objectives of QuantBench.\n\n4. Significance: The platform currently focuses on established techniques and common performance metrics, providing a useful standardized framework for evaluating models in quantitative finance, and its significance might be impactful within the industry."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents QuantBench, an industrial-grade benchmark platform designed to standardize and enhance AI research in quantitative finance. The platform integrates diverse data sources, including market, fundamental, relational, and news data, and providing a layered approach that spans alpha factor mining, modeling, portfolio optimization, and order execution. It also supports multiple data resolutions, from quarterly to tick-level data, facilitating a range of quant tasks and enabling multi-scale strategies. \n\nQuantBench categorizes models based on architectural design and training objectives, evaluating temporal models for time-series data and spatial models for relational data. It also examines the impact of training objectives, including classification, IC, MSE, and ranking losses, finding that different objectives yield varied results based on model architecture.\n\nExperiments demonstrate that model updating frequency, validation set construction, and ensemble methods significantly impact model performance, particularly in managing alpha decay and reducing overfitting, which suggests future directions in continual learning, robust validation strategies, and ensemble diversity to further improve predictive accuracy and model stability.\n\nQuantBench aims to provide a standardized and open-source platform that fosters collaboration and bridges the gap between academic research and practical industry applications in AI-driven quantitative investment."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper's originality may be somewhat limited in terms of introducing novel methodologies or fundamentally new problem formulations. Although this application to quant finance is useful, the overall concept of a benchmarking platform is not entirely new. Moreover, the paper is limited by its focus on classical machine learning and deep learning algorithms, without including more recent advancements in AI specifically tailored to quantitative finance, such as Large Language Models (LLMs) and Deep Reinforcement Learning (DRL).\n\n2. The paper’s focus on predictive accuracy and return metrics is valuable, but it lacks depth in evaluating risk management, a crucial aspect of quantitative trading. Since QuantBench aims to align with industry standards, it would benefit from incorporating a broader set of risk metrics beyond Sharpe ratios—such as maximum drawdowns, downside risk, or conditional value-at-risk (CVaR)—to provide a more comprehensive assessment of model robustness.\n\n3. The platform currently focuses on established techniques and common performance metrics, may not fully address the rapidly evolving needs of practitioners and researchers looking for innovative solutions in quantitative trading."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Dear authors, \n\nWe consider the work interesting and relevant. Nevertheless, we would like to point out certain improvement opportunities.\n\nGENERAL COMMENTS\n\n(1) - \"The results in Table 2 indicate that DNNs generally outperform tree-based models in IC\" -> The authors only compare an LSTM model against an XGBoost model. We consider the claim to be too strong for the referenced results. To strengthen the claim, we encourage the authors to conduct experiments with additional tree models (e.g., random forest, LightGBM, CatBoost, ...) and contrast them to the results for deep learning models mentioned in Table 3.\n\n(2) - We encourage the authors to include some statistical tests to assess whether the differences among results are statistically significant. This could be an important step in the benchmark when analyzing the results.\n\n(3) - The authors mention three task-agnostic metrics (robustness, correlation, decay) but it is unclear where they were reported and assessed when considering the results obtained.\n\n(4) - The authors provide a timeline of models covered by QuantBench in Figure 3. Nevertheless, the experiments do not cover some of the most recent ones. Would it be possible to include them?\n\nTABLES\n\n(5) - All tables reporting metrics: (a) provide arrows next to the metric names (up/down) indicating whether a higher/lower result is better; (b) bold the best results, (c) align numbers to the right, to make differences of magnitude evident to the reader.\n\nMINOR COMMENTS\n\n(6) - \"charaterstic-sorted\" -> \"charateristic-sorted\"\n\n(7) - \"Wikidata’s inclusion yielded minimal improvements, possibly because the information is already widely known and exploited by other market participants.\" -> Please provide some additional context to understand how the fact that other market participants could exploit such information affects the results or the data used to train the model.\n\n(8) - \"QuantBench is designed to provide a holistic view of the full quant research pipeline, rather than focusing on specific techniques such as reinforcement learning\" -> It seems that the current version of the proposed QuantBenchmark focuses only on a specific type of methods (e.g., no reinforcement learning methods were considered). We encourage the authors to provide a more detailed perspective on what is currently supported and the ambition to support in the future. Are there some kinds of models that will not be considered in the future (e.g., reinforcement learning)?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- the authors propose a novel benchmark for artificial intelligence methods on quantitative investment\n- they compare the proposed benchmark with existing SOTA benchmarks\n- they perform a comprehensive evaluation of machine learning methods related to quantitative investment"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce a new industrial-grade benchmark with three goals in mind: (a) alignment with quantitative investment industry practices, (b) flexibility to integrate artificial intelligence algorithms, and (c) provide a pipeline that covers the whole quantitative investment process. They also perform an empirical study that reveal some new research directions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- the authors make some claims that are not backed by substantial experimentation\n- the authors should include some assessment regarding the statistical significance of the results obtained"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "None"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "see above"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper provides a benchmark platform for AI methods in quantitative investment. It offers as strength \"standardization with quantitative industry practices\", which is not a strength in general. It might be an advantage for transferability of results, but in general industry standards can restrict innovation and are often rather complex. Another strength argued for is the integration of various AI algorithm, which should be considered a minimum requirement rather than a key strength. The last strength, i.e., full-pipeline coverage is more interesting, as other benchmarks often only provide data, putting the burden on pre-processing on the researcher, which also makes reproducibility more challenging. The paper focuses on non-GenAI (though it does assess transformers), which is ok, though it is certainly a restriction, as from personal experience I know a number of investors that use, e.g., ChatGPT for the better or worse. That is, the approach of feature engineering is very heavily embedded in the platform (e.g., they provided extracted features from news), though overall receiving less and less attention in AI. Also the data is only vaguely described in Section 2 and 3, leaving the reader puzzled about the basic composition of the benchmark. \n\nA major concern is that the paper fails to discuss any ethical (and regulatory) concerns, such as bias (or compliance, which is huge topic in industry, which the seem to aim at)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "see above"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the question listed in the weakness section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors effectively convey the importance and necessity of having a benchmark platform like QuantBench in the realm of quantitative finance. \n\nThe abstract and introduction are well-structured and clearly articulate the research goals and the authors' approach, providing a solid foundation for understanding the study's scope and significance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose the QuantBench, which is a platform claimed to boost quant research efficiency by eliminating burdensome preprocessing tasks, enabling researchers to focus on algorithmic innovations. They state that it serves as a bridge between academia and industry, facilitating the implementation of advanced algorithms in investment strategies and enhancing practical applications of academic research. Through standardization, they believe that QuantBench can improve communication between these sectors, accelerating AI advancements in quantitative investment. They also conducted multiple empirical studies using QuantBench to explore key research directions, including addressing quant data's distribution shifts, evaluating graph structures, and assessing the real-world applicability of neural networks versus tree models. The outcomes of these studies highlight the needs for robust modeling approaches, such as training models with inherent diversity or implementing causal learning methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The benchmark incorporates models that do not include some state-of-the-art (SOTA) approaches. Although the authors present a clear roadmap for model development in Figure 3, the experiments only include a limited selection of earlier models. Plus, there are more recent related work which can be included in the model comparison, like [1]. Could you please clarify why the SOTA methods are excluded from your model comparisons? The absence of these comparisons leaves the derived insights less compelling. \n\n[1] Wang, S., Yuan, H., Zhou, L., Ni, L. M., Shum, H. Y., & Guo, J. (2023). Alpha-gpt: Human-ai interactive alpha mining for quantitative investment. arXiv preprint arXiv:2308.00016.\n\n(2) While Section 3 of the paper extensively describes the data collection process for the benchmark, the specifics regarding the data inputs for each model type outlined in Section 6.2 remain ambiguous and potentially inconsistent. It is noted that the authors claim their selected models utilize textual feature inputs; could you provide more detailed explanations of how models categorized under 'Tabular' incorporate these textual features? Additionally, the paper mentions that graph models use graphical features as partial inputs, yet it lacks essential details. It is crucial to include information on whether other comparative models also utilize graphical features, how they do so, and where one can find details about the graph construction to assess the quality of your graph generation.\n\n(3) And model categorization in Table 3 lacks strong reference. Would you please provide the corresponding academic reference for your model classification?\n\n(4) Section 6.4 appears to lack crucial details, as the authors do not provide the necessary context for alpha decay, such as its typical usage and the commonly significant levels in the finance domain. Additionally, although multiple models are discussed in the previous section, it is unclear which model is used to create the visualization in Section 6.4. The authors have not clearly specified this. It would be beneficial for the clarity and completeness of the analysis if these points were addressed in the text.\n\n\n(5) The shadow color choices used to represent 1 standard deviation for the two types of returns could be improved by using different colors. Currently, it is challenging for readers to distinguish between them. Using distinct colors would enhance clarity and make it easier to differentiate the returns visually.\n\n(6) Given that the paper presents multiple studies to claim the applied scenarios of QuantBench from various perspectives, the derived insights are dispersed and challenging to follow. It is essential to include a conclusion section that consolidates these insights into a summarizing paragraph. This section should discuss how QuantBench can enhance real-world investment strategies and identify the unresolved questions that persist. Such a conclusion will provide a comprehensive overview, highlighting the platform’s practical implications in the industry and directing future research towards addressing the remaining challenges."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A systematic benchmarking platform for AI-driven quantitative investment"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024quantbench,\ntitle={QuantBench: Benchmarking {AI} Modeling for Quantitative Investment},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y6wVRmPwDu},\nnote={under review}\n}"
},
"abstract": {
"value": "The field of artificial intelligence (AI) in quantitative investment has seen significant advancements, yet it lacks a standardized benchmark aligned with industry practices. This gap hinders research progress and limits the practical application of academic innovations. We present QuantBench, an industrial-grade benchmark platform designed to address this critical need. QuantBench offers three key strengths: (1) standardization that aligns with quantitative investment industry practices, (2) flexibility to integrate various AI algorithms, and (3) full-pipeline coverage of the entire quantitative investment process. Our empirical studies using QuantBench reveal some critical research directions, including the need for continual learning to address distribution shifts, improved methods for modeling relational financial data, and more robust approaches to mitigate overfitting in low signal-to-noise environments. By providing a common ground for evaluation and fostering collaboration between researchers and practitioners, QuantBench aims to accelerate progress in AI for quantitative investment, similar to the impact of benchmark platforms in computer vision and natural language processing."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"deep learning",
"quantitative investment"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/552d6354a282b59c5939750f7e01443e403a90fc.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/240b79b6213ec4802a7816bb5d23fdedbb656cbf.zip"
},
"title": {
"value": "QuantBench: Benchmarking AI Modeling for Quantitative Investment"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y7Ud3RAPT8 | MolCoMA: Complementary Masking Strategy for Promoting Atom-Level Multi-Modal Molecular Representation | main | Active | Multi-modal Fusion;Molecular Pretraining;Molecular Representation Learning | applications to physical sciences (physics, chemistry, biology, etc.) | 3;3;5;5 | 4;4;4;4 | 2;1;3;3 | 2;1;2;2 | 3;2;2;2 | 4 | 4 | 2.25 | 1.75 | 2.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- [Q1] When fusing the graph and geometry features prior to feeding them to the unified encoder, is there any mechanism used to distinguish tokens from the 2D graph and those from the 3D geometry?\n- [Q2] The overall pipeline highly resembles MultiMAE [B] in computer vision, where a non-uniform masking strategy was used by sampling probabilities from a Dirichlet distirbution, then masking each modality with its corresponding probability. Have the authors considered non-uniform masking probabilities as such?\n- Small typo in Line 271: did you mean \"concatenate\" instead of \"contact\"?\n\n[D] Bachmann et al., MultiMAE: Multi-modal Multi-task Masked Autoencoders. ECCV 2022."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- [S1] **Simplicity of approach.** Each component of MolCoMA is simple and is presented clearly such that the paper is easy-to-follow.\n- [S2] **Interesting Topic.** Pretraining GNNs that are generalizable to various molecular property prediction tasks is a well-studied problem, and MolCoMA could be a good addition to try in the molecular pretraining literature for practitioners."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes MolCoMA, a self-supervised molecular pretraining framework that integrates information from both 2D topology and 3D geometry in a fine-grained manner. The architecture is consisted of two modality-specific encoders, leading to a unified cross-modal encoder that leverages a complimentary masking strategy to mitigate representational overlap between the two modalities. 2D and 3D modalitity-specific decoders then perform feature reconstruction and conformer denoising, respectively, in addition to a cross-modal node-wise feature reconstruction task that aims to recover 2D features from the 3D features. Experiments on MoleculeNet and QM9 datasets show that MolCoMA outperforms previous work on diverse molecular property prediction tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Unclear motivation and intuition.** Despite complementary masking being the main contribution of this paper, the intuition behind the approach is hard to understand.\n - [W1] In general, a more difficult pretraining task is expected to bake in more useful knowledge into neural networks as seen with using hard negatives in contrastive learning literature [A]. Based on this intuition, it seems counterintuitive that complementary masking outperforms random/uniform masking as the ground-truth targets can easily be found based on nodes from the other modality during pretraining. In other words, how can we expect the 2D-specific encoder to perform well on downstream tasks with no 3D conformers, if it was pretrained towards relying on 3D-information, which is not present during finetuning?\n - [W2] Throughout the paper, the authors mention 2D and 3D information \"each possess unique representational strengths\" (Line 210), yet claim that uniform/random masking used in previous work \"result in feature redundancy due to the overlap\" (Line 207), which makes the main motivation for complementary masking self-contradictory. If 2D and 3D information indeed contain unique information, shouldn't it be the case that random masking should be enough since they express different knowledge? But if there is large representational overlap, that means complementary masking makes the task too easy for the model, which brings us to the point of W1 above. Either way, the intuition behind the proposed method needs further discussion and clarification.\n\n- **Insufficient justification on method design choices.** The proposed architecture and objective are not justified properly and requires further discussion.\n - [W3] The unified encoder composed of vanilla Transformer layers fails to preserve symmetry within the data distribution, which is crucial in ensuring generalizability and robustness. Specifically, SE(3) transformations $g(\\cdot)$ on the 3D geometry $\\mathbf{R}$ are not respected for 3D noise prediction (i.e., $f_{\\mathbf{\\theta}}( \\mathbf{X}, \\mathbf{E}, g(\\mathbf{R}) ) \\neq g(f_{\\mathbf{\\theta}}( \\mathbf{X}, \\mathbf{E}, \\mathbf{R})$) as vanilla attention is not equivariant to SE(3) roto-translations. This equivariance property could be enforced (1) approximately via data augmentation [B] or (2) exactly by replacing the attention mechanism [C], but MolCoMA discusses neither of these, making the architecture design less reliable. \n - [W4] The cross-modal reconstruction objective seems ill-defined, in the sense that the model is trained to map 3D features that are responsible for predicting the ground-truth noise (thereby depends on the noise added to the 3D conformer), to 2D features that are stable regardless of the noise. In effect, this could result in a suboptimal trade-offs for the 3D denoising task, yet this discussion is only done vaguely in Lines 324-332.\n - [W5] Lastly, the final objective (Equation 9) involves a weighted sum of three distinct loss functions, without any guidance on how the weights should be set. It would be interesting to test how the performance varies with different weights (other than $\\alpha_1 = \\alpha_2 = \\alpha_3 = 1$ case), possibly leading to insights on how each modality contributes to molecular property prediction.\n\n[A] Robinson et al., Contrastive Learning with Hard Negative Samples. ICLR 2021.\\\n[B] Quiroga et al., Revisiting Data Augmentation for Rotational Invariance in Convolutional Neural Networks. 
AISC 2019.\\\n[C] Fuchs et al., SE(3)-Transformers: 3D Roto-Translation Equivariant Attention Networks. NeurIPS 2020."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- What is the difference between $H(l)$ and $H^{(l)}$ in Eq (4)?\n- How to choose the hyperparameters provided in Table 5? Since molecular representation learning frameworks are often sensitive to the hyperparameters, the hyperparameter search strategy is also important."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- MolCoMA achieves strong performance in two standard molecule benchamrks, MoleculeNet and QM9.\n- The idea of unified encoding with complementary masking is simple yet effective. Although I think this is not new in the self-supervised learning community, e.g., [1-2], but the idea has been under-explored in the molecule domain yet, so it is somewhat novel.\n\n[1] MixMAE: Mixed and Masked Autoencoder for Efficient Pretraining of Hierarchical Vision Transformers \\\n[2] Mixed Autoencoder for Self-supervised Visual Representation Learning"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces MolCoMA, a molecular representation learning framework that leverages two modalities of molecules: 2D topology and 3D geometry. Specifically, MolCoMA pretrains 2D and 3D molecule encoders (with another unified encoder) through masked auto-encoding with complementary masking. This masking strategy encourages the model to learn modality-specific characteristics more effectively, filling in the gaps between the complementarily-masked modalities. As a result, the pre-trained 2D and 3D encoders via MolCoMA achieve state-of-the-art performance on 2D downstream tasks (MoleculeNet) and 3D downstream tasks (QM9)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The method is not comprehensively described.\n - What are the data transformations, $T\\_\\text{2D}$ and $T\\_\\text{3D}$?\n - Is the unified encoder a vanilla bidirectional transformer? Does it simply take the mixed representations as an input sequence?\n - Is the output sequence of the unified encoder directly passed into the 2D and 3D decoders?\n - What is the Simple GNN Tokenizer? Is it randomly initialized or a pretrained model? Due to the stop-gradient operation, the tokenizer is not optimized, so it requires a detailed description.\n - When is the complementary masking applied? In Figure 1, it appears that masking is applied before modality-specific encoding. However, in Section 3, it seems that masking is applied right before unified encoding.\n - In cross-modal reconstruction, $h\\_\\text{2D}$ and $h\\_\\text{3D}$ appear to be modality-specific representations that do not rely on the unified encoder. Is this correct? (I assume $h\\_\\text{2D}$ and $h\\_\\text{3D}$ are only defined in L252-L263).\n - What is meant by avoiding the use of visible geometric tokens in L333? It is difficult to understand how cross-modal reconstruction works and why visible tokens should be avoided.\n - Overall, there are several confusing notations, and many parts are not clearly explained. I strongly recommend the authors use concrete mathematical notations to reduce confusion.\n2. Some concerns on experimental results.\n - Some baselines use different pretraining datasets compared to this paper. For example, MoleBERT uses 2M molecules from the ZINC15 database, and 3D-EMGP and GraphMVP are pretrained on the GEOM dataset containing 50K–100K molecules. However, MoleCoMA uses PCQM4Mv2 with 3.4M molecules. This difference in pretraining datasets could significantly impact performance, so I wonder if MoleCoMA is still effective with a different (smaller) dataset.\n - The paper lacks analysis of the effectiveness of the unified encoder. Since the authors claim the unified encoder as a contribution of this paper, providing such analysis is crucial.\n - One way to interpret the quality of molecular representations is molecule retrieval based on the learned representations. If the retrieval finds chemically similar molecules well, one could infer that the learned representations are chemically informative and useful for downstream tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Can the attention map of the Unified Encoder be visualized to examine the interactions between modalities?\n2. Can the outputs of two unique encoders, which are not in the same representation space, be directly merged and encoded together without alignment?\n3. What impact does the structure of the GNN Tokenizer used as a 2D Decoder have on the results? For example, GCN, GAT, GIN, Graph Transformer.\n4. What impact do the three sub-objectives in Equation 9 have on performance individually? It would be helpful to provide an ablation analysis of the three sub-objectives."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The motivation is understandable and reasonable, as previous work on multi-modal molecular representation learning has overlooked the cross-modal complementarity at the atom level.\n2. The proposed method is technically sound, as similar approaches have already been proposed and validated in image-text multi-modal representation pre-training.\n3. The experimental results on MoleculeNet and QM9 look promising, and the proposed method has been compared with the latest baseline models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a multi-modal molecular representation pre-training framework, MolCoMA, to facilitate fine-grained interactions of intrinsic features across modalities. The framework employs two modality-specific encoders to capture unique characteristics of each modality and a unified encoder with a complementary masking mechanism to integrate these features. By optimizing in-modal and cross-modal reconstruction losses, this framework learns robust 2D and 3D molecular representations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The datasets used for evaluation in the experiments are relatively limited. For 2D, the evaluation is only conducted on classification datasets from MoleculeNet, without testing on regression datasets. For 3D, the evaluation is only conducted on QM9, without testing on datasets like MD17 or GEOM-Drugs.\n2. The Unified Encoder is a vanilla self-attention module that does not take structural inductive bias into account.\n3. Some details of the method are not clearly described, such as the implementation of the cross-modal reconstruction function in Equation 8."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Overall, the contribution of this paper seems incremental and the experimental evaluations are not fully convincing in several aspects. Please refer to weaknesses for details."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The proposed method interacts the 2-D information and 3-information, which is reasonable.\n2. The proposed method achieves promising results in different tasks.\n3. The paper is well-written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel molecular representation learning framework, termed MolCoMA, which demonstrates promising performance on the MoleculeNet and QM9 datasets. MolCoMA leverages complementary information from both topological and geometric representations while facilitating cross-modal interaction. Notably, it employs a well-designed masking technique that obscures atoms across different modalities in a complementary manner. By integrating various types of molecular information, the proposed method effectively addresses a range of downstream tasks, including biological and quantum applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of novelty. The interaction between 2d-topology and 3d-geometry has already the explored in previous studies, such as GraphMVP [a], Transformer-M [b]. \n\n2. MolCoMA is pre-trained on PCQM4Mv2, while Mole-Bert is pre-trained on ZINC and GraphMVP is pre-trained on GEOM. Therefore, the comparison is not fair. To verify the effectiveness of the proposed method, the pre-training datasets of different methods should be the same.\n\n3. More benchmarks on 3D tasks should be conducted, such as MD17 and LBA. Moreover, the proposed method should be compared with recent works SLIDE [c] and Frad [d].\n\n4. Why is complementary masking better than other strategies? The masked information of one modality can be revealed by another modality. The authors should conduct more experiments or theoretical analyses between different masking strategies and verify why complementary masking is effective.\n\n5. To verify the effectiveness of the MolCoMA, it would be better to conduct experiments on more architectures such as EGNN [e]\n\n[a] PRE-TRAINING MOLECULAR GRAPH REPRESENTATION WITH 3D GEOMETRY, ICLR 2022.\n\n[b] ONE TRANSFORMER CAN UNDERSTAND BOTH 2D & 3D MOLECULAR DATA, ICLR 2023.\n\n[c] Sliced Denoising: A Physics-Informed Molecular Pre-Training Method, ICLR 2024.\n\n[d] Fractional Denoising for 3D Molecular Pre-training, ICML 2024.\n\n[e] E(n) Equivariant Graph Neural Networks, ICML 2021."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Complementary masking strategy enhances atom-level multi-modal molecular representation"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024molcoma,\ntitle={MolCo{MA}: Complementary Masking Strategy for Promoting Atom-Level Multi-Modal Molecular Representation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y7Ud3RAPT8},\nnote={under review}\n}"
},
"abstract": {
"value": "Molecular representation learning, which captures the fundamental characteristics of chemical compounds, is crucial for AI-driven drug discovery. Methodologies exist that integrate various modalities (e.g., 2D topology and 3D geometry) and develop robust representations. However, current multi-modal fusion strategies either align embedding space through independent models separately, thereby overlooking complementary information, or bridge modalities at a coarse-grained level, failing to capture inherent correlation. To facilitate fine-grained interactions of intrinsic features across modalities, this study presents MolCoMA, an innovative pretraining framework for Molecular representation, employing a unified encoder that leverages Complementary Masking mechanism. Specifically, we first employ two distinct encoders to capture the unique characteristics and structures inherent in different modalities. We then utilize a unified encoder accompanied by a customized complementary masking strategy to seamlessly integrate information, mitigating overlap and similarity between 2D and 3D representations. Finally, we incorporate a cross-modal reconstruction module to enhance fine-grained interactions at the atomic level. Extensive experiments demonstrate that our model outperforms existing molecular pretraining methods across both 2D and 3D benchmarks. This finding underscores the effectiveness of our approach to fusing information between modalities."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multi-modal Fusion",
"Molecular Pretraining",
"Molecular Representation Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/39dc003a2444c2b363bfd7bb7ab32962a65d326a.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MolCoMA: Complementary Masking Strategy for Promoting Atom-Level Multi-Modal Molecular Representation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y80D4IojuY | Agent-to-Sim: Learning Interactive Behavior Model from Casual Longitudinal Videos | main | Active | dynamic 3d reconstruction; multi-video registration; motion generation | applications to computer vision, audio, language, and other modalities | 3;6;6;6 | 3;2;4;5 | 2;3;2;3 | 3;3;3;3 | 2;3;3;4 | 5.25 | 3.5 | 2.5 | 3 | 3 | 0.258199 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper proposes a framework to learn interactive behaviors of agents in 3D worlds, which I believe is an interesting and promising direction. With the rise of embodied AI, this paper offers a novel approach to leveraging real-world videos for understanding interactions between agents and their environment.\n\n2. This paper is clearly written and well-presented, with an impressive demo that effectively showcases the benefits of the proposed framework.\n\n3. The proposed methods are logical for building the entire system and have been thoroughly validated through experiments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a framework called Agent-to-Sim to learn the interactive behaviors of 3D agents in a 3D environment from casually captured videos. Specifically, a coarse-to-fine registration method is developed for persistent and complete 4D representations. A generative model of agent behaviors of agent behaviors is trained to enable the generation of agent's reactions to the observer's motions. Interesting demos show the effectiveness of the proposed system."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In terms of behavior generation, there is extensive existing work on generating realistic human motions based on intentions or trajectories, with most approaches utilizing parametric models like SMPL. Given that the agent in the demo appears somewhat unrealistic, I was curious why parametric models weren't used for agent representation. For example, there are parametric models available for animals, such as [1,2].\n\n[1] Zuffi et al.: 3D Menagerie: Modeling the 3D Shape and Pose of Animals, CVPR 2017\n\n[2] Zuffi et al.: Lions and Tigers and Bears: Capturing Non-Rigid, 3D, Articulated Shape from Images, CVPR 2018\n\n2. Regarding the egocentric perception: is the egocentric visual/video data encoded as well? It is mentioned \"a latent representation from a\nlocal feature volume around the agent\", but an agent like a cat can not see the scene behind it. Besides, the proposed egocentric encoding to transform the world to the egocentric coordinates to avoid over-fitting as well as past encoding to capture the past motion sequence seem to have been proposed and well discussed in [3], which also predicts the motions from spatial controls.\n\n[3] Jiang et al.: EgoPoser: Robust Real-Time Egocentric Pose Estimation from Sparse and Intermittent Observations Everywhere, ECCV 2024\n\n3. The proposed system is trained and tested on the collected dataset. However, based on the information provided in the paper, this dataset appears quite small and lacks diversity, as it includes only 4 agents, 3 scenes, and has a limited duration. Given the availability of numerous large datasets featuring moving agents and varied environments, incorporating these existing datasets for evaluation would strengthen the findings. Additionally, I am curious about the generalization capability of the proposed method, as the experiments were conducted by training on only 22 videos and testing on a single remaining video. Do the training and testing data contain the same scenes? If so, this might lead to potential overfitting issues.\n\n4. I am interested in the potential applications of the proposed systems beyond the general areas mentioned in the paper, such as driving, gaming, and movies. It would be valuable to explore specific existing tasks to demonstrate how the system can provide practical benefits."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Please address the questions raised in paper weaknesses in the rebuttal. Please address the lacking details of the method description and the raised shortcomings of the evaluation."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The proposed approach to combine 4D scene reconstruction and training diffusion models for behavior prediction seems novel and is interesting.\n- The paper is mostly well written and is easy to follow.\n- The paper presents two contributions: 1. 4D scene reconstruction including body pose estimation of an agent using NeRF-based representations. 2. A hierarchical diffusion model for behavior prediction in the 4D scene representation.\n- The related work section provides a compact overview of the state-of-the-art for related fields.\n- The experimental evaluation demonstrates improvements over previous methods for 4D scene reconstruction and agent motion prediction. It also demonstrates improvements over several ablations of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Agent-To-Sim (ATS), a framework for learning models of agent behavior conditioned on the observer (ego-perspective) and the static background scene. The scene and the agent are represented using neural radiance field in a canonical state. Additionally, the dynamic motion of the observer and the agent as well as the agent's body deformation is represented using a sequence of SE(3) poses. The latter is achieved by representing the motion as a set of 3D Gaussians in the agent frame and mapping locations by blend-skinning. The NeRF representations are combined and rendered in the respective poses using ray integration. The dynamic scene reconstruction is obtained from video sequences using bundle-adjustment like optimization, initialized with a trained camera pose regressor. Behavior prediction for the agent is trained using a 3-stage diffusion model which subsequently predicts the goal sequence, the path of the agent, and finally its body pose sequence. The approach is evaluated for camera registration, 4D reconstruction, and behavior prediction accuracy quantitatively on one test video sequence for a cat. It is also compared with previous methods as baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper is too packed by presenting two contributions at once and by this important details (see below comments) are missing which hinder reproducibility of the approach.\n- l. 199ff, it is unclear how the rendering works. How are the NeRF representations of scene and agent combined in detail? Please provide further information either in main paper or supplemental material.\n- The paper references directly to several Figures in the appendix which is attached to the main paper without noting that this is supplemental material. Please indicate clearly in the main paper text that they are supplemental material when referencing the figure. Currently, it seems the paper would try to circumvent the page limit. \n- What is the orientation representation for regression in Eq. 5 ?\n- It is not clear why there is a need for a joint optimization of poses and representations T and D as indicated in Eq 6, because the video sequence is annotated with camera poses from a different (unspecified) system (l. 225). Please clarify which other method is used to obtain the camera motion trajectory. Why is it necessary to optimize the poses in Eq. 6 ? How would the approach perform without this post-optimization? Please add an ablation study. What is the point of training a neural localizer if the camera poses are known from a different system? \n- How are the agent root and body part poses intiialized for Eq. 6? \n- How are agent and static scene segmented? \n- l. 251ff, Is the training and swapping of beta performed across various scenes (i.e. different types of rooms) or only within specific scenes? \n- l. 263, why was T*=5.6s chosen and how does the performance of the method depend on the horizon length? This should be evaluated as supplemental results.\n- Fig. 2, it is not clear what the thin black arrows mean (e.g. between the motion fields and between score map and goal/path). What is the motion field depicting? Where is this output in the proposed method?\n- l. 304ff., the note on generalization to different actions seems problematic. The datasets used in the experiments seem small. The biggest dataset (cat) has only 22 training videos of a few minutes each. The anecdotal result pointed to in the supplemental material (Fig. 11) might be a rare example. Please discuss and revise the claims.\n- Eq. 11, what is \\Psi_s ? Please define.\n- l. 314ff, the text mentions trajectories, but the equation only transforms a single pose (\\xi^a). Please clarify.\n- l. 318ff, please explain why the approach uses the observer trajectory perceived by the agent and not the other way round. Please also provide an evaluation if this makes a difference. \n- l. 360, a segmentation method is mentioned,but it is not stated how it is used.\n- l. 401. GT relative camera pose is obtained by some unspecified process. How does it work in detail and how accurate is this process? Please provide this description in the supplemental material.\n- l. 428ff / Tables 4 and 5: the motion prediction is conditioned on ground-truth goal and path for the evaluation. However, this should also be evaluated for regressed goal and path to fully evaluate the proposed method without such oracle information.\n- How were ground-truth goal and path annotated in the data ?\n- It seems the paper lists several datasets (bunny, human, etc), but only shows results for the cat videos.\n- If possible, the datasets used in the evaluation should be made publicly available for future comparison with the proposed method. 
Is it planned to release the datasets publicly ? Please also specify how this data was collected. Are there potential personal data protection issues for releasing the data?\n- The datasets used for evaluation are rather small and, hence, results seem anecdotal and significance is unclear. The method should be evaluated quantitatively on more and longer sequences and a larger variety of agents (e.g., 10-20 different animals and human subjects) and scenes (e.g. 10-20 different rooms) to increase significance of results and to provide further insights into the performance and limitations of the proposed method. Also the variety of daily activities should be increased and annotated. Cultural and gender diversity of subjects should also be considered when designing a benchmark dataset. With larger datasets, also the dependence of the performance of the method on the amount of available training data could be evaluated. \n\nFurther minor comments:\n- p. 2, please add a label/caption to the lower figure and reference it from the text.\n- l. 76ff, the notation for \\sigma, c, \\psi and the variants c_s/a, \\sigma_s/a, and \\psi_s/a is not accurate. Are the subscripts referencing parts of the variables without subscript? What kind of mathematical structure are they? \n- l. 210 \"buil\" => \"build\"\n- Fig. 2 caption \"fully\" => \"full\"\n- Eq. 11, \"X_w\" should be \"X^w\"\n- l. 369, the reference to Eq 5 should point to Eq 6"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. I assume Equation 1 and 2 are through NERF context. it is not clear to train them, how to get the poses to train them?\n2. How to train Equation 2 when agent is in motion. Elaboration is needed.\n3. A picture is needed to show the representation of agent. It is not clear what is meant by root and how that is identified from video automatically for any morphology. \n4. Satio et. al. uses SMPL morphology where getting the skinning is understood. Here the agent can be anything. So the skinning part needs more elaboration to understand by the reader. What is meant by \"bones have time varying centers\" and how this impact the skinning? Need picture with explanation.\n5. What do we mean by set of 3D Gaussian as bones? How they are fitted in video? How they are initialised?\n6. Scene specific neural localiser regresses the camera poses, how to get the GT poses? In the following paragraph it is written that \"to learn neural localiser we first capture a walkthrough video and build a 3D map..\" how to build this 3D map? where the poses are coming from? Is this video is the same we are dealing for 4D reconstruction? Lots of confusion here.\n7.If the above 3D is in place , we can sample R*, T* and solve Eqn 5. But what is theta there and what is the importance of that parameter? Agent can move while R* can be fixed. This is not well understood.\n8. Dynamic 3DGS also require poses. How to get them? Equation 5 needs more illustrative explanation.\n9. Because the writing of registration section is bit convoluted questions are arising for Behavious representation and Goal generation. Why score based method was needed for generating goal? VAE could be good enough?\n10. Scene observer and past encoding is a complex design. When the scene is already encoded as a MLP(Eq 1,2) it is not clear why another encoding? Or am I missing something?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper shows an original pipeline to reconstruct 4D representation from multiple videos over longer timespan like a month. This is a complicated problem and the method solves this with good qualitative and quantitative results. \nThe choice of representation in every stages is carefully thought through and use of the body of knowledge is good. For example, the study by Banani et.al, which shows that large image models have good 3D and viewpoint awareness, is exploited nicely in this paper.\nThe decoupling of static and dynamic structure and registration is well designed and merged to solve the complicated relationships exist in a long scene with different variation in layout or color over time."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method for learning interactable behaviour of agent from casual video. To this end, the reconstruct the video (agent, scene and observer) in 4D. The representation has canonical structure in 3D for time independent element using NERF and a time varying structure (observer and agent). The paper proposes a coarse to fine registration of agent and environment into a canonical 4D space. This 4D representation further helps in behaviour representation further used in understanding the behaviour of agents and goal conditioned trajectory generation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Despite the strength, the paper writing is not very good for a reader. Many places the approach is not well understood. In many places argument is placed without much conviction, for example, neural localiser finds more robust than geometric correspondence while being computationally efficient is not well grounded through technical principles or empirical evidences. The ego perception of the world and scene, observer, and past encoding needs more elaboration in writing to make them understand. It is not clear how the path is generated given a goal? is this a shortest path or any other path. More doubts are raised as questions below.\n\nBecause the writing, required illustrations, and the flow of thought is bit convoluted, I am tending towards the low score of the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I have some questions on the paper writing:\n\n1. In eq.8, both \\sigma and \\epsilon indicate the noise? since it is mentioned in line 303 that \\sigma is a noise value. If it is the case, please clarify: Does \\epsilon indicate the variance of the noise? \n2. In line 310, how large is the volume queried from the 3D feature volume?\n3. What is the definition of the generated path? Is it the root translations in the scene?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "I appreciate the careful 4D agent-to-sim pipeline design, including canonical and time varying structure design, the camera localization and the optimization procedure. It enables the improvement of the interactive behavior simulation results as verified in the rendering quality and Interactive behavior prediction. \n\n1. the simulated behaviors or animations of cats and humans looks realistic and interacts with the scene geometry in a physics-plausible way, no obvious penetration between the agent and the scene geometry in the video demos. \n2. The hierarchical behavior simulation model, from goal, path to body poses, and egocentric perception features in this paper are verified to be effective in improving the accuracy of the behavior prediction."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a 4D reconstruction method and environment-aware behavior generation method for 3D agents. It represents the canonical structures for the scene and agents by MLPs as in NeRF, and represents the time-varying structure with camera pose, root pose and bone transformations at each frame. These structures are all learned from casually captured videos. After recovering the structures, three diffusion models are trained to generate goal, path and body poses conditioned the 3D feature field of the scene. \n\nThe main contribution of this paper is a complete pipeline to reconstruct and simulate interactive behaviors of 3D agents in a dynamic scene."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It is not clear how to collect the training data for neural camera localization. Does the camera move along with the agent or fixed during capturing? If it is fixed, there are only a few camera poses that can be used in the training. If it can move, that means we already have an algorithm to obtain accurate camera poses for training. \n2. The symbol \\sigma is used to represent density in Sec. 3.1 and noise in Sec. 3.3, a kind of misleading when reading the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Given monocular videos collected across a long time horizon (e.g., 1 month), we build interactive behavior models of an agent grounded in a 3D environment."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024agenttosim,\ntitle={Agent-to-Sim: Learning Interactive Behavior Model from Casual Longitudinal Videos},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y80D4IojuY},\nnote={under review}\n}"
},
"abstract": {
"value": "We present Agent-to-Sim (ATS), a framework for learning interactive behavior models of 3D agents in a 3D environment from casually-captured videos. Different from prior works that rely on marker-based tracking and multiview cameras, ATS learns natural behaviors of animal and human agents in a non-invasive way, directly from monocular video collections. Modeling 3D behavior of an agent requires persistent 3D tracking (e.g., knowing which point corresponds to which) over a long time period. To obtain such data, we develop a coarse-to-fine registration method that tracks the agent and the camera over time through a canonical 3D space, resulting in a complete and persistent spacetime 4D representation. We then train a generative model of agent behaviors using paired data of perception and motion of an agent queried from the 4D reconstruction. ATS enables real-to-sim transfer of agents in their familiar environments given longitudinal video recordings (e.g., over a month). We demonstrate results on pets (e.g., cat, dog, bunny) and human given monocular RGBD video collections captured by a smartphone."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"dynamic 3d reconstruction; multi-video registration; motion generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8b9663f15b6e43ab90c953d7cd0be864f04ebd82.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/3f51894728dacc7925fdc0f0df2264a11856c83f.zip"
},
"title": {
"value": "Agent-to-Sim: Learning Interactive Behavior Model from Casual Longitudinal Videos"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y8TjnkdWNA | Balancing Label Quantity and Quality for Scalable Elicitation | main | Active | Scalable oversight;Alignment;Safety;Few-shot learning;Eliciting latent knowledge;Weak-to-strong generalization | alignment, fairness, safety, privacy, and societal considerations | 3;3;5 | 3;3;3 | 3;2;3 | 2;2;2 | 1;2;2 | 3.666667 | 3 | 2.666667 | 2 | 1.666667 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could the paper provide more explanations about how the \"true\" labels for evaluation is obtained? If people cannot provide accurate high-quality labels, how do we evaluate machine performance on these hard tasks? Is there a chicken-and-egg problem here?\n\n2. Could the authors comment on how the reported results compare with human performance?\n\n3. What's the definition for these \"hard\" tasks? One precise piece of information mentioned in the paper is that people are \"less than 90% accurate\", but an accuracy of 90% does not appear too hard to me.\n\n4. Addressing my other main concerns in the \"Weaknesses\" section would be helpful."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper poses an important and interesting question about what the best methodology (i.e., what type of data to collect and what training method to use) is in this challenging regime of \"hard\" tasks where human data is unreliable or expensive. Improving LLMs' ability to generalize to these tasks has lots of potential in making positive societal impacts.\n\n2. The paper presents a wide range of experimental settings and results, which provide useful information for future practitioners working on improving LLMs on these hard tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers the task (termed \"scalable oversight\") of training LLMs on problems that human judgment is unreliable or expensive. The paper conducts experiments that vary {the type of large models, the datasets and associated tasks, the training methods}. Using results from these experiments, the paper observes different regimes where it can be more effective to only collect high-quality data, low-quality data, or a mix of both."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While I appreciate the efforts and empirical results by the paper, I feel that this paper looks more like an engineering technical results for the following reasons:\n\n1. Scientific rigor:\n\n(a) Please include error bars on all reported plots. If such errors are not available, please provide discussions on the statistical significance of the reported observations.\n\n(b) The experimental observations can be made more precise. For example, while the paper claims that \"$256-$512\" is the mixed regime in Figure 1, this appears very task dependent in Figure 2.\n\n(c) The discussion on limitations can be more in depth. For example, a distinction can be made on low/high-quality data provided by human vs. smaller LMs (as in the experiments). Does the paper specifically study training data provided by LMs? Does this generalize to low/high-quality data provided by people or not? Also in the experiments, the smaller LMs are trained on other data (\"we generate weak labels using small LMs that have been finetuned on the task\"), which confounds the conclusions made in the paper about low/high-quality data in the experiments.\n\n2. Unsubstantiated contributions:\n\n(a) The second claimed contribution includes \"microeconomic assumptions\", but as far as I can tell, this is not investigated in the paper. Appendix B appears unfinished. \n\n(b) I also think the third claimed contribution is very limited. While research framing can be a valid contribution, in this case, the problem of data usage and training methods is a well-known research topic and extensively studied in the past. The Pareto frontier is essentially a piece of empirical result that provides some practical evaluation and guidance on what training methods work better in what settings.\n\n2. Lack of context:\n\nI find the background knowledge introduced in this paper insufficient for audience beyond LLM practitioners. For example, what are the tasks \"BoolQ, HellaSwag, SciQ, Cola, ...\"? What's \"LoRA\" for supervised finetuning? What does it mean to \"early-stop based on validation AUROC\"? Why are \"Qwen 1.5-0.5B and Qwen1.5-4B\" chosen for the initial set of experiments? What's a new \"head\" for training? What's the procedure for \"few-shot prompting\" and what are \"in-context examples\"?\n\n3. Insufficient literature review:\n\nThe literature review only provides citations on very recent advances specifically tied to LLM development. I personally find it insufficient without giving credits or contextualizing the research question with respect to other lines and domains of work. Two of such domains are:\n\n(a) The field of crowdsourcing primarily concerns how to make use of noisy human data, which is closely relevant to the paper.\n\n(b) The field of economics computation literature concerns data valuation and pricing, which is also closely relevant to the paper.\n\n4. Clarity (minor):\n\n(a) In the result section, the paper mentions \"quality of weak labels\". What does this mean? I thought weak labels mean low quality in this paper.\n\n(b) I suggest the paper using different terms than \"quantity-dominant\" and \"mixed\". These terms are confusing because the quantity-dominant regime cannot afford enough weak labels, and the mixed regime can afford enough weak labels.\n\n(c) The wording \"scaling results\" in Section 4.1 is vague. What results does this refer to?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Why choose NLP classification task as the one to test this tradeoff? Will the result holds for other tasks? If this is unclear, what should the main takeaway for the reader of the work to be?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The question on the trade-off between quantity and quality is interesting; and the finding that no pure strategy (few high quality data and much low-quality data) is clear. The results are supported by extensive experimentation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper explored different ellicitation techniques on improving an NLP binary classification tasks under some fixed budgets. Under this budget, one can either have a lot of low quality data, or a few high quality data, or a mixed between the two. Using these tasks, the author explored the pareoto frontier between mixing the low and high quality data, finding that no pure strategy dominates."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My biggest concernis that the insights of the paper is not very clear. As it is true that there is a trade-off between quantity and quality, it is unclear how the specific finding in this paper can be generalizable to tasks beyond NLP binary classifications showcased in the paper. In this case, it might make sense to come up with a scaling law that captures this tradeoff."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- How and why were the tasks binarized? Does binarization meaningfully affect the results? \n- It seems like the gains from using only weak labels are generally quite limited. How does this relate to the results from Burns et al (2023)? \n- How do you determine that \"256 and 1024 high-quality finetuning examples do not reliably elicit knowledge from Llama-3-8B, but elicit most of Llama-3-70B’s knowledge.\"?\n- \"The quality of in-context examples is inconsequential, while the quality of SFT examples matters substantially.\"\n - Is this refering to a specific regime? The top/bottom left of figure 4 seems to show a big difference, at least for 2/8 in-context and few SFT samples. \n- Is there a typo in the caption of Figure 3? The accuracy of the weak labels displayed in the figure appears to be larger than 70.2%. Or is this the difference between training/test accuracy?\n- \"We use 3 random seeds, except for training runs where the smaller stage takes less than or equal to 10 examples, in which case we use 7 random seeds\"\n - How do you get less than 10 examples when the minimum budget is at 16?\n- Regarding the GPT-4 results, how well does GPT-4 perform on the tasks in a zero-shot manner?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The research question on quality-quantity tradeoffs is interesting. \n- While quite specific to the weak-to-strong setting, the experimental setup seems to be well-suited for the research question. \n- The experimental results cover multiple tasks and models and are somewhat comprehensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "- This paper investigates the quality/quantity tradeoff when finetuning an LLM on cheap low-quality and expensive high-quality labels. \n - The low quality labels are produced by a small LLM that has been finetuned to the task at hand (on a separate datasets), while the high-quality labels appear to be ground-truth labels. \n - Assuming that the cost of low-quality labels is ten times lower than the cost of high-quality labels, the authors find three regimes, depending on the available budget: For small budgets, performance increases in the fraction of low-quality labels used for training. For large budgets, performance instead decreases when more low-quality labels are used. Lastly, for intermediate budgets, performance first increases and then decreases in the percentage of low-quality labels.\n- Experiments are done on multiple (binary) NLP classification tasks, and include different training strategies, sometimes combined with few-shot prompting. \n- Ablations are conducted using different models, both for label generation and finetuning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper appears to have been submitted in an unfinished state\n - Different microeconomic assumptions are mentioned as part of the contributions and discussion, but I did not find any discussion of these in the main text (and very little in the appendix).\n - There is a range of minor issues that are unproblematic on their own, but add up: \n - The paper uses the wrong template (ICLR 2024 rather than 2025)\n - The paper containe multiple instances of questions in odd places that appear to be todos by the authors. For example, \"All datasets?\" at the end of the references, and \"What about scenarios where the cost you invested into training up your labelers or understanding a problem has externalities for other training runs etc?\" in Appendix B. \n - A bunch of citations are missing details like the arxiv identifier\n - Figures 1 and 2 are presentented in double column without any apparent reason, making things more difficult to read. \n - The logical flow of the writing could be improved at times. For example: \"the role of pretraining in weak-to-strong generalization\" is mentiond before weak-to-strong generalization is introduced, making the remark difficult to understand. \n - There is also room for improvement in terms of \"local\" writing clarity. For example:\n - \"Results broken down by each of the three datasets can be found in Appendix figures 6, 7, and 8, suggesting that the results hold across tasks and weak label qualities.\"\n - What result is supposed to \"hold\" here? I can see that the pareto-fronts have somewhat similar shapes, but what does that imply?\n - The description of the quantity-dominant setting is difficult to parse (perhaps in part due to the dual-column structure).\n- Apart from the paragraph on scalable oversight, the related work section is relatively sparse and could benefit from incorporating existing work on sample values [1] and quality/quantity tradeoffs in machine learning [2,3,4].\n- While this might be hindsight bias speaking, I do not find the main result, that a mixture of higher quality and lower quality labels is optimal, particularly surprising. For example, while [3] is not fully comparable, in part because high quality labels are selected actively, that work also finds a mixture of higher and lower quality labels to be optimal for training. \n\n\n[1] Torralba, Antonio, and Alexei A. Efros. \"Unbiased look at dataset bias.\" CVPR 2011. IEEE, 2011.\n\n[2] Sheng, Victor S., Foster Provost, and Panagiotis G. Ipeirotis. \"Get another label? improving data quality and data mining using multiple, noisy labelers.\" Proceedings of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining. 2008.\n\n[3] Chen, D., Yu, Z., and Bowman, S. R. Clean or annotate: How to spend a limited data collection budget. arXiv preprint arXiv:2110.08355, 2021.\n\n[4] Crammer, Koby, Michael Kearns, and Jennifer Wortman. \"Learning from data of variable quality.\" Advances in Neural Information Processing Systems 18 (2005)."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We compare learning methods and optimally trade off the quantity and quality of labels to elicit knowledge from a pretrained model in a labeling-cost constrained setting."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024balancing,\ntitle={Balancing Label Quantity and Quality for Scalable Elicitation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y8TjnkdWNA},\nnote={under review}\n}"
},
"abstract": {
"value": "Scalable oversight studies methods of training and evaluating AI systems in domains where human judgement is unreliable or expensive, such as scientific research and software engineering in complex codebases. Recent work in this area by Burns et al. (2023) suggests that Language Models (LMs) pretrained on internet-scale corpora exhibit an inductive bias toward producing correct answers, even when finetuned on error-prone labels produced by a smaller language model. This suggests that massive pretraining combined with finetuning on imperfect human labels may be a solid baseline method for scalable oversight. In the real world, however, label quality is not fixed: practitioners face a quantity-quality tradeoff when generating finetuning data. In this paper, we explore the microeconomics of the quantity-quality tradeoff on binary NLP classification tasks used in Burns et al. (2023). We find that there are three regimes of eliciting classification knowledge from pretrained models using supervised finetuning: quantity-dominant, quality-dominant, and a mixed regime involving the use of low- and high-quality data together to attain higher accuracy at a lower cost than using either alone. We explore sample-efficient elicitation methods that make use of two datasets of differing qualities, and establish a Pareto frontier of scalable elicitation methods that optimally trade off labeling cost and classifier performance."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Scalable oversight",
"Alignment",
"Safety",
"Few-shot learning",
"Eliciting latent knowledge",
"Weak-to-strong generalization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d7d785a08dc058fb74bcf8d3237f484481fefbf6.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Balancing Label Quantity and Quality for Scalable Elicitation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y8qBBbAdEv | Towards a Knowledge guided Multimodal Foundation Model for Spatio-Temporal Remote Sensing Applications | main | Active | Foundation model;Spatiotemporal modelling;Remote Sensing;Knowledge guided | foundation or frontier models, including LLMs | 1;3;5;5 | 4;4;3;3 | 1;2;2;3 | 1;2;3;3 | 1;1;3;2 | 3.5 | 3.5 | 2 | 2.25 | 1.75 | -0.904534 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Both the statements and technical aspects should be refined for rigor. A thorough revision is recommended before resubmission."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The topic is interesting and valuable. The authors commendably attempt to combine physics-driven weather data with observation-based remote sensing data to explore the correlation between these two modalities."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents an approach that aims to integrate physics-driven weather data with observation-based remote sensing data to investigate the correlation between these two modalities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some opinions and assertions in the paper lacks experimental or theoretical support. For example:\n * Line093: The claim that \"temporal flexible architecture is useful for generalizing across downstream tasks\" is presented without the necessary theoretical backing or experimental validation.\n * Line096: The assertion that the embedding created by the proposed architecture is richer than those using reconstruction tasks is only substantiated by results from the authors' framework, lacking comparison with other methods and relevant theoretical derivation.\n2. Unclear Methodology and training detail description.\n * The description of the methodology lacks formalization, rendering the training process unclear.\n * There is insufficient detail regarding the network architecture and training procedures such as the hyper-parameters, training and fine-tuning epoch number, batchsize, and etc.\n * The language employed in the paper lacks academic rigor, featuring informal expressions (e.g., using \"let us look at …”), grammatical errors, improper punctuation, formatting issues (e.g., using $\\times$ instead of x), incorrect capitalization, and various typos. A revision of the writing is strongly recommended.\n3. Deficiencies in Experimental Design. \n * The baseline comparisons in the experimental part can only be classified as ablation studies, as they do not include comparisons with other foundational models.\n * In the downstream task validation, comparisons are limited to the authors' ablation models, without benchmarking against state-of-the-art models in the relevant application field.\n * Given that many pretrained ViT models (even without fine-tuning in the RS domain) achieve commendable performance in remote sensing tasks, the authors should also compare their results with those of pretrained models.\n * There is a lack of experimental design concerning the model's generalization capabilities. As a foundational model, the experiments are supposed to involve more types of downstream tasks and cover remote sensing data from more satellites.\n4. Suboptimal Experimental Results. The visual results in this paper appear to be of limited utility, and there is also a lack of comparison with results from other MAE-based or contrastive learning-based foundation models."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why is causal encoding applied to the fused multimodal embeddings, while Bi-LSTM is used for the weather data? Logically, weather data should also be unidirectional.\n\n2. In Figure 6, the non-restored areas in the SM-VSF results exhibit mosaic artifacts and appear inconsistent with the original image. Does this method affect regions of the image outside the restored areas?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors applied variable-step prediction, commonly used in time series forecasting, to the pretraining task of a remote sensing multimodal foundational model and achieved performance improvements in two downstream tasks. This is a preliminary exploration of a remote sensing multimodal foundational model in spatiotemporal prediction."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a foundational framework for exploring the causal relationships between temporal textual data and remote sensing imagery. The authors first use a Bidirectional LSTM to model the temporal text, followed by a Unidirectional Transformer to extract the causal relationships between the textual and image features. Ultimately, this framework can generate corresponding remote sensing images based on weather data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The impact of weather on the environment is a highlight of this paper and should be its most compelling section. However, this topic is only briefly mentioned in the abstract and introduction. Unfortunately, no subsequent experiments are conducted to discuss the specific effects of weather on remote sensing imagery, which is quite regrettable. The explanation of the causal relationship between weather and imagery seems to be solely connected to the use of the causal Transformer model, which feels somewhat tenuous. I suggest that the authors present several cases to demonstrate how different weather models affect the prediction outcomes or conduct an ablation study to isolate the contribution of weather data to the model's performance.\n\n2. The model's performance is limited. Specifically, in Figure 8, I even feel that the MM-MAE results visually appear superior to those of SM-MAE. The SM-MAE results seem somewhat more blurred. Additionally, the images in Figures 6 and 8 are quite similar. I would encourage the authors to provide more qualitative analysis to demonstrate their method's advantages better. I suggest that the authors conduct a detailed analysis of challenging individual cases, or visualize the learned representations to illustrate how they capture weather-related information.\n\n3. The entire paper seems to rely on only two quantitative analysis experiments, presented in Tables 1 and 2, which feel somewhat insufficient. The authors have designed a complex encoder composed of multiple components, but no ablation studies have been conducted to justify the selection of each component. This makes it difficult to be convinced that the author's choices are optimal. I suggest the authors include ablation experiments regarding the model architecture. For example, the additional experiments mentioned above."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. In light of Weakness 1, the authors are encouraged to clarify the motivation and considerations underlying the proposed method.\n2. Could you provide a detailed explanation of the design rationale behind your framework?\n3. CDL is widely recognized for its association with noisy labels. Do you implement any label correction on this data, and if so, what specific steps do you undertake?\n\nAdditionally, the authors are encouraged to address the concerns outlined in the Weakness section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The concept of establishing a geoscience foundational model from the perspective of forecasting future spectral imagery is novel, and the utilization of weather information in this context has not been fully explored by the community before. This paper presents an intriguing solution tailored specifically to a foundational model for geoscience.\n2. Overall, the writing is satisfactory; however, there are some significant drawbacks that I will address later."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an innovative foundational model framework for geoscience, which undertakes the task of forecasting future spectral imagery using weather information and spectral imagery from previous timestamps. Through the construction of a MultiModal Variable Step Forecasting (MM-VSF) paradigm, the authors illustrate the superiority of MM-VSF in two downstream tasks: crop mapping and missing image prediction. When compared to MAE pre-training and single-modality scenarios, the proposed method demonstrates enhanced performance, thereby validating the significance of incorporating weather information in certain remote sensing tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The fundamental assumption may be flawed. While it is reasonable to assert that weather can be an influential factor in certain land use or land cover predictions, forecasting spectral imagery solely based on weather and previous spectral imagery clearly overlooks essential factors, such as human activities. For instance, in crop mapping predictions, farmers may choose not to cultivate any crops in a given year, a decision that is entirely independent of weather conditions. Similarly, the segmentation of urban functional areas is also not contingent upon weather; the construction of buildings occurs regardless of weather considerations. Therefore, the current pre-training task appears to be an inadequate choice for addressing a wide range of remote sensing tasks, rendering the fundamental assumption of this paper unconvincing.\n2. The presentation of the main methodology lacks clarity. For instance, the data format for the weather information is ambiguous, and it is unclear how the bi-directional LSTM processes this weather data. I suggest providing a formal description of both the input and output data to illustrate the data flow pipeline, as this would offer a clearer overview of the methodology. Furthermore, the rationale behind selecting ViT instead of SwinViT, as well as the choice of LSTM over GRU, is not explicitly addressed. It appears that the selection of these building blocks is somewhat arbitrary and lacks specific consideration within the framework's design.\n3. The experimental results presented are primarily an ablation study. In the main experimental section, the authors compare the proposed MultiModal Variable Step Forecasting (MM-VSF) model solely with various combinations of single modalities and MAE-based pre-training methods. The current results demonstrate only the necessity of the weather modality and the superiority of the Variable Step Forecasting over MAE. However, there is insufficient evidence to support the assertion that MM-VSF is a suitable choice for universal remote sensing interpretation tasks. I strongly recommend that the authors compare MM-VSF with well-established remote sensing foundational models, such as SkySense (CVPR 2024), SatMAE++ (CVPR 2024), and DeCUR (ECCV 2024), using recognized benchmarks such as BigEarthNet, fMoW, and DIOR. Additionally, the potential of contrastive learning as an avenue for developing remote sensing foundational models is not adequately addressed in this paper.\n4. The analysis of the learned causal relationships is notably absent. In the abstract and introduction, the learned causal relationship between weather and spectral imagery is presented as a significant contribution of this paper; however, a detailed analysis appears to be lacking. I would appreciate the inclusion of quantitative results or mathematical induction, as causality is a well-established mathematical concept, to further elucidate this aspect.\n5. Several important related works appear to be missing from the discussion. Regarding weather forecasting, the authors should consider including GraphCast (Science 2023) and Fuxi (Nature Communications 2024). Additionally, in the realm of contrastive learning, notable contributions such as DeCUR (ECCV 2024) and DINO-MM (IGRASS 2022) should also be acknowledged."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does the variable step forecasting task handle significant time gaps in satellite data? For example, how does it manage situations where satellite imagery is missing for extended periods?\n2. Why the shape of weather data is 1x1?\n3. In the crop mapping task, how sensitive is the performance of MM-VSF to the temporal resolution of the input data? Would increasing or decreasing the frequency of the input images change the results significantly?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The introduction of a multimodal pretraining task adds a novel aspect to the existing methods in the geoscience foundation model landscape. The paper is generally well-structured. The ability to generalize across years and handle missing data offers practical utility in real-world settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a pretraining framework called MultiModal Variable Step Forecasting (MM-VSF). The proposed foundation model leverages multimodal data, specifically satellite imagery and weather data, to improve spatio-temporal remote sensing tasks like crop mapping and missing image prediction. The core idea is to pretrain the model using a forecasting task that captures causal relationships between the modalities, improving generalization and representation quality for downstream tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Insufficient experiments: My main concern about this paper is the lack of comprehensive experiments. The baseline models could be expanded, for example, by including comparisons with contrastive-learning-based methods to provide a more holistic evaluation.\n\n2. Lack of fine-grained ablation studies: The paper could benefit from more fine-grained ablation studies, such as experimenting with different masking ratios and strategies. This would help clarify the specific impact of these hyperparameters on model performance.\n\n3. Limited downstream tasks: The range of downstream tasks is insufficient. Including tasks like urban semantic segmentation mapping, which is highly relevant in the field of remote sensing, would strengthen the generalizability of the approach.\n\n4. Incomplete related work: The related work section is not sufficiently comprehensive and clear. There is a lack of discussion and comparison with relevant multimodal foundation models in remote sensing, particularly models that also utilize satellite and weather data [1-2]. Incorporating these references and analyzing their similarities and differences with the proposed approach would offer better context and positioning.\n\n5. Figures: Figures 1 and 2 have unclear text, which may hinder the understanding of key concepts. Improving the clarity of the text in these figures would enhance the overall presentation quality.\n\n[1] Ravirathinam, Praveen, et al. \"Combining Satellite and Weather Data for Crop Type Mapping: An Inverse Modelling Approach.\" Proceedings of the 2024 SIAM International Conference on Data Mining (SDM). Society for Industrial and Applied Mathematics, 2024.\n[2] Nedungadi, Vishal, et al. \"MMEarth: Exploring multi-modal pretext tasks for geospatial representation learning.\" arXiv preprint arXiv:2405.02771 (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards a Knowledge guided Multimodal Foundation Model for Spatio-Temporal Remote Sensing Applications},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y8qBBbAdEv},\nnote={under review}\n}"
},
"abstract": {
"value": "In recent years, there has been an increased interest in foundation models for geoscience due to the vast amount of Earth observing satellite imagery. Existing remote sensing foundation models make use of the various sources of spectral imagery to create large models pretrained on the task of masked reconstruction. In this paper, we present a foundation model framework, where the pretraining task captures the causal relationship between multiple modalities. Our framework leverages the knowledge guided principles that the spectral imagery captures the impact of the physical drivers on the environmental system, and that the relationship between them is governed by the characteristics of the system. Specifically, our method, called MultiModal Variable Step Forecasting (MM-VSF), uses forecasting of satellite imagery as a pretraining task and is able to capture the causal relationship between spectral imagery and weather. In our evaluation we show that the forecasting of satellite imagery using weather can be used as an effective pretraining task for foundation models. We further show the effectiveness of the embeddings produced by MM-VSF on the downstream tasks of pixel wise crop mapping and missing image prediction of spectral imagery, when compared with embeddings created by models trained in alternative pretraining settings including the traditional single modality input masked reconstruction."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Foundation model",
"Spatiotemporal modelling",
"Remote Sensing",
"Knowledge guided"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0af57440475b30c9e88033babe54360898e2ab1c.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards a Knowledge guided Multimodal Foundation Model for Spatio-Temporal Remote Sensing Applications"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y8uPsxR8PN | Sort-free Gaussian Splatting via Weighted Sum Rendering | main | Active | Sort-free;Gaussian Splatting;Weighted Sum Rendering | applications to computer vision, audio, language, and other modalities | 5;6;6;8 | 4;3;3;4 | 2;3;2;3 | 2;3;3;4 | 3;2;3;4 | 6.25 | 3.5 | 2.5 | 3 | 3 | 0.229416 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In the paper, the author points out that for some complex scenes, since a large number of 3D Gaussians are needed for representation, the sorting process will exhaust the resources of mobile devices, making real-time rendering impossible, such as the bicycle scene in Mip-NeRF 360 dataset. However, I tried to visualize the bicycle scene using a gsplat-based webgl viewer on my phone (iPhone 15 Pro Max), and was surprised to find that it achieves real-time rendering, about 20-30fps. This seems to indicate that some optimizations to the 3DGS rendering pipeline (gsplat uses some tricks) can increase the rendering speed on mobile devices without more complex designs. I would like to know the author's opinion on my point of view and more analyses of the key differences that enable real-time performance on mobile devices. And they can test the performance of gsplat on Snapdragon® 8gen3 chipset and compare with their proposed method.\n\nHere is the link of gsplat webgl-viewer https://gsplat.tech"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The author provides a clear analysis of the tile-based sort in the vanilla 3DGS, explains its impact on rendering speed, and explains why it is difficult to implement on other devices.\n- The proposed method speeds up rendering while ensuring rendering quality, and has been verified on consumer mobile devices.\n- The proposed weighted sum approximation alleviates the *pop* phenomenon caused by inconsistent sorting results under different view-directions in the vanilla 3DGS."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a sort-free Gaussian Splatting pipeline, which reduces the computational overhead of rendering to adapt to scenarios with limited computing resources. The key insight of this paper is that the view dependent sorting in the vanilla 3DGS involves a lot of computational overhead and is difficult to port to mobile devices. Therefore, the authors proposed to use weighted sum and view-dependent opacity to approximate alpha-blending. Experiments demonstrate that proposed method achieves comparable rendering quality and faster rendering speed than the original 3DGS on mobile devices."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The proposed method gets rid of the costly sorting, but a more complex weight and view-dependent opacity are used. Although the author points out in the paper that a general scheme for learning scene representations is bound only by the mathematical constraints of the scene model. According to the experimental results in the paper, the use of simple weighted-sum and view-independent opacity cannot achieve satisfactory rendering results. Does this mean that for rendering processes that are not physically and optically, more complex models must be used, such as introducing more learnable parameters as in the paper?\n- In the conclusion part of the paper, the author mentioned some recent research on compact Gaussian Splatting. According to my understanding of these works, since the methods proposed in these works obtain more compact 3D Gaussian representations, they can reduce the sorting overhead and speed up rendering to a certain extent. Therefore, I think the author should add a comparison of rendering quality, speed and memory usage between the proposed method and these works, such as Compact 3DGS[1], LightGaussian[2] and Compressed 3DGS[3] in the experimental evaluation section.\n\n[1] Compact 3D Gaussian Representation for Radiance Field https://openaccess.thecvf.com/content/CVPR2024/papers/Lee_Compact_3D_Gaussian_Representation_for_Radiance_Field_CVPR_2024_paper.pdf\n[2] LightGaussian: Unbounded 3D Gaussian Compression with 15x Reduction and 200+ FPS https://arxiv.org/pdf/2311.17245\n\n[3] Compressed 3D Gaussian Splatting for Accelerated Novel View Synthesis https://openaccess.thecvf.com/content/CVPR2024/papers/Niedermayr_Compressed_3D_Gaussian_Splatting_for_Accelerated_Novel_View_Synthesis_CVPR_2024_paper.pdf\n\n[4]"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Does the method exhibit color shift artifacts during camera movement along the z-direction, as suggested by the concerns regarding linear weighting?\n- Since opacity is not capped at 1, how did you enforce the data range of final color, simply clamp?\n- How do other Gaussian attributes maintain their geometric meaning in this framework? Specifically: Does the mean of Gaussians still correspond to point cloud locations? How about depth extraction?\n- While traditional 3DGS implementations employ culling to manage computational complexity, does this approach require weighted summation of ALL Gaussians? What are the implications for scalability?\n- Please clarify the memory usage analysis: does it encompass only Gaussian attributes or include runtime memory requirements?\nWhat is the memory overhead of spherical harmonics coefficients for view-dependent opacity compared to vanilla 3DGS?\n- Regarding Table 2, could you elaborate on the starred data points showing significantly higher values? How does resource exhaustion due to sorting impact subsequent pipeline stages?\n- Given the potential for hardware-accelerated sorting methods specifically designed for 3DGS, possibly through hardware-software co-design, how might this work adapt to or complement such future developments?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper aims to address a fundamental bottleneck in 3DGS rendering by proposing a sort-free pipeline, distinguishing itself from previous approaches that primarily focused on reducing Gaussian counts.\n- The investigation of various weight derivation functions (direct sum-up, exponential weighted, and linear correction weighted) demonstrates a relative comprehensive experimental analysis effort.\n- Implementation and evaluation on mobile devices effectively demonstrates the practical benefits of the sort-free approach in resource-constrained environments.\n- The method appears to mitigate \"pop\" artifacts, a persistent challenge in volumetric rendering, providing an additional benefit beyond computational efficiency."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a novel rasterization backend for 3D Gaussian splatting (3DGS) that addresses the computational and memory overhead associated with depth sorting. Their primary contribution is replacing traditional alpha blending with a weighted sum rendering approach. The rendering is achieved through direct weighted summation of Gaussians, where weights are determined by a parameterized truncated linear function applied to the product of color and opacity. Additionally, they introduce view-dependent optimizable opacity for individual Gaussians, modeled using spherical harmonics.\nTheir weighted sum rendering (WSR) technique demonstrates comparable rendering quality to conventional 3DGS. To validate the advantages of their sort-free approach, the authors implement both vanilla 3DGS and WSR on the Vulkan API and evaluate performance on mobile GPU hardware, achieving a 1.23x speedup."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The theoretical foundation for replacing alpha blending with an algebraic function requires stronger justification. While the proposed representation achieves good results, it lacks the physical insights inherent in traditional alpha blending, which models light obstruction during propagation. The introduction of view-dependent opacity appears necessary for acceptable rendering quality, but the paper would benefit from a more rigorous analysis of why this combination works, rather than empirical validation alone.\n- The preference for linear weights over exponential weights raises questions about robustness. When considering camera movement along viewing rays, exponential weights (with β ≈ 1) should theoretically provide more consistent mixing ratios between Gaussians. Linear weights may introduce color shift artifacts due to distance-dependent mixing ratio variations. While the authors claims \"some artifacts remain visible\" then selected linear weight rather than exponential weight, these issues warrant more detailed examination.\n- The method's generalizability is limited by the transformation of opacity into a view-dependent \"black box\" variable. This fundamentally alters the role of opacity, which traditionally serves as a crucial signal for pruning and densification in many 3DGS applications, potentially limiting compatibility with existing and future research in this domain."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Can the authors provide some qualitative results on the Bicycle scene?\n2. Can the authors provide some videos showing the removal of the popping effect?\n3. Can the authors show some failure cases of the proposed approximation?\n\nI really want to give a positive rating to this paper, because I know it is very difficult to make a sort-free rendering to achieve the same quality as the sort-based version. However, the marginal speed improvement diminishes the significance of this paper. Furthermore, I hope the authors can demonstrate some failure cases of such approximation methods, as they are quite common in traditional sort-free algorithms. Doing this could give the reader a more comprehensive understanding of the proposed method."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**Motivation**\n* The motivation of this paper is very clear. It clearly illustrates the drawbacks of the sorting requirement of 3DGS and the possible benefit of removing it. Sort-free transparent object rendering is also a well-studied direction in traditional rendering. Combining the algorithms from traditional computer graphics with 3DGS is well-motivated.\n\n**Method**\n* The proposed method transformed the traditional depth-based weighted rendering to a learnable version to better suit the per-scene optimization used in 3DGS, which provides significant performance improvement. \n* The proposed view-dependent opacity is an innovative approach that mimic the occlusion effect. Because this SH parameters are optimized together, it compensates for some performance loss from incorrect occlusion estimation without sorting.\n\n**Experiments**\n* The performance on normal desktop GPU demonstrates a small improvement in rendering speed and comparable rendering speed. In fact, it is already remarkable that a sort-free rendering algorithm can match the performance of a sorting-based rendering algorithm in a scene full of semi-transparent primitives.\n* The performance improvement on the edge device shows a more significant improvement because of the demanding sorting step."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a sort-free variant to the 3DGS algorithm. Because 3DGS is a semi-transparent representation, it requires sorting of all primitives which leads to higher running time, more hardware requirements, and lower compatibility on edge devices. Learning from the traditional sort-free rendering algorithm which replaces the sorting with an estimated weight from depth, this paper proposes to eliminate the sorting process of 3DGS completely. \n\nThis paper further proposes a view-dependent opacity to better simulate the occlusion effect. By learning spherical harmonics to represent the opacity, it allows the 3D Gaussians to appear and disappear at different angles, which could further mimic the occlusion effect.\n\nAs a result, this paper achieves a slightly higher rendering speed and lower GPU memory footprint, better compatibility on edge devices, and comparable rendering quality than 3DGS."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "** Motivation **\n* As shown in the paper, the sorting step is usually considered slow, but actually contributes to only 25% of the total rendering time. The real problem preventing 3DGS based method from matching the rendering speed of traditional methods is the alpha blending step. Although this paper removes the sorting step, it still needs the per-pixel alpha blending of Gaussians of calculated weights. As a result, the speed improvement is rather insignificant. \n* Rather than removing the sorting step, many works have proposed to removing the redundant Gaussians directly. These methods seem to provide much more significant speed boost. Of course these two method can be run in parallel, but it still weaken the importance of the proposed method. \n\n** Method **\n* The proposed method is largely borrowed from the traditional sort-free methods. I think this is acceptable because of the long history of sort-free rendering research in traditional computer graphics, it does negatively affect the novelty of the proposed method slightly.\n\n** Experiments**\n* As mentioned above, the improvement on standard desktop GPU is rather insignificant. Since sort-free rendering is essentially an approximation of the sort-based rendering, it might not be a good idea to choose this method for such a small improvement while there is a possibility of very bad performance on some scenes. \n* Continuing from the last point, sort-free approximations are usually inaccurate on small and thin structures. For example, I noticed that the proposed method on the bicycle scene in MipNeRF-360 dataset is somewhat lower than the original 3DGS method. This scene contains very thin and fine structures on the bicycle which could be a better illustration of whether the proposed method suffers from similar artifacts like the traditional methods. \n* Despite the significant rendering speed improvement on the edge devices, the paper lacks a detailed explanation of how the edge device running out of resources leads to such a big decrease in rendering speed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. In section 4.3 the authors reveal that view-dependent opacity is obtained by the addition of a new set of spherical harmonics related to the depth weighting introduced by the authors. In the experimental section, the authors show that this doesn't impact memory usage during rendering and a rationale given in the removal of the sorting step and the potential reduction in the number of Gaussians. However, this appears to be referring to instantaneous memory requirements. How does the introduction of the new set of spherical harmonics for each Gaussian affect post-training storage requirements of the radiance field? Is a greater storage footprint needed for the radiance fields produced by the proposed methods?\n2. Line 53 \"maintain rendering differentiable\" should be \"maintain rendering differentiability\".\n3. Line 141 \"Chris Wyman provide a good survey on these methods\" should be \"Chris Wyman provides a good survey on these methods\"\n4. Line 273, \"However, DIR-WSR does not work well for the complex scenes, \" should be \"However, DIR-WSR does not work well for complex scenes, \"\n5. Line 307 ends with an additional full stop."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This work solves a crucial problem in real-time rendering of Gaussian Splatting based radiance fields. By removing the need for depth-based sorting of the Gaussians before rendering, critical memory requirements are reduced enabling the implementation of the methodology on mobile devices or other applications with limited computational capacity. So I feel that the work represents a strong contribution to the field. Other strengths include:\n1. Clear and easy to read figures and tables clearly detailing the benefits of the proposed method.\n2. A detailed and clear explanation of the proposed method. It is clear what has been done, how this is motivated by previous literature.\n3. A strong experimental validation. Comparisons are made against well-recognized metrics and a the latest state-of-the-art methods. The experimental results support well the conclusions reached."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work attempts to solve a key problem with current Gaussian Splatting techniques, the computational load that occurs during rendering as a result of the non-commutativity of the opacity calculation needed during the rendering process. The authors achieve this by a novel weighting of term added to each Gaussian to replace the traditional opacity term. The weighting term allows for an order independent formulation of the computation of the final colour of the a rendered ray through the field. View-dependence is obtained by the introduction of a spherical harmonic based formulation to the new weighting term similar to that currently used in prior-art Gaussian Splatting methods for view-dependent colour. Performance is evaluated against prior-art techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This work is of quite a high quality. I do not see any specific weaknesses in the methodology or the experimental results. There is one specific minor weakness in the experimental section wherein I would like further information on the memory requirements of the proposed method. I detail the exact question I have in the \"Questions\" section below.\nOne final minor issue is the presence of minor grammar errors in the text as I mention in the \"Questions\" section below."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024sortfree,\ntitle={Sort-free Gaussian Splatting via Weighted Sum Rendering},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y8uPsxR8PN},\nnote={under review}\n}"
},
"abstract": {
"value": "Recently, 3D Gaussian Splatting (3DGS) has emerged as a significant advancement in 3D scene reconstruction, attracting considerable attention due to its ability to recover high-fidelity details while maintaining low complexity. Despite the promising results achieved by 3DGS, its rendering performance is constrained by its dependence on costly non-commutative alpha-blending operations. These operations mandate complex view dependent sorting operations that introduce computational overhead, especially on the resource-constrained platforms such as mobile phones. In this paper, we propose Weighted Sum Rendering, which approximates alpha blending with weighted sums, thereby removing the need for sorting. This simplifies implementation, delivers superior performance, and eliminates the ``popping'' artifacts caused by sorting. Experimental results show that optimizing a generalized Gaussian splatting formulation to the new differentiable rendering yields competitive image quality. The method was implemented and tested in a mobile device GPU, achieving on average $1.23\\times$ faster rendering."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Sort-free",
"Gaussian Splatting",
"Weighted Sum Rendering"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/874753d2dcc0bb1e6c2c57fe99f48e16fc76d7d1.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/77c0762093213b39357d3ba6659d3f2f3c72043f.pdf"
},
"title": {
"value": "Sort-free Gaussian Splatting via Weighted Sum Rendering"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y9A2TpaGsE | Language Agents Meet Causality -- Bridging LLMs and Causal World Models | main | Active | Large Language Models;Causality;Causal Representation Learning;Language Agents;Planning | applications to robotics, autonomy, planning | 5;6;6;8 | 3;2;4;4 | 3;3;2;3 | 3;3;3;3 | 3;3;2;3 | 6.25 | 3.25 | 2.75 | 3 | 2.75 | 0.4842 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Questions:\n- How do you make sure the objects in the image X are identified with the Auto Encoder? if some objects are missing the casual model cannot run properly right?\n- How does the baseline RAP work with LLama? LLama 8B does not take vision input.\n- L285: the description is confusing, why causal mapper is trained using image input?\n- Is LLM baseline just RAP? should just say RAP it in the table. Baseline LM is super confusing, there are many other simpler LLM prompting baselines in planning.\n\nTypos:\n- L269: p_\\phi(E_t | z_t) not p_\\phi(X_t | z_t)"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The integration of CRL and LLM planning is novel and interesting, it is straight forward to integrate it with multiple other LLM-based search algorithms, not only RAP.\n- The paper investigates the form of action representation in the casual world model, and provides detailed results on them."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a framework that combines LLMs with CRL to improve reasoning and planning tasks. The framework leverages both LLMs' common sense knowledge and CRL's \"look-ahead\" ability on causal structures in environments. During inference, LLMs use a causal world model as a simulator for generating and processing action and state descriptions in natural language. This can be well integrated with MCTS algorithm from previous work. Tests on causal inference and planning tasks reveal that this causally-aware approach outperforms traditional LLM methods in complex, long-term planning scenarios."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Reasoning via Planning is not the sota method, [1] combines LLM as a world model and LLM as a policy model, with MCTS, should be better than RAP\n- Though not discussed in RAP, other literature [2] [3] suggest increasing the number of LLM calls will substantially improve the search algorithm results. How is the overhead and accuracy of LLM+CRL planning vs. increasing LLM calls with tree-search or iterative prompting?\n- I'm concerned about the ability of the system to work on more complex visual environment. For example, if the accuracy of the casual world model on look-ahead is limited, we should still rely on executing the actions in the real simulator and extract observation there. Related to this concern, a comparison with RAP+the real simulator execution (some oracle) is missing.\n\n[1] Zhao et al., Large Language Models as Commonsense Knowledge for Large-Scale Task Planning, 2023\n\n[2] Yao et al., Tree of Thoughts: Deliberate Problem Solving with Large Language Models, 2023\n\n[3] Zhang et al., ReST-MCTS*: LLM Self-Training via Process Reward Guided Tree Search, 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How is coordinate-based action representation implemented? A 2-d vector, or simple text like “(2,3)” encoded by the encoder?\n2. What does HB action representation look like and how does it differ from TB? An example of CB, TB, and HB would help to explain.\n3. line 377, “better sample efficiency” would allow TB to perform well in extremely low-data scenarios as well, the reviewer think the experimental results doesn’t support the claim of “better sample efficiency” of TB.\n4. In table 2, the baseline language model and the causal world model both predicts the next state in natural language, how is it evaluated against the ground-truth next state?\n5. How big is the set of annotated images used to train the causal mapper m_\\theta?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper proposed a method to learn a causal world model from a sequence of image states and text action descriptions, and demonstrated superior performance in the accuracy of the learned world model.\n- Presentation of the paper is clear and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposed to learn a causal world model from a sequence of states and actions, where the states are images and actions are described in natural language. The proposed method builds upon BISCUIT and proposes representing the actions in natural language. The paper conducted experiments in GridWorld and iTHOR environment and the R^2 scores of the learned representation are claimed to be higher in low-data regimes. Experiments also show that the learned causal world model is more accurate at predicting the next state, and improves planning performance when compared to a general LLM (Llama 3)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some components of the framework would not be available in more realistic environments, e.g. 1) a set of annotated images with ground-truth causal variables is used in training, which is likely not available as we may not know the causal variables for more realistic environments; 2) a rule-based state description generator may not be available for complex environments where we don't know what are the true causal variables.\n2. Given the simplicity of the environments, and that the proposed method is trained on these particular domains while the baseline is a general LM, the superior performance of the learned causal world model is less convincing. I would suggest comparing with a supervised fine-tuned version of the baseline LM. Since the proposed method uses a set of annotated images to train the causal mapper, supervised fine-tuning is possible with these annotated data: use the state description generator to convert the ground-truth causal variables of the states to natural language, and we can obtain a sequence of natural language action and natural language description of the states."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Can the authors elaborate on how the framework handles situations where the causal factors are not easily identifiable or where the causal relationships are highly complex?\n\nFollowing on the above point, how were the parameters chosen for the various components of the framework, and how sensitive are the results to these parameters?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The exploration of text-based action representations is particularly interesting, as it shows potential advantages in low-data regimes. I particularly liked the connection to the RL decision making problem. The results indicate that the proposed framework can potentially improve performance in causal inference and planning tasks, which is valuable for the broader ICLR community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel framework that integrates Causal Representation Learning (CRL) with Large Language Models (LLMs) to enhance reasoning and planning capabilities in interactive environments. The framework utilizes a causal world model that maps high-dimensional state representations to causal variables, which are linked to natural language expressions. This integration allows the LLM to interact with the causal world model, effectively simulating multiple possible futures before taking actions. The approach is evaluated on causal inference and planning tasks across different environments and temporal scales, demonstrating that the causally-aware method outperforms traditional LLM-based reasoners, especially for longer planning horizons."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Please see the questions"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "+ Do we have examples of decoded text (from CRL to LLM)? Is that human-readable? Is there any possible insights with analysis of the decoded text?\n+ (Clarification) In my understanding, the performance increase comes from (1) the text decoded from CRL makes it easier for LLM to reason and plan (2) the CRL model has a higher accuracy in predicting future states. If my understanding is correct, do we have ablation experiments on the effect of the two aspects?\n+ (Clarification) Does the Baseline LM refer to RAP + LLaMA 3 or LLaMA3 with other prompting methods?\n+ In Table 1, **TB** outperforms **HB** starting from 1.0%, outperforming **HB** much at **1.5%**, but performs weaker than **HB** in 100%. What are the trend between 1.5% and 100% subsample rate? Does it mean the necessity of **TB** decreases when the data scales up enough?\n+ The **HB** settings shall contain all information in the **TB** settings. Why would the model performs worse than **TB** with increased subsample percentage? Since **HB** performs better in 100% ($10^6$ image states), would it be better to use **HB** in subsequent experiments?\n+ Can you include SNA (success weighted by number of actions) in the planning results? It's a common metric to combine success and # of steps.\n+ (Typo) Page 4 footnote, prioi -> prior\n+ (Typo) Figure 1, missing an arrow from $\\bf C^1$ to Text Decoder."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper proposes to introduce CRL for world model to better understand the causal structure of environments. This intuitive idea provides a solid future approach to further improve the performance of world models, which is essential to build with intelligent agent with System 2 reasoning abilities.\nThe author analyzes the underlying math principles of the proposed CRL model and shows with experiments of the performance in metrics of comparison between induced state variables and ground-truth, and multi-step high-level accuracy of the predicted states. The experiments show large performance increase than baseline pure language model.\nThe author also use the result causal world model to do experiments on synthetic planning tasks. The results show consistent superior performance to baseline LM, especially on longer planning horizons."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The author proposed to introduce causal representation learning (CRL) to better understand the causal structure of a environment, and they incorporate such CRL with LLM to enable causally-aware planning by mapping causal representations to natural language."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "+ It's not clear how much the text decoded from CRL model helps the LLM reasoning. It would be clearer if the author can show some examples and/or conduct ablation experiment on this. (also see questions)\n+ The experiments compare **CB**, **TB**, and **HB** are based on the low-data scenarios, but the decision to use **TB** in subsequent experiments is for 100% data. This decision lacks a continuity of experiment settings, and it's unclear which setting would be the best for subsequent experiments. (also see questions)\n+ The paper is a good starting of causal world model studies, but I have to mention that the experiment environment is synthetic and relatively easy. It would be more fascinating to use more general environments, and there leaves doubt whether the method can be easily scaling up to more general or even real world scenarios."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Improving LLM planning capabilities using learned causal representations"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024language,\ntitle={Language Agents Meet Causality -- Bridging {LLM}s and Causal World Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y9A2TpaGsE},\nnote={under review}\n}"
},
"abstract": {
"value": "Large Language Models (LLMs) have recently shown great promise in planning and reasoning applications. These tasks demand robust systems, which arguably require a causal understanding of the environment. While LLMs can acquire and reflect common sense causal knowledge from their pretraining data, this information is often incomplete, incorrect, or inapplicable to a specific environment. In contrast, causal representation learning (CRL) focuses on identifying the underlying causal structure within a given environment. We propose a framework that integrates CRLs with LLMs to enable causally-aware reasoning and planning. This framework learns a causal world model, with causal variables linked to natural language expressions. This mapping provides LLMs with a flexible interface to process and generate descriptions of actions and states in text form. Effectively, the causal world model acts as a simulator that the LLM can query and interact with. We evaluate the framework on causal inference and planning tasks across temporal scales and environmental complexities. Our experiments demonstrate the effectiveness of the approach, with the causally-aware method outperforming LLM-based reasoners, especially for longer planning horizons."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Models",
"Causality",
"Causal Representation Learning",
"Language Agents",
"Planning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b3af4d8138f60e41a97ffc17d99bacdcaf62f711.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/4922a3e4fa9902d7ff0d5cd9d8bb3ae148ea1565.zip"
},
"title": {
"value": "Language Agents Meet Causality -- Bridging LLMs and Causal World Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y9Lbr6vFHF | Bi-perspective Splitting Defense: Achieving Clean-Data-Free Backdoor Security | main | Active | Trustworthy AI;Backdoor Defense;Deep Neural Networks | alignment, fairness, safety, privacy, and societal considerations | 3;5;5;6;6 | 5;4;4;4;4 | 3;3;3;3;3 | 2;3;3;3;3 | 2;3;3;4;3 | 5 | 4.2 | 3 | 2.8 | 3 | -0.912871 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper targets a clear challenge in backdoor defense by eliminating the need for clean data, which is relevant to practical applications.\n\n- The experimental evaluation is thorough, encompassing multiple datasets, attack types, and model architectures. The comparison against existing defense methods provides a meaningful context for the method's effectiveness.\n\n- The authors conduct detailed ablation studies examining the impact of various hyperparameters, which helps in understanding the method's sensitivity and optimal configuration."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the limitation of existing backdoor attack defenses, which typically require an auxiliary clean dataset that may be difficult to obtain. The authors propose a clean-data-free, end-to-end method to mitigate backdoor attacks. Their approach leverages two dynamically identified pools of data: one from open set recognition-based splitting and another from altruistic model-based splitting. These pools are then utilized in the main training loop, which the authors demonstrate to be effective in producing backdoor-free models, even when trained on poisoned datasets.\n\nThe authors validate their method using three benchmark datasets and test it against seven representative backdoor attacks, including both dirty-label and clean-label attacks. They compare their approach with five existing backdoor defenses and evaluate performance across two model architectures: ResNet-18 and MobileNet-v2."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The study primarily focuses on supervised image classification tasks. This limitation should be explicitly stated in both the abstract and introduction to better set reader expectations.\n\n- The paper's motivation could be strengthened by acknowledging recent developments in clean data acquisition methods. Notable omissions include:\n\n *Zeng et al. (2023, Usenix Security)* on clean subset extraction from poisoned datasets\n\n *Pan et al. (2023, Usenix Security)* on backdoor data detection methods\n\n These omissions affect the paper's premise that clean data acquisition is inherently impossible.\n\n\n- Technical Issues:\n\n A typographical error in line 331: \"Mobileent-v2\" should be \"MobileNet-v2\"\n\n**References:**\n\n*Zeng et al. (2023)*. \"META-SIFT: How to Sift Out a Clean Data Subset in the Presence of Data Poisoning?\" Usenix Security, 2023.\n\n*Pan et al. (2023)*. \"ASSET: Robust Backdoor Data Detection Across a Multiplicity of Deep Learning Paradigms.\" Usenix Security, 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See weaknesses above."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper introduces a novel method for defending against backdoor attacks without the need for clean data, addressing a significant limitation in previous methods.\n2. Extensive experiments across multiple datasets and attack scenarios demonstrate the robustness and effectiveness of the proposed method, outperforming several state-of-the-art defenses.\n3. The paper details multiple defensive strategies that contribute to its effectiveness, such as class completion and selective dropping, which are well-integrated into the defense strategy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the challenge of defending deep neural networks against backdoor attacks in the absence of clean data subsets. It introduces a novel defense mechanism called Bi-perspective Splitting Defense (BSD) that uses semantic and loss statistics characteristics for dataset splitting. The approach involves two innovative initial pool splitting techniques, Open Set Recognition-based Splitting (OSS) and Altruistic Model-based Splitting (ALS), and it enhances defense through subsequent updates of class completion and selective dropping strategies. The method demonstrates substantial improvements over state-of-the-art defenses across multiple benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The complexity of the proposed method involving multiple models and sophisticated data splitting strategies could be a barrier to practical deployment and computational efficiency.\n2. While the method is empirically successful, the paper could be improved by providing deeper theoretical insights into why the specific strategies employed are effective.\n3. The effectiveness of the method might depend on specific neural network architectures, and its adaptability to different or future architectures is not fully addressed.\n4. The paper could benefit from a more detailed discussion on scenarios where BSD might fail or be less effective, which would be crucial for practical applications and future improvements."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"Yes, Privacy, security and safety"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Listed in the weakness of the paper. \n\nScore can be improved if concerns listed above are resolved."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- This paper explores a novel method to defend backdoor attacks which does not rely on clean subsets of data.\n\n- The proposed method is simple but effective and the good performance obtained by the experiments strongly supports this point.\n\n- The ablation study is organized well to clearly demonstrate the whole proposed method. And it makes the paper easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Backdoor attacks threaten deep neural networks (DNNs) by embedding hidden vulnerabilities through data poisoning. While researchers have explored training benign models from poisoned data, effective defenses often rely on additional clean datasets, which are challenging to obtain due to privacy concerns and data scarcity.\n\nTo tackle these issues, the paper proposes the Bi-perspective Splitting Defense (BSD), which splits the dataset based on semantic and loss statistics using open set recognition (OSS) and altruistic model-based data splitting (ALS). This approach enhances clean pool initialization and includes strategies to prevent class underfitting and backdoor overfitting.\n\nExtensive experiments on three benchmark datasets against seven attacks show that BSD is robust, achieving an average 16.29% improvement in Defense Effectiveness Rating (DER) compared to five state-of-the-art defenses, while maintaining minimal compromise in Clean Accuracy (CA) and Attack Success Rate (ASR)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I wonder **why the proposed BSD can effectively resist the clean-label backdoor attacks.** Clean-label backdoor attacks manipulate samples from the target class while keeping their labels unchanged. Since the BSD framework formulates backdoor defense within a semi-supervised learning context, it seems that whether the clean-label poisoned samples are part of the labeled or unlabeled subset, **the model can still associate the trigger with the target label in clean-label backdoor attacks.** Therefore, I am curious about how BSD manages to effectively resist the three clean-label attacks demonstrated in Table 2.\n\n- I recommend conducting further experiments to assess whether BSD can successfully defend against backdoor attacks **with different target labels.** This could provide valuable insights into the robustness of the defense mechanism across various scenarios.\n\n- Typos: #Line 848 --- #Line 849, 5*10e-4."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In the exp setup, it’s reported to use 7 backdoor attacks while only 3 of them are presented in Table 1. That is to say, the performance against the remaining 4 attacks on the three benchmark datasets is not shown in the paper.\n\n2. Considering the given threat model (i.e., in-training clean-data-free defense), it seems that sota defenses D-ST&D-BR [1] could also serve as baselines. These methods leverage the sensitivity of poisoned samples to transformations, which is quite different from the semantics and losses used in this paper; thus, it would be interesting to compare them and show that the proposed metrics are more accurate.\n\n3. Is the accuracy of identifying the target class reported in the paper? What if the identified class is wrong, will it affect the performance of the following steps?\n\n4. A type in Line 83: “first initialize” -> “first initializes”\n\n[1] Chen, W., Wu, B., & Wang, H. (2022). Effective backdoor defense by exploiting sensitivity of poisoned samples. Advances in Neural Information Processing Systems, 35, 9727-9737."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed defense does not require any extra clean data.\n\n2. They compare the problem of identifying clean target samples from poisoned samples with the open set recognition-based splitting problem of identifying UUCs from UKCs; inspired by which, they propose the identification mechanism."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an in-training clean-data-free backdoor defense method where the defender is required to train a clean model from scratch given a poisoned dataset without the need of any additional clean data. The key to success is to distinguish between clean samples and poisoned samples. To this end, they propose a novel identification mechanism which involves two main procedures. The first procedure is initializing a pool of clean samples and a pool of poisoned samples based on open set recognition-based splitting and altruistic model-based splitting. The second procedure is improving these pools with class completion and selective dropping strategy."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited applied scenarios: The proposed defense is limited to backdoor attacks with a single target class. This is because the first step in identification involves identifying the single target class from other classes; however, in some popular attacks like BadNet’s all-to-all attacks, all classes are target classes, which disables the proposed defense.\n\n2. The proposed method seems a bit complex as it includes two main steps—pool initialization and pool updating—and each step further involves two sub-steps, respectively. These steps aim to distinguish samples from different perspectives. Only after the total four steps, the poisoned samples are filtered out from clean samples. A complex mechanism is totally fine; but, there are two related issues: 1) does the necessity of pool updating validates that the effectiveness of pool initialization is not very good? 2) it seems that the effectiveness of each step highly depends on the performance of previous steps, e.g., if the identification of the target class is wrong, then all subsequent steps are useless. That is to say, accumulative errors may exist in the proposed method and which could lead to bad defense performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The authors have already provided comprehensive analyses and experiments.\n\nHowever, an unresolved question remains: How effectively can the proposed method be applied to scenarios involving multi-target attacks?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The strengths of the proposed Bi-perspective Splitting Defense (BSD) method highlighted in the paper include:\n\n1. **Efficiency in Utilizing Available Data**: BSD makes effective use of target labels and identifies clean samples from the poisoned dataset, reducing the dependency on additional clean data, which might be scarce or raise privacy concerns.\n\n2. **Mitigation Strategies**: BSD incorporates class completion and selective dropping, helping to avoid issues like class underfitting and backdoor overfitting that could otherwise degrade model performance.\n\n3. **Balanced Performance**: While enhancing security, BSD maintains high Clean Accuracy (CA) and manages Attack Success Rate (ASR), ensuring that the model remains accurate on clean data while defending against adversarial inputs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper discusses a method to defend against backdoor attacks in deep neural networks (DNNs) by training reliable models from poisoned datasets, addressing the challenge of lacking additional clean data due to privacy concerns or scarcity. It proposes a Bi-perspective Splitting Defense (BSD) that uses open set recognition-based splitting (OSS) and altruistic model-based data splitting (ALS) to divide the dataset effectively, initializing a pool of clean samples. BSD also employs class completion and selective dropping to prevent class underfitting and backdoor overfitting. Experiments on three benchmark datasets and against seven attacks show that BSD improves Defense Effectiveness Rating (DER) by an average of 16.29% compared to five state-of-the-art defenses, while maintaining high Clean Accuracy (CA) and managing Attack Success Rate (ASR) effectively, thus providing robust security without needing extra clean data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The weaknesses identified in the paper are outlined as follows:\n\n1. **Dependency on Target Recognition**: A significant limitation of the proposed methodology lies in its dependence on accurate target recognition. This dependency introduces a vulnerability; if an advanced attack manages to circumvent the existing detection mechanisms, the entire method could become ineffective. In my opinion, the superior performance is predicated on the assumption that the target class can be accurately identified. Additionally, the framework's effectiveness diminishes when dealing with complex scenarios such as backdoor attacks that involve multiple targets, including those with dual-target labels or all-to-all attack configurations.\n\n2. **Absence of Comparative Analysis with Relevant Work**: The proposed dual-model training framework, which leverages an auxiliary model to support the primary model, shares similarities with the approach detailed in \"[1]\". However, the manuscript lacks a comparative analysis of this work. I see the proposed method has many differences from [1], but incorporating such a comparison would significantly bolster the credibility and relevance of the current submission by providing a clearer differentiation and understanding of the advantages and limitations relative to existing methodologies.\n\nReference:\n[1] The Victim and The Beneficiary: Exploiting a Poisoned Model to Train a Clean Model on Poisoned Data, ICCV 2023 Oral paper"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024biperspective,\ntitle={Bi-perspective Splitting Defense: Achieving Clean-Data-Free Backdoor Security},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y9Lbr6vFHF},\nnote={under review}\n}"
},
"abstract": {
"value": "Backdoor attacks have seriously threatened deep neural networks (DNNs) by embedding concealed vulnerabilities through data poisoning. To counteract these attacks, training benign models from poisoned data garnered considerable interest from researchers. High-performing defenses often rely on additional clean subsets, which is untenable due to increasing privacy concerns and data scarcity. In the absence of clean subsets, defenders resort to complex feature extraction and analysis, resulting in excessive overhead and compromised performance. In the face of these challenges, we identify the key lies in sufficient utilization of the easier-to-obtain target labels and excavation of clean hard samples. In this work, we propose a Bi-perspective Splitting Defense (BSD). BSD splits the dataset using both semantic and loss statistics characteristics through open set recognition-based splitting (OSS) and altruistic model-based data splitting (ALS) respectively, achieving good clean pool initialization. BSD further introduces class completion and selective dropping strategies in the subsequent pool updates to avoid potential class underfitting and backdoor overfitting caused by loss-guided split. Through extensive experiments on 3 benchmark datasets and against 7 representative attacks, we empirically demonstrate that our BSD is robust across various attack settings. Specifically, BSD has an average improvement in Defense Effectiveness Rating (DER) by 16.29\\% compared to 5 state-of-the-art defenses, achieving clean-data-free backdoor security with minimal compromise in both Clean Accuracy (CA) and Attack Success Rate (ASR)."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Trustworthy AI",
"Backdoor Defense",
"Deep Neural Networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0d3930362c0349fd81800df206e42246c23cf945.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/1acc64fa528f8121ee0e865b4f1aab70836b49f8.zip"
},
"title": {
"value": "Bi-perspective Splitting Defense: Achieving Clean-Data-Free Backdoor Security"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y9Xp9NozPR | The Low-Rank Bottleneck in Attention | main | Active | Learning theory;Expressive capacity;Expressive power;Transformer;Attention | learning theory | 3;3;6;8 | 4;3;3;3 | 3;2;4;3 | 3;2;3;3 | 3;2;4;3 | 5 | 3.25 | 3 | 2.75 | 3 | -0.544331 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "N/A"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The theoretical analysis seems correct."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a theoretical analysis of the role of rank within attention mechanisms. It challenges the prevailing practice of employing low-rank attention and discusses the implications related to the selection of the number of heads. The author establishes that low-rank attention exhibits inferior performance compared to full-rank attention, indicating that the adoption of a higher rank has the potential to enhance attention performance. Preliminary experiments are conducted utilizing toy examples with synthetic data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The rank of attention is a significant hyperparameter in the design of transformers. A common convention involves the utilization of low-rank attention, typically establishing the number of heads as ( H = d/r ). This paper, however, contests this design choice, proposing that a higher rank can enhance performance. It is crucial to note that the paper does not address the speed-accuracy trade-off associated with this adjustment. It is widely recognized that high-rank attention may yield superior performance at the expense of increased computational costs. When evaluating overall performance, particularly in terms of accuracy within a predetermined computational budget, prevailing practices may ultimately provide more favorable outcomes.\n\n2. The experiments presented in this study lack robustness, as they are primarily limited to toy experiments. I would appreciate observing performance metrics derived from real-world data applied to standard transformer sizes. It is well established that theoretical performance often diverges from practical outcomes in deep learning; thus, empirical experimentation is essential.\n\n3. This work indicates that shallow transformers may experience limitations due to low-rank attention. However, it is imperative to ascertain how these limitations manifest in deep transformers, as shallow transformers are not commonly employed in practice. If this limitation has been substantially mitigated in deep transformers, it may render further examination of this issue unnecessary."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could simple experiments or additional references to other studies and conclusions be designed to intuitively show the impact of the low-rank problem on the performance of mainstream Transformer models?\n2. Could you further elaborate on how the proposed “majority voting” method for improving low-rank models enhances mainstream Transformer models and validate this with relevant experiments? For the experiments, model selection could refer to those in Appendix B.1 and the models used in [1][2], while the datasets could refer to those in [1][2] or other widely recognized benchmark datasets.\n[1] Bhojanapalli S, Yun C, Rawat A S, et al. Low-rank bottleneck in multi-head attention models[C]//International conference on machine learning. PMLR, 2020: 864-873.\n[2] Shazeer N, Lan Z, Cheng Y, et al. Talking-heads attention[J]. arXiv preprint arXiv:2003.02436, 2020.\n3. Also, please refer to weaknesses for other concerns."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. A deeper exploration of the low-rank problem in Transformer models.\n2. The paper is well written and easy to follow.\n3. Authors provide ample mathematical proofs to support their conclusions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This article examines the limitations and potential of low-rank attention mechanisms in transformer models, demonstrating that while low-rank attention heads require significantly more heads to match the performance of full-rank models in approximating functions like nearest neighbors, and these limitations can be mitigated by increasing the depth of the model. Through theoretical analysis and empirical validation, the study highlights that full-rank models inherently possess superior representational capabilities, especially with fewer heads, and suggests that adding more layers could partly overcome the deficiencies of low-rank models, though at the cost of increased computational complexity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors mentioned after Theorem 2 that the theoretical framework should be extendable to cases where N>2. Could you provide more specific explanations for the reasoning behind this inference? This would help further understand the applicability of your theory to specific problems.\n\n2. Although the authors have demonstrated theoretically and experimentally that low-rank attention models are insufficient for fitting certain functions in various scenarios and are significantly weaker than full-rank attention models, further clarification is needed on how these issues impact current mainstream Transformer models (such as the new models shown in Table in Appendix B.1), how the proposed methods in the paper apply to these models, and how performance improvements are achieved. I believe that related experimental results and methodological extensions would greatly help illustrate the contribution of the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weaknesses"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Novel perspective on attention mechanisms: The paper offers a fresh perspective on the role of rank in attention mechanisms by using a simple and natural target function based on nearest neighbor search that can be represented using a single full-rank attention head for any context length, which is an interesting aspect of transformer architectures.\n\n2. Theoretical and Empirical Rigor: It combines theoretical proofs with empirical experiments, providing a robust exploration of the implications of low-rank attention on model capacity and efficiency."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper \"THE LOW-RANK BOTTLENECK IN ATTENTION\" investigates the impact of the rank of attention matrices on the representational capacity of attention-based mechanisms, particularly in transformers. It challenges the common practice of using low-rank attention and proposes that the rank can significantly influence the model's ability to approximate certain functions. Specifically, the authors present a simple and natural target function based on nearest neighbor search that can be represented using a single full-rank attention head for any context length. The paper presents theoretical analysis and empirical experiments to support its claims, suggesting that increasing the rank or the number of attention heads may lead to more expressive and parameter-efficient models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The results may rely heavily on the assumption of rotational invariance in the data distribution, which may not hold in all real-world applications.\n\n2. To make it easier for readers to understand, I kindly suggest that the authors explain in more detail the differences between this paper and previous work [1].\n[1] Low-Rank Bottleneck in Multi-head Attention Models. ICML 2020\n\n3. Can the proposed method demonstrate its effectiveness on more attention-based models?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. I am wondering the reason why authors choose to analysis nearest neighbor functions and are there any other choices of functions."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "Transformer have dominated many areas but there have been few studies on the choices of numbers of attention heads and dimensions used in attention mechanism. This paper raises doubts about this which is valuable for community to pay attention.\n\n1. The paper is well written and easy to follow. In-depth theoretical explanations are provided.\n\n2. For a simple and natural target function -- nearest neighbor function, authors show low-rank attention is fundamentally weaker than full-rank attention even when choosing very large head numbers.\n\n3. Also, this paper explores the solutions to mitigate the weakness of low-rank attention"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper questions the conventional number that community use for the rank of attention matrices and the number of attention heads. Authors provide in-depth theoretical explanations and experiments to support their arguments. In the high-accuracy regime, the required number of heads is growing exponentially to remain the performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper studies only is limited to shallow transformers which are not practical to large model."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We prove that attention layers with a small query/key dimension can have weak expressive power. This weakness cannot be overcome without using a massive number of attention heads."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024the,\ntitle={The Low-Rank Bottleneck in Attention},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y9Xp9NozPR},\nnote={under review}\n}"
},
"abstract": {
"value": "Attention-based mechanisms are widely used in machine learning, most prominently in transformers. However, hyperparameters such as the rank of the attention matrices and the number of attention heads are scaled nearly the same way in all realizations of this architecture, without theoretical justification. In this paper, we prove that the rank can have a dramatic effect on the representational capacity of attention. This effect persists even when the number of heads and the parameter count are very large. Specifically, we present a simple and natural target function based on nearest neighbor search that can be represented using a single full-rank attention head for any context length, but that cannot be approximated by low-rank attention unless the number of heads is exponential in the embedding dimension, even for short context lengths. Moreover, we show that, for short context lengths, adding depth allows the target to be approximated by low-rank attention. For long contexts, we conjecture that full-rank attention is necessary. Finally, we present experiments with standard multilayer transformers that validate our theoretical findings."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Learning theory",
"Expressive capacity",
"Expressive power",
"Transformer",
"Attention"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/372a20c92cbd4f90e77460e8f6472635bda70f58.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/ef624b8c3cbf03475df39fefe6a8cd42d415e7b1.zip"
},
"title": {
"value": "The Low-Rank Bottleneck in Attention"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
y9e1tcWlme | Tackling Decision Processes with Non-Cumulative Objectives using Reinforcement Learning | main | Active | reinforcement learning;markov decision processes;discrete optimization | reinforcement learning | 3;5;5;5 | 4;3;4;4 | 2;2;2;3 | 2;3;2;2 | 2;4;3;3 | 4.5 | 3.75 | 2.25 | 2.25 | 3 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "It seems to me that if the update function u of definition 1 is a constant function, no information is preserved and the constructed MDP could not have the same value function as the NCMDP. Is this right? If it is, this sort of choice should be excluded in definition 1.\nLater in the same paragraph, the authors note that an h_t recording all reward history is sufficient to construct an MDP but the state space is exponentially large so there must be some tradeoff between state space and \"Markovness\". How much information can be lost from the full history before we lose the Markov condition? Is there a characterisation of the functions u that satisfy this property?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Definition 1 and the possible applications of the method are interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In traditional MDPs, the agent is tasked with the optimisation of the policy value function that is the expected sum of the rewards. In a non cumulative markov decision process, the value function is replaced by the expected value of an arbitrary function of the rewards. \nThe authors propose a method to translate an NCMDP into a regular MDP and show the expected returns of the two are equal and apply MDP solving methods to NCMDPs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The variables u, h and $\\rho$ are discussed in the paragraph following the statements of equations 3,4,5. You should introduce them before.\nAlso, their description is too vague. For example \"This can be achieved by extending the state space with ht , which preserves all necessary\ninformation about the reward history\" does not give me a good sense of what the function u should be. \nI suggest the statement of theorem 1 be rearranged to: assumptions then conclusion, instead of the current: assumption then conclusion then more assumptions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- The paper gives very specific examples of construction of $u$ and $\\rho$ that allows an efficient reduction from NCMDP to MDP of small augmented state size. Can you give examples of more general classes of non-cumulative objective function $f$ that induces $h_t$ that is of reasonably small dimensions?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The NCMDP setting considered in this paper fits many applications that does not directly fit into the MDP setting with cumulative rewards. Some examples include the weakest-link problem in network routing which maximizes minimum reward, the Sharpe ratio in finance which maximizes the mean divided by standard deviation. \n- The paper provides a straightforward solution to NCMDPs by first map an NCMDP to a standard MDP, which allows direct application of black box MDP solvers. \n- It provides comprehensive empirical results applying this method to a range of environments with non-cumulative reward objective, including classical control, portfolio optimization with Sharpe ratio, discrete optimization with lost cost objective."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of finding an optimal policy for a special class of decision problem called non-cumulative Markov decision process (NCMDPs) where instead of the sum of rewards, it aims to maximize the expected value of an arbitrary function of the rewards. The paper solves NCMDPs by mapping it to standard MDPs, allowing direct application of MDP solver for NCMDPs. It also performs numerical experiments of classical control, portfolio optimization, and discrete optimization that use NCMDP objectives, and shows that their method improves both training time and final performance compared with standard MDP solvers with cumulative reward."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The mapping from NCMDP to MDP provided in equations (3)-(5) augments the state with $h_t$ that represent necessary information for reward history, and is updated as $h_{t+1}=u(h_t,\\tilde{r}_t)$ and satisfies $r_t=\\rho(h_t,\\tilde{r}_t)$. For arbitrary reward function $f(r)$, an essential factor to ensure the mapping to MDP is of reasonable size is to find an efficient functional form of $u$ and $\\rho$ that summarizes this information from reward history. However, the paper only shows a list of examples of special examples of $f$ and does not specify how to construct $u$ and $\\rho$ for arbitrary function $f$ in general, and does not provide an bound to the resulting state dimensions, which at worst case must contain the set of all historical rewards. I think this makes the overall contribution of the paper limited. \n- In the experimental section, for each environment, the paper compares their method of MDP reduction (with modified state and reward) with an RL agent with cumulative objective. This does not seem like a very fair comparison given the cumulative objective will be the wrong objective in each of these environments, and it is clear that correct specification of the objective will lead to better performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Can the proposed mapping of NCMDPs to MDPs be generalized to infinite-horizon MDPs? (when $f$ is defined as a function of set.)\n\n* It seems that if we transformed an NCMDP to a standard MDP, the resulting standard MDP is governed by the dynamics of $s_t$ and $h_t$ can be viewed as an auxiliary state. I am curious whether there are some connections with the framework called MDPs with latent dynamics (see [1], [2]).\n\n[1] Simon Du, Akshay Krishnamurthy, Nan Jiang, Alekh Agarwal, Miroslav Dudik, and John Langford. Provably\nefficient RL with rich observations via latent state decoding. In International Conference on Machine\nLearning, 2019.\n\n[2] Amortila, Philip, et al. \"Reinforcement Learning under Latent Dynamics: Toward Statistical and Algorithmic Modularity.\" arXiv preprint arXiv:2410.17904 (2024)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The authors consider the problem of control in NCMDPs, which is important and understudied in the RL community.\n\n* The proposed mapping of NCMDPs to standard MDPs is simple and intuitive, and can effectively solve many problem instances of NCMDPs.\n\n* The empirical studies are concrete, showing both the necessity of considering the problem of control in NCMDPs and the effectiveness of the proposed mappings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors consider the problem of control in finite-horizon Non-cumulative MDPs (NCMDPS). They propose a general mapping of NCMDPs to standard MDP, and hence the problem can be solved by existing standard RL methods. They use a series of numerical experiments to illustrate the NCMDP problems and the proposed transformations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The paper has technical flaws, i.e., the function $f$ is ill-defined. In the original problem statement, $f$ is defined as a function on $\\mathbb{R}^T$. But later on, the authors also use notations like $f(r_1,...,r_t)$, where $f$ should be treated as a function on $\\mathbb{R}^t$. I think it may be helpful to define $f$ as a function of a set ($f(\\\\{r_1,...,r_t\\\\})$, $\\\\{r_1,...,r_t\\\\}$ is the set of $t$ rewards, $t$ can be any integer between $1$ and $T$). This works for all problem instances in Table 1.\n\n* The authors overstate their contributions. They claim their methods work for arbitrary functions $f$. However, as indicated in the previous comment, Definition 1 is invalid for general $f$ on $\\mathbb{R}^T$. Even if $f$ is treated as a function of a set, it turns out only for certain choices of $f$ there exist fix-size $h_t$ and corresponding functions $u$ and $\\rho$. I think the authors should try to find a classification of functions f with constant-size state adaptions. Even some preliminary results such as sufficiency conditions or necessary conditions would be helpful.\n\n* I think in empirical studies the authors should compare their methods with prior works on the control of NCMDPs with a specific choice of objectives and I am curious if the proposed method (which is more general) can produce comparable performances."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1) What are the off-policy implications of this method?\n2) A regular average would appear to be a common and useful non-cumulative objective to include in the paper. Was there a reason that it was not included?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper provides a theoretically-sound and easy-to-use framework that makes it possible to optimize non-cumulative objectives, by mapping such objectives to the regular cumulative formulation, such that optimizing the cumulative objective results in the optimization of the non-cumulative objective. This framework clearly has several potential use-cases in various domains. The paper is polished, well-written, and the technical concepts are explained in a relatively clear manner."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a decision-making framework that allows for the optimization of non-cumulative objectives, by mapping such objectives to the regular cumulative formulation, such that optimizing the cumulative objective results in the optimization of the non-cumulative objective. The authors show that this MDP framework satisfies important theoretical properties that make it possible to find an optimal policy, and provide several empirical experiments in support of this."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have two concerns regarding this paper:\n\nThe first of which is that while the authors do a good job explaining how their framework can be used, they fail to properly formalize the theoretical conditions that they are operating in. For example, they do not address whether the state and action-spaces are or can be finite/infinite, if there is a restriction on the type of policy that can be used (stationary, stochastic, etc.), as well as whether there are implications of their framework being used in off-policy settings. Because of this, the extent to which their framework can be utilized is unclear.\n\nMy second concern is related to the experiments. In particular, the comparison of non-cumulative objectives to the cumulative objects seems unintuitive, given that no-where in the paper do the authors claim the non-cumulative objectives are better than the cumulative objectives. Rather, it would be more intuitive, and consistent with the rest of the text, if the experiments showed that the proposed methods indeed are able to find the optimal policy for the non-cumulative objective. As such, it would appear that the experiments in the current draft of the paper do little to support the claim that the proposed methods can be used to optimize non-cumulative objectives.\n\nAs such, I am open to increasing my score if the authors can properly formalize the theoretical conditions that they are operating in, and either 1) the authors can convince me that the experiments in the current draft are adequate, or 2) the authors improve the existing experiments in such a way that they address my concerns, or 3) the authors can provide additional experiments that address my concerns."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We extend the class of problems that can be tackled through reinforcement learning by introducing a general mapping of decision processes with non-cumulative objectives to standard Markov decision processes."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024tackling,\ntitle={Tackling Decision Processes with Non-Cumulative Objectives using Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y9e1tcWlme},\nnote={under review}\n}"
},
"abstract": {
"value": "Markov decision processes (MDPs) are used to model a wide variety of applications ranging from game playing over robotics to finance. Their optimal policy typically maximizes the expected sum of rewards given at each step of the decision process. However, a large class of problems does not fit straightforwardly into this framework: Non-cumulative Markov decision processes (NCMDPs), where instead of the expected sum of rewards, the expected value of an arbitrary function of the rewards is maximized. Example functions include the maximum of the rewards or their mean divided by their standard deviation. In this work, we introduce a general mapping of NCMDPs to standard MDPs. This allows all techniques developed to find optimal policies for MDPs, such as reinforcement learning or dynamic programming, to be directly applied to the larger class of NCMDPs. Focusing on reinforcement learning, we show applications in a diverse set of tasks, including classical control, portfolio optimization in finance, and discrete optimization problems. Given our approach, we can improve both final performance and training time compared to relying on standard MDPs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"reinforcement learning",
"markov decision processes",
"discrete optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/418398a45b4ea19833ed50f139ad0836c17f9445.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Tackling Decision Processes with Non-Cumulative Objectives using Reinforcement Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
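The reviews of the record above repeatedly ask how the update function u and the reward map rho of the NCMDP-to-MDP construction can be built, and how large the augmented state h_t must be. The sketch below is an illustrative toy example only, not the authors' code: it assumes a Gym-style reset/step interface and uses f = max of rewards, one of the example objectives named in the abstract; the wrapper class and its names are hypothetical.

```python
# Illustrative sketch (not from the paper): an NCMDP with objective
# f(r_1, ..., r_T) = max_t r~_t mapped to a standard MDP by augmenting the
# state with h_t, the running maximum of the original rewards.
#   update:  h_{t+1} = u(h_t, r~_t)   = max(h_t, r~_t)
#   reward:  r_t     = rho(h_t, r~_t) = max(h_t, r~_t) - h_t   (with r_1 = r~_1)
# The modified rewards telescope, so their episode sum equals max_t r~_t and a
# standard cumulative-return RL algorithm optimizes the non-cumulative objective.

class MaxRewardWrapper:
    """Hypothetical wrapper around a Gym-style episodic environment."""

    def __init__(self, env):
        self.env = env
        self.h = None  # h_t: running maximum of the original rewards

    def reset(self):
        self.h = None
        obs = self.env.reset()
        return (obs, self.h)  # augmented state (s_t, h_t)

    def step(self, action):
        obs, r_orig, done, info = self.env.step(action)
        if self.h is None:
            reward = r_orig                        # first step emits r~_1
            self.h = r_orig
        else:
            reward = max(self.h, r_orig) - self.h  # increase of the running maximum
            self.h = max(self.h, r_orig)
        return (obs, self.h), reward, done, info
```

For an objective such as the Sharpe ratio (mean divided by standard deviation), h_t could instead carry fixed-size sufficient statistics such as the step count, the running sum, and the running sum of squares; the reviewers' open question is how to characterize, in general, which functions f admit such a constant-size h_t.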
y9tQNJ2n1y | CASE-Bench: Context-Aware Safety Evaluation Benchmark for Large Language Models | main | Active | safety;benchmark;context;large language model;contextual integrity | datasets and benchmarks | 5;5;5;5 | 3;3;2;3 | 3;3;2;2 | 2;2;2;2 | 2;3;3;4 | 5 | 2.75 | 2.5 | 2 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Do the authors have any information on the identities/demographics of their human annotators? Ignoring settings where LLMs should decline to respond because of legal liability, the choice of whether an LLM should respond/decline seems entirely subjective. Different individuals will have different assessments, because of their personal politics, religious beliefs, cultural backgrounds, and more. So largely, this benchmark would seem to measure acceptance/decline accuracies grounded against the specific human pool the authors surveyed. Having more information about this pool would be helpful! \n- The underlying message of the paper is that context matters in determining whether an LLM should respond to a query. For instance, an LLM intended to train medical professionals should probably respond to medical questions. Why is it fair, then, to evaluate LLMs like GPT-4o-mini or Claude, which are intended to be used in the “layperson chatbot” context? I’m not sure I see why merely providing a “context” to these chatbots should actually alter their behavior. Given that a prevailing concern with these models is that users might employ deception to elicit responses from the models that developers do not desire (i.e., via jailbreaking or prompt-engineering)–shouldn’t these LLMs also decline to respond when the context is provided? The argument that these LLMs should respond when context is provided presumes that users will always be truthful when providing information in the prompt. \n- Could the authors share more information on the contexts?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Overall the paper is extremely well-written and clear.\n- The core idea tackled by the paper–understanding how context might (or should) affect answer/decline decisions by LLMs–is interesting.\n- The study methodology is extremely rigorous. \n- The experiments are also well-done and interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "- The paper develops a benchmark for evaluating LLMs’ refusal accuracy for problematic queries. The key point of differentiation is that the authors focus on “context.” This is the idea that auxiliary information might change whether or not an LLM should refuse to answer a query. \n- The paper evaluates a range of LLMs on this benchmark, and finds that LLMs are generally over-moderated.\n- They also conduct an extensive survey of individuals to measure how context influences perceptions regarding when LLMs should refuse to answer."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper tries to ground in the formal framework offered by contextual integrity (CI) theory. Section 2 and 3.1 spend time discussing CI and mapping it to LLMs. But its not obvious why CI is helpful here or the extent to which CI is even used in the construction of the benchmark? As best I can tell, CI is explained in the prompt used to generate conversational contexts. But it isn’t clear whether that’s necessary, or whether GPT-4o is even using that information. And the paper also mentions that all contexts were revised by the authors themselves.\n- The primary contribution (from a data perspective) would seem to be the different contexts the authors define–especially as the queries themselves come from existing work. The paper doesn’t really discuss what these contexts look like, how they were crafted, or how often they repeat across the different queries?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How reproducible is the automatic content generation with an open source model? Have you tried this?\n2. Manual revision process appears to be expensive. Were there attempt to automate this? If so, with what level of success?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- extensive benchmark collection, with 900 distinct contexts with queries from 45 different categories.\n- the inheritance of contextual integrity theory is interesting and well motivated\n- over 47K human annotations from over 2K annotators, resulting in a high quality dataset"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper puts forth CASE-Bench, a Context-Aware Safety Evaluation Benchmark which integrates context into subsequent safety assessments of LLMs. The benchmark boasts 900 distinct contexts with queries from 45 different categories."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- the data was created with gpt-4o, bringing into question the potential for commercial use\n- explanation of limitations is lacking, would be good to delve into potential areas for improvement or future work"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper identifies a key issue in current LLM safety evaluations: too much focus on rejecting single queries without considering context, which can lead to LLMs over-rejecting legitimate requests.\n2. The research methodology is solid, using Contextual Integrity theory to formalize context, power analysis to determine annotator numbers, and various statistical methods to verify result significance.\n3. The dataset creation process is thorough, with 900 query-context pairs, 21 annotators per task, and robust quality control measures."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces CASE-Bench, a context-aware benchmark for evaluating LLM safety. The main innovation is incorporating context into safety assessments using Contextual Integrity theory. The study validates the significant impact of context on safety judgments through large-scale human annotation (2000+ annotators) and evaluates several open-source and commercial LLMs on this benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I wonder how the authors address this potential issue: many current jailbreaking techniques [1,2] actually work by creating \"safe-looking\" contexts to trick LLMs into generating dangerous content. Could context-based safety evaluation create new security risks?\n2. Regarding metrics, the paper mainly uses basic measures like accuracy and recall. Have the authors considered adding more detailed analysis, like measuring the impact of different types of errors?\n3. From a practical standpoint, I'm curious about the authors' thoughts on how to integrate this context-based evaluation approach into real-world LLM safety mechanisms?\n\n[1] Scalable and Transferable Black-Box Jailbreaks for Language Models via Persona Modulation\n[2] Quack: Automatic Jailbreaking Large Language Models via Role-playing"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In which use-cases you find the differentiation by context important/useful?\n2. How can malicious actors that fake positive intent could be detected?\n3. In the results in Fig. 3, if I understand correctly, some unsafe contexts lead some categories to rated by humans as safe. That's unexpected and counterintuitive (categories between 10-20 on the x axis). Do you have an explanation for this? Can you share some examples from this category (I couldn't find any)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "A thorough analysis of the influence of context in deciding whether a prompt/prompt response is problematic\nA dataset that may be useful for other research"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of understanding whether a prompt/prompt reply is problematic or not given a particular context. It proposes that not all responses are problematic and some responses are problematic only if they are in a particular context. As such, it creates a dataset on 45 different categories, all with positive and problematic context. The dataset is used to evaluate state of the art models and also alignment/agreement with human annotators."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I'm personally not convinced that the context should dictate the perceived harm in a prompt/prompt response. A malicious actor could always fake a positive intent/context to get the information they want and create harm. As such, I find the premise/motivation of the work weak."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A context-aware safety evaluation benchmark for safety evaluation that considers context."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024casebench,\ntitle={{CASE}-Bench: Context-Aware Safety Evaluation Benchmark for Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=y9tQNJ2n1y},\nnote={under review}\n}"
},
"abstract": {
"value": "Aligning large language models (LLMs) with human values is essential for their safe deployment and widespread adoption. Current LLM safety benchmarks often focus solely on the refusal of individual problematic queries, which overlooks the importance of the context where the query occurs and may cause undesired refusal of queries under safe contexts that diminish user experience. Addressing this gap, we introduce CASE-Bench, a Context-Aware Safety Evaluation Benchmark that integrates context into safety assessments of LLMs. CASE-Bench assigns distinct, formally described contexts to categorized queries based on Contextual Integrity theory. Additionally, in contrast to previous studies which mainly rely on majority voting from just a few annotators, we recruited a sufficient number of annotators necessary to ensure the detection of statistically significant differences among the experimental conditions based on power analysis. Our extensive analysis using CASE-Bench on various open-source and commercial LLMs reveals a substantial and significant influence of context on human judgments ($p<$0.0001 from a z-test), underscoring the necessity of context in safety evaluations. We also identify notable mismatches between human judgments and LLM responses, particularly in commercial models within safe contexts. Code and data used in the paper are available at https://anonymous.4open.science/r/CASEBench-D5DB."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"safety",
"benchmark",
"context",
"large language model",
"contextual integrity"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3121a47f2b3b9fe2b5f7a2502c7a347404ea5730.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "CASE-Bench: Context-Aware Safety Evaluation Benchmark for Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
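The CASE-Bench reviews above mention the use of a power analysis to decide how many annotators are needed and a z-test on the resulting judgments. The snippet below is a generic, illustrative sketch of that kind of calculation, not the authors' code; the proportions, significance level, power target, and vote counts are assumed values, not figures from the paper.

```python
# Illustrative sketch: sizing an annotator pool with a two-proportion power
# analysis, then comparing conditions with a z-test. All numbers are assumed.
from statsmodels.stats.power import NormalIndPower
from statsmodels.stats.proportion import proportion_effectsize, proportions_ztest

# Assumed fractions of "safe" judgments with and without context.
p_with_context, p_without_context = 0.65, 0.50
effect_size = proportion_effectsize(p_with_context, p_without_context)  # Cohen's h

# Annotators needed per condition for alpha = 0.05 and power = 0.8.
n_per_condition = NormalIndPower().solve_power(
    effect_size=effect_size, alpha=0.05, power=0.8, alternative="two-sided"
)
print(f"annotators needed per condition: {n_per_condition:.0f}")

# After collection, a z-test compares the two observed proportions.
safe_votes = [130, 100]       # hypothetical "safe" votes per condition
n_annotators = [200, 200]     # hypothetical annotators per condition
z_stat, p_value = proportions_ztest(safe_votes, n_annotators)
print(f"z = {z_stat:.2f}, p = {p_value:.4f}")
```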
y9xNQZjUJM | Collaborative Theorem Proving with Large Language Models: Enhancing Formal Proofs with ProofRefiner | main | Desk Reject | Agent-base System;Reasoning | transfer learning, meta learning, and lifelong learning | Haoyi Zhang;Andrew Liu;Zixuan Wang;Feiyang Wang | ~Haoyi_Zhang1;~Andrew_Liu8;~Zixuan_Wang17;~Feiyang_Wang4 | 0 | 0 | 0 | 0 | 0 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": {
"value": "The paper is desk rejected for significant textual overlap with [1]. This decision was confirmed by multiple members of the program committee. Table 1 in both papers is identical. The related work sections also have significant overlap. The amount of textual overlap without proper attribution makes this a case of plagiarism. \n\n[1] Towards Large Language Models as Copilots for Theorem Proving in Lean by Peiyang Song et al. https://arxiv.org/pdf/2404.12534"
},
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "Submission Desk Rejected by Program Chairs"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "agent-based system for math reasoning"
},
"_bibtex": {
"value": "@misc{\nzhang2024collaborative,\ntitle={Collaborative Theorem Proving with Large Language Models: Enhancing Formal Proofs with ProofRefiner},\nauthor={Haoyi Zhang and Andrew Liu and Zixuan Wang and Feiyang Wang},\nyear={2024},\nurl={https://openreview.net/forum?id=y9xNQZjUJM}\n}"
},
"abstract": {
"value": "Abstract: Theorem proving presents a significant challenge for large language models (LLMs) because formal proofs can be rigorously verified by proof assistants like Lean, leaving no room for errors. Existing LLM-based provers typically operate autonomously, but they often struggle with complex and novel theorems where human insights are crucial. We propose a new framework that positions LLMs as collaborative assistants in theorem proving to address this. This framework enables the seamless integration of LLM inference into the Lean environment, allowing developers to build various proof automation tools. These tools offer features such as suggesting proof steps, completing intermediate goals, and selecting relevant premises, thereby enhancing the theorem-proving process. Users can leverage our pretrained models or integrate their own, supporting local and cloud-based execution. Experimental results demonstrate that our approach is more effective in aiding humans and automating the theorem-proving process than existing rule-based systems. Additionally, we introduce a system called ProofRefiner, which refines questions and answers through dynamic dialogue adjustments to ensure relevance and precision."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Haoyi_Zhang1",
"~Andrew_Liu8",
"~Zixuan_Wang17",
"~Feiyang_Wang4"
]
},
"authors": {
"value": [
"Haoyi Zhang",
"Andrew Liu",
"Zixuan Wang",
"Feiyang Wang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Agent-base System",
"Reasoning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "zhang|collaborative_theorem_proving_with_large_language_models_enhancing_formal_proofs_with_proofrefiner"
},
"pdf": {
"value": "/pdf/39e133160ee46970609314d991c12a9d9f626400.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Collaborative Theorem Proving with Large Language Models: Enhancing Formal Proofs with ProofRefiner"
},
"venue": {
"value": "ICLR 2025 Conference Desk Rejected Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Desk_Rejected_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
||||||||||
yAN2oPHs7y | Neuro-Symbolic Rule Lists | main | Active | Neuro-Symbolic;Rule Induction;Interpretability | interpretability and explainable AI | 5;5;5;6 | 5;4;4;4 | 2;3;2;3 | 2;3;2;3 | 3;4;3;3 | 5.25 | 4.25 | 2.5 | 2.5 | 3.25 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Feel free to discuss any of the main weaknesses I listed above.\n\n- line 204: \"In the end, we seek to obtain strict logical rules for use in a rule list.\" If I understand correctly, once the temperature is low enough, the learned predicates will already be \"almost\" discrete. At this stage, couldn't you just replace them with their hard counterparts, and obtain a hard rule list classifier? The lower the temperature, the closer the original soft RL classifier and the corresponding hard RL classifier will behave in terms of predictions and loss, meaning there's little to be gained, performance-wise, from using the soft version, and much to be gained (for the few inputs that fall in the affected region of input space), interpretability-wise, by using the hard version. So why not do that? Have you evaluated empirically what happens if you follow this route?\n\n- Appendix C: what split did you use for evaluating performance during grid search?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "**Originality**: NyRules combines existing ideas and improves on them. The level of novelty is acceptable.\n\n**Quality**: All technical contributions appear to be good. The experiments are carried out on several \"real-world\" data sets as well as on synthetic data, and the results are 5-fold cross-validated (which is refreshing; however: why 5 and not 10+?). The choice of competitors is also sensible. Performance improvement on \"real-world\" data is rather robust, although not dramatic in most cases (see also Weaknesses).\n\n**Clarity**: The text is generally well written, and figures/plots are altogether clean and useful. However, parts of the manuscript feel a bit rushed: I was puzzled by a few equations (which are not always entirely formal) and statements, see below.\n\n**Significance**: NyRules contributes to research on differentiable rule learning, well motivated by explainability requirements. The contribution is neither niche nor exceedingly significant."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Neuro-Symbolic Rule Lists (NyRules), an upgrade to standard rule\nlists -- a well known class of white-box classifiers -- that in addition to learning\nrules also learn how to discretize continuous input features automatically. (This\nis where the \"neuro-symbolic\" moniker comes form.) This feat is achieved by\nintroducing a continuous relaxation of rule list learning, which is combinatorial\nin nature. NyRules is similar in spirit to existing differentiable rule learning\napproaches, except it focuses specifically on rule lists and reuses/improves upon some\nkey techniques: 1) differentiable thresholding functions from Yang et al.,\nused as-is; 2) the differentiable conjunction-of-a-subset operation from Xu et al., improved\nto avoid vanishing gradients; 3) linear relaxation + annealing to enable end-to-end learning of\nthe rule list. NyRules are evaluated on several data sets against several\ncompetitors."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Quality**: \n\n- There is a pretty big issue with Table 1, which I wish sorted out: in many situations, XGBoost performs *much* better than any of the competitors, yet it is never marked in bold in Table 1. This is the case also for the rings dataset. This choice is not explained in the text (I may have missed it!) and risks conveying a twisted perception of performance improvement.\n\n I understand that boosted trees can be less interpretable than rule lists, and that -- since the paper does not attempt to evaluate interpretability of learned models at all -- this cannot be demonstrated. But as a result, the results hyperfocus on prediction accuracy, where NyRules does *not* have an edge over XGBoost (to the best of my understanding).\n\n **Suggestion**: the authors should explicitly explain in the paper why XGBoost results are not bolded, even when they outperform other methods, and clarify that the focus is on comparing interpretable rule-based methods, with XGBoost included as a non-interpretable benchmark. Adding this explanation would provide important context and avoid potential misinterpretation of the results.\n\n- A key issue is that there is not specific study of the impact of the threshold learning step, which is a big selling point of the proposed method. I am left wondering whether it is indeed important for performance and interpretability.\n\n **Suggestion**: the authors could conduct an ablation specifically focused on the threshold learning component by, e.g., comparing NyRules with a version that uses pre-discretized features, to directly demonstrate the impact of learned thresholds on performance and potentially on interpretability.\n\n- The authors equate rule length with interpretability, but this is not exactly exact.\n\n **Suggestion**: the authors should acknowledge this limitation in the Limitations section discussing that while rule length is used as a proxy for interpretability, a more comprehensive evaluation of interpretability, ideally including user studies, would provide a more accurate assessment.\n\n**Clarity**:\n\n- NeSy is **neuro**-symbolic because the perceptual component -- responsible for encoding low-level inputs such as images into high-level symbolic variables -- is implemented using neural networks. In this paper the perception component is minimal: it corresponds to the thresholding step. I honestly feel calling the proposed method \"neuro-symbolic\" is a stretch, and could be interpreted as disrespectful of what people in NeSy AI aim to do. I am not going to penalize the paper based on this, but I wanted to make it clear that I did not appreciate this choice.\n\n- Some equations are repeated unnecessarily (the definition of r(.) and eq. 2).\n\n- p2: Last equation: $\\alpha \\le x_i \\le \\beta$ is not a function, it is a condition; please add an indicator function. This also applies to line 184.\n\n- Eq 1: rl(.) is undefined.\n\n- Eq 1: the \"s.t.\" does not make much sense, because this is neither a satisfiability nor an optimization problem. It should be replaced with an if-and-only-if. In short, I'd write:\n$$\n rl(x) = c_j \\qquad \\Leftrightarrow \\qquad \\exists j s.t. (a_j(x) = 1) \\land \\forall i . p_i > p_j . (a_i(x) = 0).\n$$\nSince priorities are unique, the condition $i \\ne j$ is unnecessary. Actually, in its current form the equation seems incorrect, as it requires lower-priority conditions not to fire (i \\ne j, p_i < p_j), which to the best of my understanding is not needed. 
Or did I get this wrong?\n\n- line 188: mangled sentence.\n\n- line 262: what does the argmax_l act on? The argument is c.\n\n- line 278: same.\n\n- I cannot follow the comment at line 280: what is the point of talking about boosted rule sets at this point? This is the only place in the paper where they are mentioned.\n\nI am willing to increase my score provided the authors address the main issues I raised."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Answers / changes following the major comments listed above will address most of the limitations of this current version of the paper.\n\nAdditional questions:\n- The relevance of the predicate layer is highlighted for the Ring dataset that has exclusively continuous features. Did you observe a drop in performance compared to other methods for datasets with no continuous features ? \n- Have you tried other differentiable logical conjunction layers ?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is clearly written and of great quality.\nThe solution proposed to solve the vanishing gradients for the chosen computation of the conjunction is well formulated and the efficiency of the relaxation proven with the ablation study.\nSupplementary material (with code) is provided for reproducibility."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper paper proposes a novel end-to-end differentiable model to learn rule lists called NyRules and addresses limitations from existing work.\nThe method takes into account the discretisation of continuous features into predicates, the learning of conjunctions & rules as well as the ordering of the rules into rule lists.\nNyRules performances were validated on both synthetic and real world datasets and compared to existing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major comments:\n- Discretization on the fly has already be done in existing works (for example: _Kusters, Remy, Yusik Kim, Marine Collery, Christian de Sainte Marie, and Shubham Gupta. \"Differentiable Rule Induction with Learned Relational Features.\" In International Workshop on Neural-Symbolic Learning and Reasoning. 2022_). However, the thresholding layer presented in this paper is still very relevant and different from _Kusters et al_ approach for instance.\nThe paper's contributions need to be put into perspective. Also a comparison between the two different thresholding layers would strengthen the quality of the paper.\n- The improvements provided by the rule ordering are not proven. An additional ablation study would be required in order to differentiate the impact of the predicate layer and the active priority. Especially for comparison with RLNet which is also a neural-based approach to learn rule lists.\n- The final else (with no predicates) in the rule list is not described nor explained.\n- The fact that $l=2$ (binary classification) in the entire paper should be mentioned from start explicitly. It is indirectly mentioned only in 3.4 where the BCE loss is specified, in 5.1 where only datasets for binary classification are used and in future works (6.2). The reader is told this method is applicable for multi-classification with parameter $l$.\n- The interpretation of $c_j$ in the rules is not explicit. \"if $a_j$ then $c_j$\" is then converted in Figure 1 into \"if [...] then P(Disease) = 94%\" for example. Where is that value coming from ? softmax of $c_j$ ? I believe it needs to be explicit.\n\nOther comments:\n- Related work is lacking contributions from different fields. For instance, fuzzy logics with studies like _van Krieken, Emile, Erman Acar, and Frank van Harmelen. \"Analyzing differentiable fuzzy logic operators.\" Artificial Intelligence 302 (2022): 103602._ which analysed different differentiable logical conjunctions.\n- The description of the temperature annealing schedule seems to be missing.\n- In 3.3 the number of rules $k$ and the number of classes $l$ _are_ (typo?) fixed beforehand. \n- Figure 3.c, X1=1 and X2=1 = 0 ?\n- References to datasets sources are missing\n- Pre-processing of the datasets is not described. How are the datasets continuous feature discretised for other methods ?\n- The distribution of the features types in the datasets could be provided to highlight the relevance of the predicate layer of NyRules."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Is there a reference for GREEDY? What implementations of the algorithms did you use?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- nice formal presentation of the work"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a rule learning system for numerical data, that integrates discretization and learning. This is, by itself, not a novel contribution. Classic rule learning algorithms such as Ripper can do that as well. It also optimizes the rule order of the resulting list, which is a somewhat debatable contribution, because rules in a decision list are not independent of the order. This is also a problem for interpretability, BTW. Results are presented in F1-scores, which is somewhat unconventional for work in this area, and makes a comparison harder. I also miss a comparison to the (still) standard benchmark Ripper, in the Java implementation available, e.g., in Weka (this is important, the Python implementation Wittgenstein is considerably worse, and unfortunately many of the published claims of superiority over Ripper are based on that implementation). As a result, I find it hard to assess the value of this work. I do think that the paper (although generally well written) could be much stronger and more credible with respect to the experimental evaluation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- experimental results are somewhat non-standard, in particular the use of F1. Why do you think F1 is a good measure for evaluating classification problems? It is quite adequate for information retrieval, because there is an asymmetry between the positive and negative class, so that it is o.k. that unretrieved irrelevant documents do not influence the result, but I don't see why ignoring true negatives should be a good idea in any of the problems studied here.\n\n- The results also seem to be flawed at times. For example, results of CORELS at 0.33 or 0.27 when other algorithms are above 0.9 (similar for SBRL) indicate for me that the algorithms have not been adequately used. Possible explanations could be that they did not learn any rules, or learned for the wrong class, or similar. In my view, no algorithm can be as bad as 0.16 for Titanic. I guess you could invert its predictions and would get a better result (this would be clearer if accuracy had been used as an evaluation metric).\n\n- The authors seem to assume that decision lists are just an ordering of rules. Of course, an ordered rule set is a decision list, but the opposite is not true. This affects optimality and interpretability. Consider, for example the decision list\nIF A then +\nELSIF B then -\nELSIF C then +\nELSE -\nNote that each rule is only valid in context. It is, e.g., not true that all C's are positive, only if they are not B.\nIf you simply reorder the rules in the decision list (e.g., by swapping the two positive rules) this may drastically change the semantics. This is why classical rule learning algorithms like Ripper always learn a decision list in Sequence (Ripper does optimize it later on). \n\nOn the other hand, you could also rewrite the decision list as a rule set as follows:\nIF A then +\nIF B and NOT A then -\nIF C and NOT B and NOT A then +\nIn that case, you could reorder the rules as you wish. If rules like this are learned, they would be longer (which would be consistent with what the authors observed on their algorithm), but, on the other hand, the ordering is practically irrelevant.\n\nSo probably, the authors learn a mixture of both (optimizing both the order and the rule bodies at the same time), but this should be discussed deeper.\n\n- The ordering also has an effect on interpretability, of course. A long decision list cannot be easily interpreted, because the rules cannot be interpreted independently, you always have to interpret them in context of all previous rules. So the claim already posed in the introduction that decision lists are interpretable is debatable, in particular for long lists. It does hold if rules can be interpreted independently, but in that case the order would be irrelevant (see above). The authors should more clearly discuss the trade-off between ordered rules and interpretability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Q1. Could you show the experimental results on the average running times of the proposed method to the baselines? \n- Q2 (optional). From the objective function in Section 3.4, the proposed method does not explicitly impose constraints on the sparsity of predicates in each rule. However, from Figure 5(b), the experimental results suggest that the proposed method tends to obtain sparse rules, which was surprising to me. Did you carry out any post-processing for it? In addition, I guess that the required length of each rule to maintain sufficient accuracy may vary depending on the dataset. Does the trend in Figure 5(b) differ depending on the dataset?\n- Q3 (optional). In the learning problem of the proposed method, the constraints on minimum and maximum supports are relaxed as soft constraints (i.e., regularizer). In the experiments, how often did the learned rule lists violate the constraints on minimum and maximum supports? Also, while the learned rule lists may not violate this constraint if we set $\\lambda$ to be a sufficiently large value, how does it affect the accuracy or computational cost of the proposed method?\n- Q4 (optional). If I understand correctly, the proposed learning algorithm gradually decreases the annealing temperatures $t_{\\pi}$ and $t_{rl}$. Could you show the pseudo-code of a procedure for determining the values of $t_{\\pi}$ and $t_{rl}$ in each step during training?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Overall, I like this paper, and I think the authors make solid contributions to the community on interpretable machine learning. \n- S1. This paper is well-written, well-organized, and easy to follow. \n- S2. The authors introduce a differentiable representation of rule lists, which is a novel approach to the best of my knowledge. While the proposed representation mainly consists of existing methods, including [Yang et al. 2018], [Xu et al. 2024], and [Jang et al. 2017], it also includes some unique techniques, such as relaxed formulation for alleviating the vanishing gradients issues. In addition, I think the illustrative demonstration of each continuous relaxation (Figures 3 and 4) helps readers understand the effectiveness of the proposed approach well. \n- S3. The experimental results demonstrated that the proposed method often achieved better accuracy than the existing methods. The proposed method also attained comparable accuracy to XGBoost, which was surprising to me."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new framework for learning rule lists by gradient descent, named NyRules. First, the authors introduce a differentiable representation of a rule list using some existing techniques, such as the Gumbel-Softmax. Then, they formulate the task of learning a rule list that minimizes the empirical risk and the constraints on the support as a continuous optimization problem. One of the advantages of the proposed method is that it does not require pre-processing steps such as discretization and rule mining, which are required by most existing methods and often incur additional computational costs. Experimental results demonstrated that the proposed method achieved good accuracy compared to the baselines across all the datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While I think this is a good paper, I believe the following concerns should be addressed to improve the quality of this paper: \n- W1. One of my major concerns is the computational cost of the proposed method. I could not find the experimental results on the running times of the proposed method and baselines, even in the appendix. To claim that the proposed method overcomes the scalability issue of the existing methods, I think the running time of the proposed method should be compared with the existing methods. Although I agree that it is a desirable advantage that the proposed method does not require discretization and rule mining as pre-processing, I am worried about the scalability of the proposed method because its average running time was not reported in the paper. \n- W2. This paper looks to be missing descriptions of some important information. For example, this paper seems to assume a binary classification task implicitly. I think it should be explicitly mentioned at, for example, the beginning of Section 2. In addition, as mentioned above, this paper seems to lack the experimental results on the computational times of the proposed method and baselines. I think such information should be reported because scalability is one of the important factors for practitioners when deciding which algorithm they should use for their task."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce NyRules, an end-to-end trainable model that unifies discretization, rule learning, and rule order into a single differentiable framework."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024neurosymbolic,\ntitle={Neuro-Symbolic Rule Lists},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yAN2oPHs7y},\nnote={under review}\n}"
},
"abstract": {
"value": "Machine learning models deployed in sensitive areas such as healthcare must be interpretable to ensure accountability and fairness.\nRule lists (_**If**_ $\\texttt{Age} < 35 \\wedge \\texttt{Priors} > 0$ _**then**_ $\\texttt{Recidivism} = $True, _**else if**_ \"Next Condition\" ...)\noffer full transparency, making them well-suited for high-stakes decisions.\nHowever, learning such rule lists presents significant challenges. Existing methods based on combinatorial optimization require feature pre-discretization and impose restrictions on rule size. Neuro-symbolic methods use more scalable continuous optimization yet place similar pre-discretization constraints and suffer from unstable optimization. To address the existing limitations, we introduce NyRules, an end-to-end trainable model that unifies discretization, rule learning, and rule order into a single differentiable framework. \nWe formulate a continuous relaxation of the rule list learning problem that converges to a strict rule list through temperature annealing.\nNyRules learns both the discretizations of individual features, as well as their combination into conjunctive rules without any pre-processing or restrictions.\nExtensive experiments demonstrate that NyRules consistently outperforms both combinatorial and neuro-symbolic methods,\neffectively learning simple and complex rules, as well as their order, across a wide range of datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neuro-Symbolic;Rule Induction; Intepretability"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/142754718917cfd6b93513ef46d12dc79268e987.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/f889ceb6510d333a99b87ca86b3e449c309628f7.zip"
},
"title": {
"value": "Neuro-Symbolic Rule Lists"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yAU5X77S06 | ADMP-GNN: Adaptive Depth Message Passing GNN | main | Active | Graph Neural Networks | learning on graphs and other geometries & topologies | 3;3;3;6 | 3;4;4;3 | 3;2;2;3 | 3;2;1;3 | 3;3;2;3 | 3.75 | 3.5 | 2.5 | 2.25 | 2.75 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the discussed weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Strengths:\n1. As said by the authors, it is sensible that adaptivity is important (in NNs and also in GNNs).\n\n2. The adaptivity mechanism itself (not the method as a whole) seems simple (which is good)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an adaptive message-passing scheme that allows the updating of nodes in an independent way with respect to the number of message-passing steps (layers). The authors propose several ways to train the network using this mechanism, and several test time usages with different encodings. The paper concludes with several experiments to demonstrate the performance of ADMP-GNN."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses:\n\n1. The paper lacks discussions on highly relevant papers that suggest adaptivity in GNNs, from the message-passing (most important for this paper), and other mechanisms like the activation function and the normalization layer. Please see references R1-R5 below. \n\n2. Regarding W1, the papers R1 and R2 seem to do the same basic idea as here (even if the implementation is a bit different), so it does not seem novel, and it is also not discussed in this paper.\n\n3. Adaptivity in message passing can also be obtained with graph transformers/graph attention layers because they can adaptively choose which nodes should be discussed with which, but the authors did not discuss them in the paper or within the experiments. Similarly, the method should be compared with rewiring methods.\n\n4. It is not clear what the main benefit of this method is beyond adaptivity (which is important). For example, there is no discussion on which inherent problems in GNNs it can solve, like over-smoothing or over-squashing, and there is also no discussion on the expressiveness of this method.\n\n5. Regarding the design of the adaptive mechanism (section 3.2), it is interesting to know what the network actually learns to adapt to and how. However, this was not shown in the paper, so it lacks insights.\n\n6. The time complexity of the method is quite high. Although it is discussed by the authors, they do not properly compare the required runtimes with other methods, and they should also compare their performance with other methods that require more time, such as graph transformers and subgraph methods.\n\n7. I am a bit concerned about section 3.5; it shows heuristics to use the method on test nodes, however these proposed heuristics and encodings were also used in other methods and were shown to be quite strong, for example, the 'Walk Count' was used in the well-known RWSE encoding (see R6 for reference), and I think that it is hard to disentangle the results obtained by these approaches from the actual method proposed here, which is the adaptive message passing mechanism.\n\n**Regarding experiments, I have several concerns:**\n\n8. First, it is only conducted on up to 5 layers. However, most GNNs perform quite well in this regime (e.g., see R6). It is more interesting to know how this method performs when there are many layers, like 64,128,256, etc.\n\n9. It is not clear which splits were used. While the authors do say they use 10 seeds, the splitting of the data is not clear and does not seem to be in line with known splits of the datasets used here (e.g., as in R7). \n\n10. The results are poorly compared with relevant methods, as previously discussed, and this makes it hard to quantify the contribution of the proposed method properly. Also, the uncertainty on which splits were used in the experiments makes it harder to compare with known results from previous papers.\n\n11. The datasets used here were shown to suffer from multiple problems (see R8, R9), and therefore, showing results on additional and more up-to-date datasets is required. I would strongly suggest trying to benchmark your method on those shown in R9 and additional tasks like graph classification or regression. \n\n12. 
The ablation study is discussed in the main paper, but the results are missing.\n\n[R1] Adaptive Message Passing: A General Framework to Mitigate Oversmoothing, Oversquashing, and Underreaching\n\n[R2] Cooperative Graph Neural Networks\n\n[R3] Improving Expressivity of GNNs with Subgraph-specific Factor Embedded Normalization\n\n[R4] GRANOLA: Adaptive Normalization for Graph Neural Networks\n\n[R5] DiGRAF: Diffeomorphic Graph-Adaptive Activation Function\n\n[R6] A Survey on Oversmoothing in Graph Neural Networks\n\n[R7] Geom-GCN: Geometric Graph Convolutional Networks\n\n[R8] Pitfalls of Graph Neural Network Evaluation\n\n[R9] A critical look at the evaluation of GNNs under heterophily: Are we really making progress?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How well does the centrality-based layer selection policy algin with the one given in the oracle accuracy?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is well-written and the adaptive depth aggregation is well-motivated."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores using node-wise adaptive depth aggregation for GNN. To this end, a progressive training strategy is proposed, and the proposed method is evaluated on several benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "It is understandable that different nodes may benefit from different depths of aggregation, and the results in Table 2 show that the maximum test accuracy by using the aggregation results from different layers is much better than what is achieved by using the aggregation results from a single layer. However, the main challenge is determining the optimal depth for each node. The heuristic method proposed in this paper utilizes clustering and then uses the validation accuracy for each cluster to determine the optimal depth for the whole cluster that can easily not be optimal, e.g., maybe, in some clusters, the validation nodes are not too small and not representative enough. Indeed, the accuracy of the proposed method is not much better than the GCN baseline."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "N/A"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The idea is novel to me. However, since I haven’t worked on GNNs since 2022, I’m unable to provide an accurate assessment of its novelty.\n\n2. The paper proposes a sequential training approach, which helps reduce computational complexity.\n\n3. To improve generalization on test nodes, the paper introduces several heuristic metrics—k-core, PageRank, and Walk Count—to determine the optimal number of aggregation layers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The key idea of this work is to use varying numbers of message-passing steps to aggregate information uniquely for each node, allowing different nodes to utilize different aggregation and propagation layers. After a certain number of layers, the model first calculates the loss for these nodes, performs backpropagation, and then freezes their gradients to avoid conflicts in subsequent aggregation layers. The paper proposes an adaptive framework to determine the optimal number of layers for each node."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The improvement is somewhat limited on certain datasets, such as Cora and Citeseer.\n\n2. The paper is under 9 pages, and, in my view, it feels somewhat incomplete."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- When executing the experiments for figure 1, are the L different GNNs trained with the same dynamics? (batch size, epochs, learning rate ...) Because deeper models might be harder to train. See also [5], these authors questioned the oversmoothing issue, and they claim it's just trainability issue for deep networks. \n- In figure 1, I can understand the green curves have an upside-down V shape, too few layers lead to under-reaching, and too many layers lead to over-smoothing. But why are the blue curves V shape? It does not make sense to me, why it drops first and adding more layers is better again. \n- In line 193-194, is there a motivation why no hidden representations h is included in the exit function?\n- In line 236, the authors mentioned about the conflicts in gradients. Is there any proof or related work mentioning this?\n- I wonder if stochastic depth [6] training also helps, which can be a candidate to your sequential training trick. \n- Is this work directly applicable to graph property prediction, like, just by adding a pooling layer? If yes, why no experiments on graph prediction tasks? If not, why?\n- In line 313, 316, what is ADMP-GNN PT? \n- Some experiment results seem pretty bad, for example, in table 1, CS column, 2 layers, ADMP-GNN ALM is much worse than GCN(*). Why? Because of the gradient conflicts?\n\n[5] Peng, Jie, Runlin Lei, and Zhewei Wei. \"Beyond Over-smoothing: Uncovering the Trainability Challenges in Deep Graph Neural Networks.\" arXiv preprint arXiv:2408.03669 (2024).\n[6] Huang, Gao, et al. \"Deep networks with stochastic depth.\" Computer Vision–ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11–14, 2016, Proceedings, Part IV 14. Springer International Publishing, 2016."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Overall, the writing is pretty good and clear.\n- The motivation about adaptive layer for each node is convincing. \n- The synthetic dataset experiment is pretty well designed. \n- The architecture is efficient and has the same complexity as a normal GNN.\n- The empirical experiments are promising and show the model-agnostic property of the proposed method. The ablations are extensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed Adaptive Depth Message Passing GNN (ADMP-GNN), which learns a preset-max-depth GNN, and outputs predictions per layer. The authors also proposed training tricks and layer selection heuristics during test time. The network is efficient and shown to be effective on some real-world datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Some related works are missing. The authors claim none work has been done for adaptive layer GNNs, which is not true. There are [1] [2] [3] [4], which may not have exactly the same method, but they also share the idea of adaptive message passing for nodes. \n- Similarly, these works should also be compared in the experiments. \n\n[1] Spinelli, Indro, Simone Scardapane, and Aurelio Uncini. \"Adaptive propagation graph convolutional network.\" IEEE Transactions on Neural Networks and Learning Systems 32.10 (2020): 4755-4760.\n[2] Finkelshtein, Ben, et al. \"Cooperative graph neural networks.\" arXiv preprint arXiv:2310.01267 (2023).\n[3] Faber, Lukas, and Roger Wattenhofer. \"GwAC: GNNs with Asynchronous Communication.\" Learning on Graphs Conference. PMLR, 2024.\n[4] Errica, Federico, et al. \"Adaptive Message Passing: A General Framework to Mitigate Oversmoothing, Oversquashing, and Underreaching.\" arXiv preprint arXiv:2312.16560 (2023)."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "we propose Adaptive Depth Message Passing GNN (ADMP-GNN), a novel framework that dynamically adjusts the number of message-passing layers for each node, leading to enhanced performance."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024admpgnn,\ntitle={{ADMP}-{GNN}: Adaptive Depth Message Passing {GNN}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yAU5X77S06},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph Neural Networks (GNNs) have proven to be highly effective in various graph representation learning tasks. A key characteristic is that GNNs apply a fixed number of message-passing steps to all nodes in the graph, regardless of the varying computational needs and characteristics of each node. Through empirical analysis of real-world data, we show that the optimal number of message-passing layers differs for nodes with different characteristics. This insight is further validated with experiments on synthetic datasets. To address this, we propose Adaptive Depth Message Passing GNN (ADMP-GNN), a novel framework that dynamically adjusts the number of message-passing layers for each node, leading to enhanced performance. This approach is applicable to any model that follows the message-passing scheme. We evaluate ADMP-GNN on the node classification task and observe performance improvements over a wide range of GNNs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ba914398afee412638d7f3f260cef3b68fe74592.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/7a13519f8b8bc2912dac822542756d5082ea4b7c.zip"
},
"title": {
"value": "ADMP-GNN: Adaptive Depth Message Passing GNN"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yAzN4tz7oI | RDT-1B: a Diffusion Foundation Model for Bimanual Manipulation | main | Active | robot learning;diffusion models;foundation models;bimanual manipulation | applications to robotics, autonomy, planning | 5;6;6;8 | 3;4;4;3 | 2;3;3;4 | 2;3;4;4 | 3;3;3;4 | 6.25 | 3.5 | 3 | 3.25 | 3.25 | -0.229416 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The model uses frozen vision encoders. Does it mean existing pretrained visual representation is sufficient for robot manipulation? I wonder if the authors spot any cases where pretrained visual encoder is insufficient and lead to unsatisfactory performance.\n- In pretraining on heterogeneous data with varied control frequency, the model takes control frequency as conditioning. I wonder how this strategy works in practice - does the model learns policy prior corresponding to different control frequency?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper presents a complete and remarkable research work that pushes forward the boundary of large-scale robot learning. \n- The model is developed on top of the diffusion transformer with a unified action space, which allows large-scale pretraining on heterogeneous robot data to boost the performance\n- The authors collect the largest robot dataset for bimanual manipulation with comprehensive task coverage for fine-tuning the model\n- The experiments show that the advantage of the model from a foundation model aspect: generalization, few-shot learning, and scaling behavior"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper develops a 1.2B-parameter robotics foundation model that is trained and evaluated on real robot data for bimanual manipulation. The model is trained with imitation learning: (i) pretrained on 1M trajectories combining available datatsets collected for different robots, and (ii) fine-tuned on a self-collected dataset with 6k demonstrations for a Mobile Aloha robot. The model adopts a diffusion transformer (DiT) architecture that takes multi-modal inputs (images, language, etc.) and generate action chunks with multimodal distribution. \n\nThe model is evaluated on 7 real robot tasks against mainstream baselines. The comparison shows that the model can: (i) generalize zero-shot to novel objects, scenes and language, (ii) learn new skills with few data, (iii) accomplish dexterous tasks. Ablation studies show that larger model and pretraining with large data significantly boost the performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the paper demonstrates that the foundation model is allows zero-shot and few-shot generalization, and can achieve dexterous manipulation, each of these characteristics is only validated on ~one task and may be insufficient. Evaluations on more tasks and existing benchmark tasks will complete the results.\n- It seems that the baselines are not trained on the complete fine-tuning dataset. This doesn't form an apple-to-apple comparison.\n- The writing of the paper has room for improvement. Some of the sentences are too long, which prevent reading of the paper smoothly.\n- Mistakes in citing papers. For example, it seems that \"Xie et al. 2020\" is wrongly used as the reference of DiT in line 83 and 319."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "My questions are mainly also discussed in the weakness. Here is an summary:\n1. Given that the diffusion transformer already contains multiple nonlinear layers, why does adding an additional layer improve performance? Have you conducted ablation studies to support this choice?\n2. In the original UNet diffusion policy, the last layer is a Conv1dBlock. Have you compared the performance of your MLP block with this alternative?\n3. What is the the MLP block performance gains on other tasks?\n4. Can you provide more insight into how the model design specifically addresses high frequency change?\n5. What are the performance gains from using all data through this unified action space compared to previous methods that only use robots with similar action spaces or retain subsets of inputs with a common structure?\n6. How does the proposed padding strategy improve performance compared to zero padding? Could you provide comparative results?\n7. How do these normalization layers integrate into the transformer architecture, and how do they address instability? Have you tested different configurations of these layers?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper demonstrates strong performance in scaling up robotics models. It presents several interesting components that improve training stability and performance.\n\nThe unified action space, and especially the padding technique, is interesting.\n\nThe paper shows capabilities on several challenging real-world bimanual manipulation tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an effort toward building a foundation model for bimanual manipulation. It proposes techniques to unify the action space, enabling training on a very large robot dataset. The authors also scale up the model to 1.2B parameters, making it the largest diffusion transformer model for robotics. In this process, the authors identify several key elements to improve training stability and performance. The resulting model achieves good zero-shot generalization performance on unseen and complex tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Several claims are not very precise and not very clear. For example, the authors mention the nonlinearity and high frequency of robotic data. While it is true that the data is nonlinear, how does the proposed method tackle this challenge? The authors argue that changing the last linear layer to an MLP block solves this problem and brings significant performance improvements. While the performance is impressive, I think this requires more careful ablation experiments. Firstly, the entire diffusion transformer is already highly nonlinear due to stacking multiple layers, so why does adding more layers help in this case? Secondly, in the original UNet diffusion policy, the last layer is a Conv1dBlock; have the authors compared with that? Lastly, why is it only evaluated on the dexterity task?\n\nIt is also unclear how the model design choices address the “high frequency of robotic data.” Given that these two claims are highlighted in the abstract as the main challenges, I believe they require more careful analysis and discussion.\n\nAs the unified action representation is a major contribution of this paper, there should also be more analysis of this aspect. For example, what are the performance gains from using all the data because of the unified action space, compared to previous methods that use “robots with similar action spaces (Yang et al., 2023; Ghosh et al., 2023; Kim et al., 2024) or retain only a subset of inputs sharing the same structure (Collaboration et al., 2023; Yang et al., 2024)”? Additionally, what is the performance gain of the proposed padding strategy compared with padding with all zeros?\n\nThe authors argue the necessity of using RMSNorm and QKNorm, but they only show the loss without them (Figure 4(a)), which provides very little information on how effective the proposed approach is and whether it addresses the instability issue. It also does not mention how to integrate these normalization layers within the transformer.\n\nScaling up the models and data is certainly attractive, and the paper shows impressive results. However, most of the analysis are “binary”, which means the results are either with or without. Showing more datapoints (model with different size, using different percentage of the dataset) will present more insights to the reader."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Could the authors elaborate on why diffusion models were specifically chosen over other generative methods like VAEs or GANs for this task? While diffusion models show high expressiveness, a comparison or rationale would clarify their unique benefits in bimanual manipulation.\n\n- The Physically Interpretable Unified Action Space is innovative, but how does it handle robots with vastly different kinematics or action constraints?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- The paper introduces a novel application of diffusion models to bimanual manipulation, addressing the high-dimensional, multi-modal action space through a Physically Interpretable Unified Action Space. This approach is a creative extension of diffusion models in robotics, particularly for dual-arm coordination, a challenging domain with limited prior work.\n\n- The model is rigorously tested, with comprehensive experiments demonstrating superior performance over existing baselines. The use of the largest multi-robot dataset and a specialized bimanual dataset for fine-tuning enhances the validity of results and supports the model's effectiveness.\n\n- This work contributes substantially to the field by advancing foundation models for robotic manipulation. RDT’s capabilities for zero-shot generalization, few-shot learning, and instruction following mark a significant step towards adaptable and scalable robotic models, with promising implications for real-world applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents the Robotics Diffusion Transformer (RDT), a large-scale diffusion-based model for bimanual robotics. RDT introduces a Physically Interpretable Unified Action Space to standardize actions across robots, enhancing generalization, and uses diffusion models to handle complex, continuous action distributions. Pre-trained on the largest multi-robot dataset and fine-tuned on a custom bimanual dataset, RDT demonstrates strong zero-shot and few-shot generalization, outperforming existing methods in dexterous, real-world tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper introduces a Physically Interpretable Unified Action Space for handling data heterogeneity, but additional details on potential limitations or failure cases during training with highly diverse data would be beneficial. This could include examples where action standardization might lead to loss of unique features across robots.\n\n- Although the experiments show impressive results, expanding the evaluation to more varied and complex real-world tasks (beyond the 6,000-episode dataset) and more hardwares(beyond ALOHA) could further validate RDT's robustness.\n\n- The paper proposes several innovative multi-modal encodings (e.g., masking, cross-attention) but lacks ablation studies on these design choices. Showing how each component contributes to performance could clarify their impact on handling visual and language-conditioned tasks effectively.\n\n- There's a typo at L76, it should be \"data\" instead of \"date\" ."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. \"Does the RDT-1B fine-tuning use the entire self-collected dataset, and is it not fine-tuned separately for each task in the evaluation?\"\n2. \"Regarding the computation of success rate for the 'wash cup' task with 'seen cup1': the success rate (SR) for 'get water' is 50, for 'pour water' is 87.5, and for 'place back cup' is also 87.5, yet the overall SR is listed as 50. Since the 'get water' subtask has an SR of 50, and the following subtasks have SRs below 100, how is the total SR calculated as 50?\""
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "A foundational model for bimanual manipulation is absent in the current community, which is an important direction; aligning different robot embodiments is also a crucial question for pre-training on large-scale datasets. The proposed action representation is simple yet effective for pre-training on diverse robot datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a unified action representation to align different robots, facilitating pre-training on diverse robot datasets for bimanual manipulation. Additionally, it introduces a diffusion transformer-based architecture with several modifications for enhancing policy learning, and scaling up with large datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Compared to the robot datasets used for pre-training the baseline, such as OpenVLA, this paper appears to use a more diverse set of datasets, including additional bimanual manipulation datasets like ALOHA and Mobile ALOHA, contributing nearly 10% of the total datasets. Fine-tuning a baseline pre-trained on single-arm datasets for a bimanual manipulation setting may result in poor performance on bimanual tasks, making it difficult to demonstrate that using the diffusion transformer architecture is superior to using a large language model as the pre-training backbone."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024rdtb,\ntitle={{RDT}-1B: a Diffusion Foundation Model for Bimanual Manipulation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yAzN4tz7oI},\nnote={under review}\n}"
},
"abstract": {
"value": "Bimanual manipulation is essential in robotics, yet developing foundation models is extremely challenging due to the inherent complexity of coordinating two robot arms (leading to multi-modal action distributions) and the scarcity of training data. In this paper, we present the Robotics Diffusion Transformer (RDT), a pioneering diffusion foundation model for bimanual manipulation. It is built on scalable Diffusion Transformers (DiTs), which can effectively represent multi-modality, with innovative designs to deal with the heterogeneity of multi-modal inputs and to capture the nonlinearity and high frequency of robotic data. To address data scarcity, we first introduce a Physically Interpretable Unified Action Space, which can unify the action representations of various robots while preserving the physical meanings of original actions, facilitating learning transferrable physical knowledge. With the above designs, we managed to pre-train RDT on the largest collection of multi-robot datasets to date and scaled it up to $1.2$B parameters, which is the largest diffusion-based foundation model for robotic manipulation. We further fine-tuned RDT on a self-created multi-task bimanual dataset with over $6$K+ episodes to refine its manipulation capabilities. Experiments on real robots demonstrate that RDT significantly outperforms existing methods. It exhibits zero-shot generalization to unseen objects and scenes, understands and follows language instructions, learns new skills with just 1$\\sim$5 demonstrations, and effectively handles complex, dexterous tasks. Code and a Demo video are provided in the supplementary materials."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"robot learning",
"diffusion models",
"foundation models",
"bimanual manipulation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e78850b39075ba973908cf24578b26a9b4c85065.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/790d1f8285e80d0c411bcec3d39cc31ca25b7072.zip"
},
"title": {
"value": "RDT-1B: a Diffusion Foundation Model for Bimanual Manipulation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yBLBls6ryd | Fast Fractional Natural Gradient Descent using Learnable Spectral Factorizations | main | Active | natural gradient;Riemannian optimization;positive-definite manifold;Kronecker-facotrized;Shampoo | optimization | 3;3;5;5;5;5;8 | 2;3;3;2;4;2;4 | 3;2;3;2;3;2;4 | 2;1;2;3;3;2;4 | 1;1;2;2;2;1;3 | 4.857143 | 2.857143 | 2.714286 | 2.428571 | 1.714286 | 0.536783 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. what is the meaning of \"root-free\" scheme?\n2. Can you please elaborate on how you define the sequence of the curvature matrices in \"Iterate matching\" scheme to measure the performance of your method? \n3. Can authors reference which work shows superior generalization of the FNGD for p's either than 2, 4, 1?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors introduce a unifying framework for a class of preconditioned gradient agorithms as special cases of fractional natural gradient descent, and gives a general framework to implement it more efficiently in a spectral parameterization that is motivated by a Riemannian optimization algorithm in a reparameterization of the space to the normal coordinates."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduce a unifying framework way to calculate Fractional natural gradient descent estimates such as RMSprops or Shampoo by considering a diagonalization of the curvature matrix $S$ as $B diag(d) B^\\top$ for orthogonal matrix $B$, and updating $B$ and $d$ instead. This parameterization allows them to calculate the fractional power easily as $B \\diag(d^{-1/p})B^\\top$. To update the parameters $B$ and $d$, the challenge is that there are constraints on the space of the middle matrix that has to be diagonal, and $B$ has to be orthogonal. The authors then seems to use a reparameterization of the space $(B,d)$ in what they call a \"root-free\" approach, so that they can take the Riemannian gradient steps in that reparameterization with any constriants, in a specific manifold metric that is related to a concept called \"Gaussian variational distrbution.\" To do so, the they use previous ideas going back to work of Amari et al."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It is nots clear what is their contribution and what has been don ein previous work.\n2. Description of the use of the gaussian variational approach in line 286 is very vague, it is not even clear why this approach is well-motivated or what is $\\ell$.\n3. Authors keep taking about suing local coordinates but it's not exactly what coordinate system for the manifold of matrices they are referring to, do you mean the normal coordinate system using the xponential map?\n\n4. The definition of function $Tril$ is unclear, and authors define what they mean by $Cayley$ in the middle of the paper.\n5. The high level ideas are vague and include terms that is not clear what they exactly refer to.\n6. It would be nice if authors can provide further motivation or example on which practical case the kronecker-structured spectral parameterization is useful. \n7. It is not clear what is the manifold metric the authors consider in their Riemannian appraoch. they connection of the gaussian variational framework, which seems to go back to work of Amari, to obtaining a manifold metric seems unclear. \n\nOther issues:\nH is not defined in equation 3.\nU and Tril are not defined in algorithm box on line 162\nLine 107 the notation is super unclear, is Mat(g) the diagonal matrix with entries of g? If so what is the difference between $G^\\top G$ and $G G^\\top$, and $S_C, S_K$?\nAuthors refer to “root-free” method but they don’t define what they mean\nEquation 10 is not clear at all\nAuthors don't provide any intuition of their approach/contribution e.g. for equation 10 or for the algorithm box in line 162\n\nMinor issue:\nClaim 1 is named Lemma 1 in appendix"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "NA"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Consider a unified framework for adaptive methods through the fractional power of the Fisher matrix."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a method for fractional natural gradient descent (FNGD) using learnable spectral factorizations. It introduces a way to factorize the Fisher matrix and apply fractional powers, aimed at enhancing neural network training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Clarity Issues: Notation and concepts are unclear, making the method difficult to follow. For example:\n- What is $B$ matrix?\n- The purpose of learning the Kronecker factorization is under-explained \n- What is matrix H? (In line 101)\n- Meaning of the “mat” notation (line 107)\n- What are matrices $S^K$ and $S^C$?\n- What is $\\otimes$ product?\n- Description of the “Gaussian problem”?\nFor that concept or notations, the authors should give an appropriate introduction and explanation and references\n\nWeak Motivation: \nThe paper provides an insufficient explanation as to why this complex approach is necessary or how it significantly advances existing methods. It does not provide new insight or guidelines (or at least does not explain clearly) that can potentially be utilized to result in efficient optimization or training.\n\nAmbiguity in Implementation: \nThe handling of factorization ambiguities in high-dimensional settings is unclear, particularly in terms of computational impact and scalability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N.A."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Could you provide a more intuitive explanation of the advantages of using spectral parametrization over traditional matrix decomposition methods, especially regarding computational efficiency?\n\nIt would also be valuable to include more comparisons with recent advancements in adaptive gradient methods, particularly as applied to large-scale language models.\n\nAdditionally, a clearer summary of the implications of using arbitrary fractional powers in gradient descent in both the introduction and conclusion would enhance accessibility, helping readers better grasp the broader impact of this approach.\n\nWhy was the KFAC method excluded from the numerical comparison?\n\nAs an early suggestion, moving the Kronecker extension to the appendix could allow more space for a structured development of the core method in the main text, which may improve readability."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The introduction of learnable spectral factorization to FNGD is an innovation that addresses computational bottlenecks in second-order methods, such as those found in Shampoo or KFAC.\n\nThe authors effectively address numerical instability, making the proposed method suitable for low-precision arithmetic, which may improve the efficiency in large-scale machine learning tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel approach to fractional natural gradient descent (FNGD) by a learning method, which addresses the challenges of computing the matrix fractional powers. The authors present a Riemannian framework to learn eigen-factorized Fisher estimations on-the-fly, which allows the efficient application of arbitrary fractional powers without requiring matrix decompositions. Numerical experiments show the effectiveness in positive-definite matrices optimization problems and neural networks training tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "To strengthen the argument for using this method over existing alternatives, an ablation study examining the effects of different spectral factorization techniques on convergence speed and generalization performance would be beneficial.\n\nWhile the paper demonstrates scalability in terms of Kronecker-structured preconditioners, additional details on computational complexity and memory requirements—particularly in comparison to popular methods like Adam or Shampoo—would offer a clearer understanding of the associated trade-offs."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1) Is the method implemented as a PyTorch optimizer?\n\n2) How many parameters are required for the method's performance?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposed method is focused on interesting and valuable problems to solve. The paper has a good and easy-to-follow introduction which motivates the proposed method. The method has a big potential from the practical perspective as it exploits curvature information."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In the paper, the author propose a novel optimization method for deep learning. It aims to learn spectral factorization of Fisher preconditioning without matrix decomposition. By knowing the spectral factorization it is possible to apply arbitrary power of the preconditioning. Finally, the authors provide evidence of practical effectiveness and performance of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The style of presentation is confusing in some parts of the paper. Next, I list some examples: a) Figure 1 shouldn’t be presented as is. It has two methods inside and half of the paper description. It ruins the style and flow of the paper. b) The authors present theoretical results as “claims”, however, next they prove it as lemmas. So, it would be reasonable to formulate them as lemmas in the main text as well. c) In lines 151-161 and 242-248, there is an extra spacing which shouldn’t be there. d) Some colorings are confusing. For example, line 353, 745, 833, 932. It might be forgotten highlights of the changes. \n\n2. The experimental section. As the paper doesn’t provide convergence proofs, we can say it is mostly an empirical paper backed by theoretical intuition. a) Hence, the extensive experimental results are expected. In the paper, only one figure presents the performance of the methods for 3 vision transformers on one dataset. It does not seem enough for an experimental paper. So, I would recommend adding experiments for common DL benchmarks to understand the methods' performance in comparison to state-of-the-art methods. Some examples can be found here https://mlcommons.org/benchmarks/algorithms/ b) Another issue is that the experiments’ description is lacking proper details and descriptions. It is limiting the reproducibility of the results. There are no methods parameters, learning rates, and so on. The attached code may help as well. \n\nSome minor misprints: a) Please, unify the use of “RMSprop”, as currently there are multiple variants of it (RMSprop, RmsProp, RMRprop) b) Line 709: misprint in “Given”"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "**Q1:** In Line 318, the authors manage a phrase: ''When changing coordinates, the metric representation has to be changed accordingly Lee, (2018) to make RGD invariant to coordinate transformation''. Please, could the authors clarify a steps like that with more formal presentation?\n\n**Q2:** Can the authors clarify a conceptual difference between the *Procedure for Full-matrix Spectral Parametrizations* and procedure in *Riemannian Approach for Obtaining Root-free Update Schemes*? As, naturally, the *Step 1* and *Step 2* of the latter approach describes the whole pipeline in general.\n\n**Q3:** Why does the *Claim 5* not considered as a definition?\n\n**Q4:** Could the authors clarify, please, how does it follows from Figure 4, that fraction roots are better than p=2?\n\n**Possible suggestions:**\n\n1) Rename ''default update scheme'' and ''our scheme'' with more sophisticated names.\n2) Explicitly write Lemmas 1-5.\n3) Formally define Fisher-Rao metric in the gentle introduction to the approach.\n\n\n\nJohn M Lee. Introduction to Riemannian manifolds, volume 2. Springer, 2018."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**S1:** The authors study a new form of the preconditioneer – spectral factorization of the curvature approximation. They show, that for such a preconditioneers can b eapplied an update rule allowing to learn the spectral factorization at each step of the optimization problem.\n\n**S2:** The approach of the paper extends to the Kronecker factorization and analysis of the spectral decomposition for the Fisher-Rao metric."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an adaptive update scheme for full-matrix spectral factorization. This method serves as the efficient approximation of the preconditioning matrix, thus, allowing to perform a step of natural gradient descent faster."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**W1:** The writing of the paper is hard to follow.\n\n**W2:** The are no explicit formulation of lemmas in the paper.\n\n**W3:** The key points in the theoretical analysis is missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How did you generate matrix $Q$ in the positive-definite matrix optimization problem of Section 4?\n2. For the low-precision NN training in Section 4, you claimed that $p=1$ is better than $p=2$, but the difference is not significant from the plots presented. Do you have any other numerical result showing a more significant discrepancy?\n3. Did you have a complete derivation of the procedure for Kronecker-structured Spectral Parametrizations?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. This approach allows for the cheap application of arbitrary fractional powers, which distinguish it from other similar RGD approaches (e.g. Cholesky-based RGD).\n2. This approach circumvents the numerical instabilities of matrix decompositions, making it easy to implement and more practical.\n3. This paper is an extension based on previous literatures which viewed learning the curvature approximation (preconditioner) as learning the covariance of a Gaussian variational distribution by performing RGD on the manifold of dense or Kronecker-factorized positive-definite matrices. This paper further separates the diagonal part and orthogonal part by spectral decomposition, and incorporates the new constraints arising from the spectral decomposition for the Fisher-Rao metric in derivation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a stable and efficient Riemannian framework to learn eigen-factorized Fisher estimations, allowing for the cheap application of any fractional powers. The efficacy of this approach is demonstrated in numerical tasks including positive-definite matrix optimization and low-precision NN training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. No numerical performance in large-scale setting is presented."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "My questions mostly overlap with the confusion I have in the Weaknesses section above. It would be great if authors could address those concerns, in which case I would happily increase my score."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper addresses a relevant and interesting problem in optimizing deep neural networks, which may inspire future research ideas. \n\n2. In addition to introducing the counterpart for the classical NGD methods, the paper also introduces the update rule that works for methods like Shampoo, which preconditions on both sides of the matrix iterate. \n\n3. The paper clearly describes its own contribution and credits the contribution of the previous works."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a new update rule for the approximation of Fisher Information matrix in the natural gradient descent and its variants like RMSprop and Adam(W). The intuition is based on the observation that the preconditioner can be interpreted as the inverse covariance matrix of a Gaussian variational distribution (Lin et al. 2024). The authors propose to learn a decomposition of the preconditioner so that the fractional inversion can be computed efficiently. Numerical experiments show that the iterates generated by such a method have comparable efficiency in approximating the Fisher information and good performance when used to train neural networks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main motivation of the method, which is the flexibility of choosing p, is mentioned but not very well established. I think the authors should at least make an effort on the numerical side. They mention that \"This shows that the potential of using other fractional roots.\" by the end of the section 4, which seems weak as this is the main reason for potential practitioners to use this method over its simpler counterpart. Some results showing that the best training result can only be obtained by some p other than 1 or 2 can be very persuasive in this case. \n\n2. In terms of the computational cost, the comparison between this method and previous methods is not clear. The authors calibrated the running time for each method in section 4 by adjusting the number of iterations to update the preconditioner, but it is still vaguely defined and not quantified. One good comparison is to those methods which directly compute the matrix decomposition using high-precision arithmetic. I think this will establish the other important motivation of the method.\n\n3. The method suffers from a per-iteration error of $O(\\beta_2^2)$, which is non-negligible for constant $\\beta_2$. Also, the inverse approximation when calculating the Cayley map introduces another source of inaccuracy. \n\n4. There are some notation ambiguity in the paper. For example, Kronecker product and functions like Mat($\\cdot$) are not defined clearly. Diag($\\cdot$) and diag($\\cdot$) are used interchangeably. Eq 3 seems to come from nowhere when it first appears in the paper (no introduction before and after, no definition of $\\mathcal{H}$). Claim 2 eventually becomes Lemma 2 in the appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024fast,\ntitle={Fast Fractional Natural Gradient Descent using Learnable Spectral Factorizations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yBLBls6ryd},\nnote={under review}\n}"
},
"abstract": {
"value": "Many popular optimization methods can be united through fractional natural gradient descent (FNGD), which pre-conditions the gradient with a fractional power of the inverse Fisher:\n RMSprop and Adam(W) estimate a diagonal Fisher matrix and apply a square root before inversion; other methods like K-FAC and Shampoo employ matrix-valued Fisher estimates and apply the inverse and inverse square root, respectively.\n Recently, the question of how fractional power affects optimization has moved into focus, e.g. offering trade-offs between convergence and generalization.\n Gaining deeper insights into this phenomenon would require going beyond diagonal estimations and using cheap and flexible matrix-valued Fisher estimators capable of applying any fractional power; however, existing methods are limited by their expensive matrix fraction computation.\n To address this, we propose a Riemannian framework to learn eigen-factorized Fisher estimations on the fly, allowing for the cheap application of *arbitrary* fractional powers.\n Our approach does not require matrix decompositions and, therefore, is stable in half precision.\n We show our framework's efficacy on positive-definite matrix optimization problems and demonstrate its efficiency and flexibility for training neural nets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"natural gradient",
"Riemannian optimization",
"positive-definite manifold",
"Kronecker-facotrized",
"Shampoo"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b56f0b045f9cfb07ed219cb432853ec91bf0af6d.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Fast Fractional Natural Gradient Descent using Learnable Spectral Factorizations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yBhSORdXqq | Modular addition without black-boxes: Compressing explanations of MLPs that compute numerical integration | main | Active | mechanistic interpretability;proof;guarantees;interpretability;numerical integration | interpretability and explainable AI | 3;3;6;8 | 5;3;2;3 | 2;2;3;4 | 1;2;3;3 | 2;2;2;3 | 5 | 3.25 | 2.75 | 2.25 | 2.25 | -0.540738 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Are all the experiments conducted with constant head Transformers? I couldn't find this detail in the Colab notebook since the checkpoints are directly imported for analysis.\n\n- According to [3], networks can learn either \"clock\" or \"pizza\" algorithm, depending on the details of training. How do the authors ensure that the learned algorithm is always \"pizza\"?\n\n- Are Figures 2,5 empirical results or just theoretical visualizations? \n\n- Lines 299-303: The omission of x/2 term seems rather hand-wavy. How does one see that computing the bounds after ignoring this term is a reasonable thing to do? It is unclear to me even after referring to Appendix K.\n\n- Does Table 1 imply that the contribution from $\\\\varepsilon_\\\\phi$ is very large? This seems unlikely since $\\\\psi_i - 2\\\\phi_i$ are quite small in Figure 3.\n\n- Line 349: Does \"brute force\" simply mean empirical?\n\n- Line 373-377: Zhong et al. [3] do not claim that logits are always of the \"pizza\" form. They claimed that either \"clock\" or \"pizza\" algorithm can be found, depending on the hyperparameters.\n\n- Lines 395-397: I am puzzled by the $R^2$ results. The paper sets out to explain the \"pizza\" algorithm. However, the logits seem to align better with the \"clock\" algorithm. \n\n- Lines 401-403: This paragraph claims that the secondary frequencies are important. However, Figure 4 shows that the top Fourier component explains more than 99% of the variance in all of the neurons. How does one reconcile these two results?\n\n[1] Gromov, Grokking modular arithmetic (2023)\n\n[2] Doshi et al., To grok or not to grok: Disentangling generalization and memorization on corrupted corrupted algorithmic datasets (2024)\n\n[3] Zhong et al., The Clock and the Pizza: Two Stories in Mechanistic Explanation of Neural Networks (2023)"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The analysis of secondary frequencies is new.\n\n- The results are presented in a quite candid fashion, with the limitations/discrepancies of the analysis freely discussed, along with potential explanations.\n\n- The code used to produce the results is provided in the form of a link to a Colab Notebook."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies the role of MLP layers in a constant-attention Transformer (i.e. ReLU-MLP network) trained on modular addition. They write the overall operation of the network as $embedding \\\\rightarrow ReLU \\\\rightarrow fully-connected$ and write the trigonometric functions corresponding to these components. They show that the sum over same-frequency neurons implements a sum over random phases, which can thought of as approximating an integral. They provide empirical evidence for their claims and show that their method enables the computation of non-trivial error bounds on the outputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My main concerns lie in regards to the novelty and soundness of this work.\n\n- The authors only consider Transformers with constant attention. This makes the network a ReLU-MLP and not a Transformer. This should be made clear in the paper. If the objective is to study Transformers, then non-constant attention should also be examined.\n\n- Interpretability for MLPs trained on modular addition has already been extensively studied in literature [1,2]. Although these prior works focused on MLPs quadratic activations, many of the qualitative conclusions are similar to the ReLU case studied in this work. The authors should present a detailed comparison to these works and highlight the novelty. (Specifically, what are the insights from the current work that are lacking upon merging the findings of [1,2,3].)\n\n- As stated, it is difficult to draw conclusions from the error bounds -- especially the part with splitting ReLU into identity and absolute value components. This part would benefit from further justification as well as clarity of notation (e.g. lines 309-310)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* You suggest that this approach could be useful for practitioners in other domains. What properties of the model/problem would make it more likely that this approach would yield a good explanation?\n* You leave deriving tighter error bounds to future work. What avenues for proving these bounds are you considering?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "* **Addressing open problems in mechanistic interpretability.** While modular addition is a well-studied problem, the MLP sublayer's behavior has mostly been treated as a black box. By explaining what the MLP is doing, the authors help move the study of this model beyond this black box treatment. While understanding one piece of a toy model may seem like a modest contribution, this is a major missing piece of an active area of research and a major step towards producing and verifying mathematical descriptions of MLP behavior. \n* **Resolving the discrepancy between \"clock\" and \"pizza\" algorithms.** In section 5, the authors solve the mystery of why \"pizza\" algorithm logits resemble \"clock\" logits by explaining the role that secondary frequencies play. This is also an unresolved question in modular addition.\n* **Balance of perspectives.** The authors are thorough about including empirical/computational results, mechanistic descriptions of the modular addition task, and proofs. They also extend results to 150 other transformer models (which differ in their random seed)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "MLP layers in transformer models remain poorly understood in the mechanistic interpretability literature; even detailed analyses of toy models like modular addition transformers tend to treat them as black boxes that compute certain functions and verify their input-output behavior by exhaustive search over the input space. The authors propose to fully explain—by which they mean find a description that is linear in the number of parameters—the MLP's behavior in the modular addition task. They determine that the MLP is responsible for numerically computing an integral of a trigonometric identity that is useful for the modular addition task, and even prove non-vacuous bounds on the error of their description."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **Limited scope.** Although the paper is thorough and addresses an open question in mechanistic interpretability, its concrete contribution is simply to explain one component of one model (albeit under multiple random seeds) on one toy problem. While this is a step forward, and the authors briefly describe the broader implications for non-algorithmic interpretability practitioners in Section 7, the paper's contribution is circumscribed (arguably modest) as written. \n* **Bounds are crude.** As the authors acknowledge (354), the bounds they prove are quite loose, owing to an incomplete understanding of the model. Proving tighter bounds or arguing more convincingly for why tighter bounds are not possible would improve the paper. \n* **Clarity.** The argumentation in some of the mathematically dense sections (especially Section 5) can be hard to follow."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Have the authors investigated the effect of weight decay tuning on the clarity of their experimental results?\n2. What factors influence the emergence of secondary frequencies in the model?\n\nLine 023: \"trignometric\" typo in abstract\nLine 992: Broken equation ref"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Simple setting with a clear explanation of their setting\n2. Offered empirical check of their theoretical results"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper examines how 2-layer ReLU MLPs implement the trigonometric identities required by the 'Pizza' algorithm (previously described in earlier work) within a one-layer modified transformer architecture. The authors found that the MLP approximate a trigonometric integration by summing over its hidden neurons, they also provide non-vacuous bounds on the output of the MLP on all inputs in time linear in the parameters."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The work only focuses on explaining details of a component from the previously proposed Pizza Algorithm (Zhong et al., 2023), specifically how MLPs sum periodic features generated before activation functions. Furthermore, the simplified model (lines 125-126) is nearly identical to the fully solved model presented in [1], with ReLU activation replacing the quadratic activation. Since exact finite summation for ReLU remains an open problem, the authors instead approximate the sum using integral methods and argue that neurons effectively approximate this integral. \n\nWhile I appreciate the authors work on explaining the role of secondary frequencies in Section 5, it represents only a minor contribution. Overall, this paper's contribution falls below the standard expected at ICLR-level conferences.\n\n[1] A. Gromov, Grokking modular arithmetic, arXiv:2301.02679"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Is it possible to extend the proposed framework to MLP layers in a deeper transformers?\n\n- The study seems only focuses on understanding MLPs within a constrained setting where the MLP approximates trigonometric functions, is it possible to extend to non-trigonometric functions? \n\n- I am still confused on the definition of “complete interpretation”, are there any evaluation metrics?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors provides a depth analysis, with a formal definition of “complete interpretation”, a clear problem setting, and an evaluation on a simplified toy model, which enables a focused and mathematically rigorous analysis.\n\n- To evaluate the practical usage of the numerical integration interpretation, the authors validate the methods by establishing non-vacuous bounds for the MLP outputs in linear time."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors address the challenge of interpretibility of MLP layers in transformer models. Focusing on toy models trained for modular addition, the authors go beyond “approximate explanation” and propose to form a “complete interpretation” through a quadrature scheme, where neurons can be understood as approximating areas under curves in trigonometric functions. The method allows them to bound the MLP in total time linear in the neuron numbers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper is in general hard to follow for a broader audience, especially for those unfamiliar with topics such as mechanistic interpretability, quadrature themes etc. \n\n- A discussion on the scalability beyond the toy models is missing. While a simple toy model allows for a focus analysis, a discussion on scaling to more complex real-word tasks is beneficial.\n\n- As the author mentioned that there are couple of previous studies on the interpretability of MLPs, a comparative study with existing methods would be beneficial to fully understand the advantage of the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We provide mathematical and anecdotal evidence that an MLP layer in a neural network implements numerical integration."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024modular,\ntitle={Modular addition without black-boxes: Compressing explanations of {MLP}s that compute numerical integration},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yBhSORdXqq},\nnote={under review}\n}"
},
"abstract": {
"value": "Interpreting the behaviour of MLP layers in transformer models remains an open problem.\nThe goal of mechanistic interpretability is identifying the specific function learnt by the model, which is challenging to do in nonlinear domains---like MLPs---where low precision approximations of the learnt function can leave many degrees of freedom.\nWhile MLP layers that approximately implement linear functions can be well understood with low-precision approximations, we take on the challenge of approximating densely-connected MLPs.\nWe propose a formal definition for “full interpretation” of a circuit: a compression of the explanation to a size that is linear in the parameter count of the circuit.\nWe investigate in the classic setting of the toy models trained on modular addition Nanda et al. (2023) and Zhong et al. (2023), where the MLP layer is treated as a black box. We extend the analysis to describe exactly *how* the MLP layer computes the required trignometric identities. \nWe find that the MLP layer in one-layer transformers implementing the “pizza” algorithm can be understood as evaluating a quadrature scheme, where each neuron computes the area of a rectangle under the curve of a trigonometric integral identity.\nWe confirm this interpretation by using it to compress the MLP layer of a collection of modular addition transformers and prove non-vacuous bounds on the outputs of the MLP layer on *all* inputs in total time *linear in the number of neurons*.\nOur code is available at [https://tinyurl.com/mod-add-integration](https://tinyurl.com/mod-add-integration)."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"mechanistic interpretability",
"proof",
"guarantees",
"interpretability",
"numerical integration"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c2fb49ccd27ef2d6ce0fa9b8241caf1ae486b2bf.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Modular addition without black-boxes: Compressing explanations of MLPs that compute numerical integration"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yBlVlS2Fd9 | WavTokenizer: an Efficient Acoustic Discrete Codec Tokenizer for Audio Language Modeling | main | Active | speech representation;discrete codec;audio language model | applications to computer vision, audio, language, and other modalities | 5;5;8;10 | 5;5;5;4 | 3;2;3;3 | 2;1;4;3 | 2;3;3;3 | 7 | 4.75 | 2.75 | 2.5 | 2.75 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "refer to weakness"
},
"rating": {
"value": 10
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Achieves state-of-the-art performance using only a single codebook with 40 or 75 tokens, demonstrating remarkable efficiency.\n- Covers multiple domains, including audio, speech, and music.\n- Its single codebook capability supports efficient training of large language models (LLMs).\n- Provides a comprehensive analysis of different settings, along with detailed ablation studies that validate the impact of each component.\n- The paper is well-written and easy to follow, with a comprehensive analysis included."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces WavTokenizer, a codec tokenizer for audio that achieves extreme compression and superior reconstruction quality compared to previous state-of-the-art models. WavTokenizer requires only a single quantizer with 40-75 tokens per second for 24kHz audio, while still preserving high subjective quality and rich semantic content. Extensive experiments demonstrate WavTokenizer's effectiveness across speech, audio, and music, with competitive results in both objective and subjective metrics, and ablation studies confirm the impact of each component."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Here’s a refined version of the weaknesses:\n\n- The evaluation could be more thorough by incorporating existing benchmarks such as Codec-Superb and DASB to enable a more comprehensive comparison of the proposed method against existing models under standardized settings.\n- The model currently supports only a 24kHz sampling rate; I wonder if you anticipate any challenges in adapting WavTokenizer to different sampling rates. It would be valuable to study its performance across different sampling rates, such as lower (16kHz) and higher (44kHz or 48kHz) rates."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1, Missing baselines: SemantiCodec[1], Single-Codec[2], Mimi (Moshi) [3].\n\n2, Weak Semantic Performance: Compared to the ARCH benchmark (https://huggingface.co/spaces/ALM/ARCH), the semantic performance of WavTokenizer is far worse than models like HuBERT base. Therefore, the claim of \"rich semantic information\" does not hold up well. Moreover, it is unclear why WavTokenizer's semantic capabilities were compared only against acoustic codec models, rather than semantic codecs. Why not compare against semantic codecs like SpeechTokenizer[4], SemantiCodec[1], or Mini[3]?\n\n3, Single-VQ Assumption: The entire premise of the paper is based on the assumption that using a single-VQ codec is better for audio language modeling compared to RVQ. But is this correct? The paper mentions that RVQ-based codecs like DAC require 900 tokens per second, which is true, but it fails to acknowledge that RVQ codecs typically have a temporal resolution of only 50Hz, and the most advanced models like Mini have even lower resolution at 12.5Hz. In practice, language models typically need to model at frequencies of 50Hz or less. So, what is the real advantage of a single-VQ codec with 40 or 75 tokens per second? Moshi and Mini have already demonstrated success in low-latency speech dialogue applications, but what about single-VQ? Is reconstructing all acoustic details truly beneficial for language modeling? Does an LLM truly need to model timbre and detailed acoustic features, or is focusing solely on semantic content sufficient? Therefore, you must at least demonstrate the effectiveness of single-VQ in practical applications, as stated in the weakness section, rather than constructing the paper solely based on this assumption.\n\n[1]: Liu H, Xu X, Yuan Y, et al. SemantiCodec: An Ultra Low Bitrate Semantic Audio Codec for General Sound[J]. arXiv preprint arXiv:2405.00233, 2024.\n\n[2]: Li H, Xue L, Guo H, et al. Single-Codec: Single-Codebook Speech Codec towards High-Performance Speech Generation[J]. arXiv preprint arXiv:2406.07422, 2024.\n\n[3]:Défossez A, Mazaré L, Orsini M, et al. Moshi: a speech-text foundation model for real-time dialogue[J].\n\n[4]:Zhang X, Zhang D, Li S, et al. Speechtokenizer: Unified speech tokenizer for speech large language models[J]. arXiv preprint arXiv:2308.16692, 2023."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The overall approach is sound. The paper is straightforward and easy to understand, with a clear structure that makes it accessible to readers. Additionally, the introduction provides a good overview of the context and background, helping readers to understand the problem."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces WavTokenizer, a single-vq codec aimed at simplifying and improving the current audio language modeling approaches by replacing codec with multiple-vq. It claims to achieve competitive reconstruction quality while enhancing integration with language models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper introduces WavTokenizer, a single-VQ codec designed to replace the current codec + LLM audio generation systems. However, there is no experimental evidence to support that WavTokenizer can effectively achieve this. Existing codec + LLM systems can generally be categorized into RVQ-style generation (e.g., VALL-E, MusicGen, Moshi) or semantic-to-acoustic approaches (e.g., SpearTTS, SeedTTS, MusicLM), all of which demonstrate strong performance.\n\nFirstly, there are no experiments showing that WavTokenizer combined with an LLM outperforms any of the existing systems in practical applications such as TTS or music generation. Secondly, when considering the codec itself, WavTokenizer's reconstruction quality is significantly worse than RVQ-based codecs, and its semantic performance falls short compared to semantic token like HuBERT or WavLM.\n\nGiven these shortcomings, it is difficult to see any tangible improvements or significant contributions that WavTokenizer offers to the current field of audio generation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How was VQ utilization calculated in Figure 2(b) of the paper?\n- It seems that WavTokenizer is not inherently streamable. Can WavTokenizer be extended to support streaming encoding and decoding?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The acoustic codec representation model is a crucial technology in the current speech domain. WavTokenizer addresses one of the core challenges in the field by achieving high-quality audio reconstruction with only 40 or 75 tokens.\n- WavTokenizer introduces a novel single-layer quantizer concept, demonstrating its potential in TTS tasks and offering a promising single-layer solution for codec-LLM architectures.\n- From a methodological standpoint, WavTokenizer revisits vector quantization (VQ) in the speech domain and proposes a larger codebook space, a more powerful decoder (with attention mechanisms), and an extended context modeling window. These innovations appear to be effective.\n- The model achieves strong experimental results across reconstruction tasks, semantic understanding tasks, and downstream TTS tasks.\n- The open-sourcing of the complete training and inference code, along with model weights, will contribute to the development of the research community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces WavTokenizer, while preserving the classical acoustic codec model paradigm, achieves high-quality audio reconstruction using only 40 or 75 tokens per second. By proposing a larger codebook space, integrating attention mechanisms, and extending the context window, WavTokenizer demonstrates impressive results in audio reconstruction, semantic understanding, and downstream TTS tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, this work does not present significant weaknesses. However, the design of a highly powerful decoder in WavTokenizer raises some concerns. Specifically, I am concerned that the increased model parameters and the introduction of attention mechanisms may potentially slow down the codec's reconstruction speed?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Did the authors try other techniques to improve VQ codebook utilization? The current approach closely mirrors EnCodec, but DAC demonstrates that low-dimensional code lookups and L2-normalization can significantly improve RVQ scalability. A useful reference to consider is [1], which shows effective scaling strategies in image reconstruction that could be also valuable for audio codecs. I recommend continuing work on the paper, as a single-quantizer audio codec has potential, but the paper’s current scientific contribution is limited.\n\n\n2. What is the motivation for evaluating semantic representation? Neural audio codecs are expected to encode low-level acoustic features rather than abstract semantic concepts. Comparing audio codecs on semantic representation could be misleading, especially given the results e.g. all codecs score below 10% on the SLURP dataset, while self-supervised models like HuBERT achieve nearly 50% (not shown in this paper). This gap calls into question the statement on line 445 that \"WavTokenizer effectively captures rich semantic information\" which may be an overclaim. That said, audio codecs might have a significant impact on representation learning, as shown by EnCodecMAE [2], so it may be more appropriate to treat semantic representation as a downstream task. WavTokenizer's single codebook could be particularly useful for discrete targets in BERT-like setups.\n\n[1] Zhu, Lei, et al. \"Scaling the Codebook Size of VQGAN to 100,000 with a Utilization Rate of 99%.\"\n\n[2] Pepino, Leonardo, Pablo Riera, and Luciana Ferrer. \"EnCodecMAE: Leveraging neural codecs for universal audio representation learning.\""
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper effectively motivates and addresses an important problem in neural audio codecs - quantizing audio into a single sequence of tokens rather than multiple sequences, which complicates modeling for downstream tasks.\n- It presents useful findings regarding decoder design, which are supported by the ablation study. In particular, a Fourier-based decoder combined with an attention layer yields better results, while a time-domain decoder with attention performs surprisingly worse."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces WavTokenizer, a GAN-based neural audio codec that uses a single vector quantization (VQ) codebook, in contrast to previous methods that rely on multiple residual vector quantization (RVQ) codebooks. This results in a bitrate as low as 480 bits per second, and to recover quality, the authors propose replacing the time-domain decoder with a Fourier-based decoder, preceded by attention layers. The ablation study confirms these design choices, and a comprehensive evaluation, including both subjective and objective metrics, shows that WavTokenizer maintains competitive reconstruction quality with state-of-the-art models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the motivation for scaling the single VQ codebook to many entries is clear, the paper falls short of achieving high codebook utilization when expanding beyond a size of 4096. What the authors list as contributions in VQ, such as k-means initialization and random restarts, are in fact well-established techniques in neural audio compression, and this paper doesn’t offer any novel methods to improve codebook usage. This is somewhat disappointing, given that a key focus of the paper is to provide a single quantizer. More experimentation to scale the VQ codebook is needed, as the current contribution feels more incremental and may be better suited for a different venue."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024wavtokenizer,\ntitle={WavTokenizer: an Efficient Acoustic Discrete Codec Tokenizer for Audio Language Modeling},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yBlVlS2Fd9},\nnote={under review}\n}"
},
"abstract": {
"value": "Language models have been effectively applied to modeling natural signals, such as images, video, speech, and audio. A crucial component of these models is the codec tokenizer, which compresses high-dimensional natural signals into lower-dimensional discrete tokens. In this paper, we introduce WavTokenizer, which offers several advantages over previous SOTA acoustic codec models in the audio domain: 1) extreme compression. By compressing the layers of quantizers and the temporal dimension of the discrete codec, one-second audio of 24kHz sampling rate requires only a single quantizer with 40 or 75 tokens. 2) improved subjective quality. Despite the reduced number of tokens, WavTokenizer achieves state-of-the-art reconstruction quality with outstanding UTMOS scores and inherently contains richer semantic information. Specifically, we achieve these results by designing a broader VQ space, extended contextual windows, and improved attention networks, as well as introducing a powerful multi-scale discriminator and an inverse Fourier transform structure. We conducted extensive reconstruction experiments in the domains of speech, audio, and music. WavTokenizer exhibited strong performance across various objective and subjective metrics compared to state-of-the-art models. We also tested semantic information, VQ utilization, and adaptability to generative models. Comprehensive ablation studies confirm the necessity of each module in WavTokenizer. The demo is available at https://wavtokenizer.github.io/."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"speech representation",
"discrete codec",
"audio language model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/50b0c8ec4bdd0bae24d37b95064f738115dc0864.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "WavTokenizer: an Efficient Acoustic Discrete Codec Tokenizer for Audio Language Modeling"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yCAigmDGVy | HiQ-Lip: A Quantum-Classical Hierarchical Method for Global Lipschitz Constant Estimation of ReLU Networks | main | Active | Quantum Computing;Lipschitz Constant;Neural Network;Quantum-Classical Hybrid Method;Coherent Ising Machine;QUBO | learning theory | 3;3;3;5;6 | 4;3;3;4;4 | 2;2;3;2;4 | 2;2;2;2;1 | 3;3;3;3;4 | 4 | 3.6 | 2.6 | 1.8 | 3.2 | 0.645497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No concerns."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Could this balance of bound conservativeness and faster computation speed also be achieved if we approach smaller subproblems with classical approaches? I wouldn’t expect it to be as drastic as the HiQ-Lip, but it might make the classical computation times much more reasonable for the scales presented in the paper.\n\n- The paper refers to the paper (Bartlett et al., 2017) as justification for the improved scaling constant. It would be helpful to point to a specific result in that paper since there is no other discussion in the paper about where it comes from or how it's derived."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- This is the first work of my knowledge to utilize quantum hardware models to accelerate Lipschitz constant estimation and is interesting in concept. The graph coarsening and refinement strategy appears to be a novel contribution of theirs which seems generally useful.\n\n- The computation times reported for 3-5 layer networks show two orders of magnitude speed up over GeoLip while only being slightly more conservative. This is a promising result.\n\n- The paper is generally well-written and the theory is accessible to most readers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a hybrid quantum-classical algorithm for estimating the $L_\\infty \\rightarrow L_1$ Lipschitz constant of a fully-connected neural network. This is achieved by converting the Lipschitz constant estimation problem into an equivalent cut-norm problem which can be solved efficiently on a quantum device with the so-called Quadratic Unconstrained Binary Optimization (QUBO) model. Due to hardware limitations of current quantum devices, a graph coarsening and refinement strategy is used to break the problem into subproblems that can be handled by about 100 qubits.Their experiments show significant speed up in computation time for 2-5 layer MLPs over existing $L_\\infty \\rightarrow L_1$ Lipschitz estimation approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-Although the methodology is interesting in concept, the results for 1-5 layer MLP model trained on MNIST are not very intriguing from a practical standpoint. The paper would be stronger if there was some evidence for making progress in some down-stream application such as certified-robustness of the MNIST classifier. In that case, there are currently 1-Lipschitz regularized layers which can already achieve very good robustness results for large CNNs trained on MNIST [Prach 2022, Araujo 2023].\n\n-I feel that the community is not practically limited by scales of 3-5 layers shown in the paper. In my opinion the current challenges in Lipschitz estimation are more for larger scale models like ResNets trained on ImageNET with the goal to achieve some non-trivial certified robustness for classification. At this scale it's more of a memory limitation issue for solving SDPs (which can also be broken into sub-problems). At this scale, I’m concerned that the quantum hardware limitations are going to make the bounds too conservative. There might be more potential for GPU acceleration on classical computers [Wang 2024].\n\n- It's difficult to interpret the computation times presented in the paper since each algorithm is supposedly using a different computing architecture. Since HiQ-Lip is a hybrid quantum-classical algorithm, is it using the same classical architecture as the baselines with an additional simulated 100 qubits? I think this practical aspect requires more explanation somewhere in the paper.\n\n[Prach 2022] Almost-Orthogonal Layers for Efficient General-Purpose Lipschitz Networks\n\n[Araujo 2023] A Unified Algebraic Perspective on Lipschitz Neural Networks\n\n[Wang 2024] On the Scalability and Memory Efficiency of Semidefinite Programs for Lipschitz Constant Estimation of Neural Networks"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The experimental setup described in Section 6 lacks clarity, particularly regarding the software and hardware configurations used.\n2. How is the computation time for HiQ-Lip determined? Since the experiments are conducted on a simulated CIM, how is the time for the quantum computation component accounted for?\n3. Comparing Table 4 with Table 2 reveals that networks with a larger number of layers appear to require less time for estimation than two-layer networks, which contradicts my expectations. Can you provide a more detailed explanation of this phenomenon?\n4. A more comprehensive explanation of the coefficients used for 3- to 5-layer networks (i.e., HiQ-Lip MP B) would be appreciated."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The work is the first attempt to employ quantum computing to handle the task of Lipschitz constant estimation.\n2. The hierarchical strategy for solving large-scale QUBO problems makes it possible to apply HiQ-Lip on small-scale devices.\n3. The simulation results show the advantage of HiQ-Lip in computation time.\n4. The presentation of this paper is clear."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors introduce HiQ-Lip, a hybrid quantum-classical hierarchical method for estimating the global Lipschitz constant of neural networks. The problem of Lipschitz constant estimation is first transformed into a QUBO problem. Subsequently, this QUBO problem is solved hierarchically on Coherent Ising Machines (CIMs) to accommodate the system size supported by quantum devices. Experimental results on multi-layer perceptrons (MLPs) demonstrate that their method provides estimations comparable to classical state-of-the-art methods while requiring less computation time."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. As the paper primarily focuses on applying quantum computing to global Lipschitz constant estimation, it is uncertain whether the ICLR community will find this topic compelling.\n2. The paper lacks discussion on the theoretical guarantee about the approximation ratio of the hierarchical strategy to the global optimal of original QUBO.\n3. The experimental results are derived entirely from simulations under ideal conditions, without consideration for practical aspects of quantum devices such as finite shots, device noise, and limited coherence time. These non-ignorable imperfections could significantly impact the quality of solutions obtained from quantum algorithms in practice."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "I don't see any concerns for this paper."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is it possible to scale the proposed method for larger networks?\n\n2. In the l2 case, LipSDP has been extended for implicit networks (R1, R2), residual networks (R3) and more general structures (R4). See the following papers:\n\n [R1]: Revan et.al. Lipschitz bounded equilibrium networks.\n\n [R2]: Havens at al. Exploiting connections between Lipschitz structures for certifiably robust deep equilibrium models. NeurIPS 2023\n\n [R3]: Araujo et.al. A unified algebraic perspective on Lipschitz neural networks. ICLR 2023.\n\n [R4]: Fazlyab et. al. Certified robustness via dynamic margin maximization and improved lipschitz regularization. NeurIPS 2023\n\n It will be very interesting if the authors can address more general network structures for the l-infinity case. Can the authors comment on the possibility of such extensions?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The idea from this paper is new and original.\n\n2. The perspective on using small-scale quantum devices for Lipschitz estimation of neural networks is new."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims improving the scalability of SDP-based Lipschitz estimation of neural networks via developing HiQ-Lip, a hybrid quantum-classical hierarchical method that relies on Coherent Ising Machines (CIMs). The authors convert the problem into a Quadratic Unconstrained Binary Optimization (QUBO) problem and claim that this is more adaptable to the constraints of contemporary quantum hardware. Finally, the authors provide some experiments on fully connected neural networks on MNIST to show that their method is comparable to GeoLip but accelerates the computation process for such relatively small scale problem."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The scale of the experiments is quite small. Recently, for l_2 Lipschitz bounds, the SDP method has already been scaled to ImageNet by the following paper:\n\nZi Wang et.al. (ICLR2024): On the scalability and memory efficiency of semidefinite programs for Lipschitz constant estimation of neural networks.\n\n The authors seem not aware of the above result which achieves scalability to ImageNet. The authors studied the l-infinity case here, but the scale is on the MNIST level. This makes me think the contribution by the authors is very incremental in comparison to the original GeoLip paper. A few ways to make the contributions more significant include: 1. demonstrate the proposed method on large scale networks; 2) extend the method for more network structures, e.g. implicit models, residual network, etc."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Any reason why you didn't consider Quantum Annealers (QAs) with larger number of qubits such as DWave Advantage system that have ~5000 qubits? Could you potentially work with larger neural networks given up to 5000 qubits? \n2. Are all the experiments limited only to 100 qubits or variables? Have you thought about how varying the limit on the number of qubits affects the quality of the estimation? \n3. Have you thought about whether there are limits in terms of the size of the neural networks that you can use this approach with? \n4. If you use this approach with neural networks of size larger than the ones considered in the experimental evaluation and fix the maximum number of qubits to 100, can you quantify the performance degradation from the additional graph coarsening and subsequent refinement through experiments or theory?\n5. In all your experiments, the estimates of the global Lipschitz constant come close to GeoLip's estimates, have you tried to increase the size of the neural network in terms of number of hidden neurons or number of layers till either your approach fails to give close estimates to GeoLip? I would expect some performance degradation with larger neural networks due to inexact nature of estimation using graph coarsening/refinement."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "One of the strengths of the paper is that it presents a systematic and scalable way to frame the neural network global Lipschitz constant estimation problem as a QUBO that can be solved with a limited number of qubits. This approach could be adapted to other problems related to neural networks such as neural network training or neural network verification. The experimental approach is also very sound as the authors compared with a number of different methods to estimate the global Lipschitz constant such as GeoLip, LipOpt, Matrix Product (MP), Sampling and Brute Force (BF) and tried two-differnt scaling . The time comparison with LipOpt and GeoLip show that the approach offers time-savings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of finding the global Lipschitz constant of ReLU neural networks using a hybrid quantum-classical hierarchical method. The problem of estimating the global Lipschitz constant of a ReLU neural network is converted into a Quadratic Unconstrained Binary Optimization (QUBO) problem. To address address the issue of limited number of qubits, the paper proposes a new HiQ-Lip algorithm that works by first translating the structure of the neural network into a graph with each node representing a neuron and edges representing the connection strengths, employing graph coarsening to reduce the number of nodes by merging them until the resulting QUBO can be solved directly on a small quantum-annealing based computer, then solving the QUBO and finally mapping the approximate solution from the coarse graph back to the original graph by solving optimization subproblems. The paper finally presents experiments with a two-layer neural network with varying number of hidden neurons and deeper networks with varying number of layers and shows that HiQ-Lip doubles the solving speed of and provides more accurate upper bound (using GeoLip as gold standard) compared to the existing best method LiPopt for two-layer networks and GeoLip for multi-layer networks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This paper doesn't take into account the latest state-of-art in terms of Quantum Annealers (QA) such as DWave Advantage System (https://www.dwavesys.com/solutions-and-products/systems/) that have ~5000 qubits. They limited themselves only to 100 qubits and simulated CIM. They could have scaled out to larger number of qubits and explored what is the tradeoff between using larger number of qubits versus the graph coarsening/refinement strategy in terms of time saving or estimation quality. Given that larger of qubits are available, what is the value of the graph coarsening/refinement approach? There is a lack of assessment on how much performance degradation arises from graph coarsening and refinement."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. As far as I can tell, the evaluation was conducted on a simulated CIM rather than actual quantum hardware. Are all experiments run on the same architecture? I did not find this clarification in the paper. Clarification on the uniformity of the experimental setup across all evaluations is necessary for ensuring reproducibility and accurate interpretation of results, especially since the advantage of HiQ-Lip mostly comes from runtime.\n\n2. Given that GeoLIP's foundations in Grothendieck inequalities and the Unique Games Conjecture (UGC) suggest the difficulty of improvement within polynomial time, the precision similarity between HiQ-Lip (on simulated CIM) and GeoLIP on two-layer networks raises intriguing questions. Could the authors explain this similarity? This could imply a consistent performance on similar problems, such as the cut-norm problem, and might have crucial theoretical implications, for example, regarding the validity of the UGC."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The estimation of Lipschitz constants in neural networks is an important problem in deep learning research, with implications for model robustness and generalization.\n\n2. Applying quantum algorithms to deep learning remains an understudied domain, offering potentially promising avenues for exploration, particularly in light of the increasing sophistication of quantum computing devices."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study presents HiQ-Lip, a hybrid quantum-classical hierarchical approach for estimating the global Lipschitz constant of neural networks. The methodology involves reformulating the Lipschitz constant estimation as a Quadratic Unconstrained Binary Optimization (QUBO) problem, making it suitable for quantum algorithmic solutions. To address the limitations of current quantum devices, the authors introduce a multilevel graph coarsening and refinement strategy, enabling the adaptation of neural network structures to quantum hardware constraints. Empirical evaluations are conducted to validate the effectiveness of the proposed methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Novelty is limited: The conversion of the Lipschitz constant problem to QUBO and mixed-norm formulations was established in [1]; the application of CIM to QUBO was known. The primary algorithmic contribution, a graph coarsening and refinement strategy, is a heuristic and lacks theoretical guarantees.\n\n2. The baseline comparison is insufficient: Authors claim that current SDP methods face challenges such as high memory usage and slow processing speeds. This is true for generic SDP solvers. However, recent advancements in SDP methods have significantly improved efficiency for deep networks and convolutional architectures. For example, [2] has improved the SDP for very deep networks, and [3] has extended the SDP resolution to convolutional networks. Although these works focus on $\\ell_2$ Lipchitz constant estimation, I don't see why they cannot be extended to $\\ell_\\infty$ Lipschitz constant. [1] has pointed out that there are no fundamental differences between $\\ell_2$ and $\\ell_\\infty$ SDPs.\n\n3. The evaluation methodology raises concerns: HiQ-Lip demonstrates inferior precision compared to GeoLIP [1], with improvements primarily in runtime. However, runtime comparisons are implementation and architecture-dependent and do not account for more efficient, tailored SDP solvers (see above). Additionally, the reported runtimes exhibit inconsistencies, with more complex networks (Net3-Net5) showing significantly shorter processing times than simpler ones (Net2), casting doubt on the reliability of the performance metrics. For example, HiQ-Lip for Net-2 takes 30 seconds, while solving Net3 only takes 6.5 seconds.\n\n\nMinor: \n\nBecause converting the Lipschitz constant problem to QUBO and mixed-norm problems was already established in [1], the authors might consider properly crediting these to [1] in section 3. Most of the content was already presented in [1].\n\n[1] Zi Wang, Gautam Prakriya, and Somesh Jha. A quantitative geometric approach to neural-network smoothness.\n\n[2] Anton Xue, Lars Lindemann, Alexander Robey, Hamed Hassani, George J. Pappas, and Rajeev Alur. Chordal sparsity for lipschitz constant estimation of deep neural networks.\n\n[3] Zi Wang, Bin Hu, Aaron J Havens, Alexandre Araujo, Yang Zheng, Yudong Chen, Somesh Jha. On the Scalability and Memory Efficiency of Semidefinite Programs for Lipschitz Constant Estimation of Neural Networks"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "HiQ-Lip accelerates tight Lipschitz constant estimation for neural networks using small-scale quantum devices, outperforming state-of-the-art methods in speed."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hiqlip,\ntitle={HiQ-Lip: A Quantum-Classical Hierarchical Method for Global Lipschitz Constant Estimation of Re{LU} Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yCAigmDGVy},\nnote={under review}\n}"
},
"abstract": {
"value": "Estimating the global Lipschitz constant of neural networks is crucial for understanding and improving their robustness and generalization capabilities. However, precise calculations are NP-hard, and current semidefinite programming (SDP) methods face challenges such as high memory usage and slow processing speeds. In this paper, we propose $\\textbf{HiQ-Lip}$, a hybrid quantum-classical hierarchical method that leverages Coherent Ising Machines (CIMs) to estimate the global Lipschitz constant. \nWe tackle the estimation by converting it into a Quadratic Unconstrained Binary Optimization (QUBO) problem and implement a multilevel graph coarsening and refinement strategy to adapt to the constraints of contemporary quantum hardware. \nOur experimental evaluations on fully connected neural networks demonstrate that HiQ-Lip not only provides estimates comparable to state-of-the-art methods but also significantly accelerates the computation process. \nIn specific tests involving two-layer neural networks with 256 hidden neurons, HiQ-Lip doubles the solving speed and offers more accurate upper bounds than the existing best method, LiPopt.\nThese findings highlight the promising utility of small-scale quantum devices in advancing the estimation of neural network"
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Quantum Computing",
"Lipschitz Constant",
"Neural Network",
"Quantum-Classical Hybrid Method",
"Coherent Ising Machine",
"QUBO"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a2683f090808e1114171c021b20612915cd67603.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "HiQ-Lip: A Quantum-Classical Hierarchical Method for Global Lipschitz Constant Estimation of ReLU Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yCEf1cJDGh | Truthful Aggregation of LLMs with an Application to Online Advertising | main | Active | mechanism design;llm;auction;online advertising | alignment, fairness, safety, privacy, and societal considerations | 3;5;5;6 | 4;3;3;2 | 2;2;2;3 | 1;2;2;3 | 2;3;3;3 | 4.75 | 3 | 2.25 | 2 | 2.75 | -0.973329 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide the specific prompts utilized in the experiments detailed in the paper? Additionally, could you further discuss how different prompts might affect the mechanism's performance?\n2. Could the authors extend their experiment to include comparison with other mechanisms from similar studies?\n3. Given the paper's claim that the proposed mechanism can operate without LLM fine-tuning or access to model weights, could the authors extend the experiments to include utilization of various closed-source models?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well structured, offering clear explanations of the problems addressed and the key terms used.\n2. The proposed mechanism has a firm theoretical grounding.\n3. The proposed mechanism seems to demonstrate strong applicability to real-world scenarios, especially in the realm of online advertising with LLM-generated content."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces an auction mechanism, MOSAIC, to aggregate the preferences of multiple self-interested advertisers over LLM-generated replies. The authors claim that this mechanism can converge to the outputs of the optimally fine-tuned LLM as computational resources increase, without requiring fine-tuning or access to model weights. The authors also present context-aware versions of MOSAIC, which accelerates convergence and yields high advertiser value and platform revenue. Experiments with a publicly available LLM demonstrate that MOSAIC achieves high advertiser value and platform revenue with minimal computational overhead."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The novelty of this paper is limited and the scientific contribution seems to be not obvious. The proposed mechanism addresses the problems using conventional approaches. The authors can strengthen the differences between their proposed mechanism and the algorithms used in standard RLHF and the rejection sampling.\n2. The experiments are insufficient. There are no comparative analyses between MOSAIC and other mechanisms, which makes it difficult to comprehensively assess its performance. The experiments are restricted to the use of Llama-2-7b-chat-hf, lacking generalizability across different LLMs. It would be nice to compare with other mechanisms mentioned in the related work and evaluate across different LLMs like Llama-3-8B, T5, and Mistral-7B."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* In equation (1) in line 152, why do you define KL-divergence of the two distributions and why $\\pi_{ref}$ is on the right hand? Will other choices fail?\n * In the paragraph between line 196 and 201, the author says, \"*The VCG allocation rule requires calculating the exact optimal solution to the optimization problem, which is intractable for choosing an LLM to maximize Equation (1) and is even difficult for choosing a single optimal sequence. If a sub-optimal solution is chosen, VCG’s strategyproofness is no longer guaranteed (Nisan & Ronen, 2007; 1999; Lehmann et al., 2002).*\"\n * In my understanding, choosing an LLM to maximize Equation (1) is equivalent to choosing an LLM that satisfies (2). Since we already have access on $\\pi_{ref}$, when $r(x,y) = \\sum_i r_i(x,y)$ is upper bounded given $x$, it's feasible to sample from (2) by rejection sampling. Besides, we can also use $\\pi_{con}$ replacing $\\pi_{ref}$ to decrease variations. In this sense, I do not find the advantage of using MOSAIC rather than using VCG directly. Could you make some clarifications on this concern?\n * Even in the case that $r(x,y)$ is not upper bounded and VCG can not be implemented, it's possible that sub-optimal solutions are chosen and strategyproofness still hold, MOSAIC is exactly one example. Besides, I think MOSAIC will also behave badly in this case because the probability of large value of $r(x, y)$ is small, and it's likely to sample $M$ candidates $y_1,...,y_M$ with all $r(x,y_i)$s are small."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The author provides an example throughout the paper, making the model easy to understand. The aggregation of LLMs is an important topic, while the introducing of regularization of user preference is novel. The idea of approximating VCG is also interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a mechanism called MOSAIC, that aggregates multiple advertising LLMs (represented by reward functions) and try to find a distribution on replies that maximizes the total rewards of advertisers together with a KL-divergence regularization term on user preference $\\pi_{ref}$. MOSAIC takes the reward functions as input, and output a (stochastic) reply to the user.\n\nMOSAIC first samples $M$ candidate replies from arbitrary general and pre-defined distribution $\\pi_{gen}$. After this, MOSAIC computes the distribution on replies (regarding the distributions as mechanism allocation) and sample one reply from this distribution as final output. The mechanism payments are computed by Rochet payment (1987). As $M$ tends to infinity, MOSAIC is guaranteed to converge to VCG mechanism.\n\nIn experiments, the paper specifies reward functions and $\\pi_{gen}$ by LLM distributions and contextually prompting LLMs, respectively. Experiments show that contextual-prompting MOSAIC performs far better than naive MOSAIC with $\\pi_{gen} = \\pi_{ref}$, with the log-probability of reply close to optimal distribution."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major Issues:\n\n* I think the main drawback of this paper is that MOSAIC is only an approximation of VCG mechanism. Moreover, such approximatio\nn is natural and not challenging to discover and define. Specifically, VCG outputs the distribution on full reply space, while MOSAIC\n first discretizes the full space into finite points and then outputs a distribution on finite points. This operation is simple a\nnd do not change the nature of VCG mechanism. Therefore, I believe that the contribution of MOSAIC is not significant.\n* The other contributions of this paper, eg, context-aware LLM in Line 237 $\\pi_{con}$, and payment offset in Line 335, are case-by-case operations and do not bring significance on the mechanism itself. These ideas are also natural. Besides, the payment contribution is a direct application of Rochet payment (1987). Overall, I believe that the contributions are incremental and limited.\n* Regarding the experiments, the authors only compare the contextual-prompting MOSAIC with naive MOSAIC. There is no comparison\n with baselines, thus I cannot evaluate whether the performance of MOSAIC is acceptable from these experiments. A suggestion is\n to compare MOSAIC with VCG mechanism directly when VCG is applicable.\n\nBesides, the presentation needs improvement. For example,\n\n* In Line 419, it appears $\\pi_i$. However, $\\pi_i$ is only mentioned in Line 146. The definition of $\\pi_i$ should be better placed between its first use, i.e., just before Line 419.\n* In Section 1.2, the authors mentioned 7 contributions of this paper. However, these contributions are incremental, or only the technical details appeared in this paper.\n* In the paragraph of Line 242, the notation $\\pi_r$ have never defined before these appearances. I think it should be $\\pi_{ref}$.\n* In line 313-314, I think that the term $\\log (\\frac{\\pi_{ref}}{\\pi_{gen}}) should be included in the $\\exp(\\cdot)$.\n* In the appendix, the sections are titled with \"Details from Section xx\", \"Omitted Proofs from Section xx\". It is conventional to use 'in' instead of 'from'."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well written and organized.\n- LLM and its application to online advertising, especially the domain of ad auctions, is a novel area to be studied with practical relevance.\n- The theoretical results provided appear sound. The authors also clearly stated the difference between their proposed mechanism and previous auction mechanisms such as VCG.\n- The numerical experiments (and the motivating example) provide interesting insights."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors studied a setting in which the advertisers would like to influence the LLMs to generate their preferred contents and the platform would need to satisfy both advertiser preference and user utilities. The authors proposed an auction mechanism called MOSAIC that enjoys a number of advantages, including ensuring that truthful reporting is a dominant strategy for advertisers. The authors also suggested that the proposed mechanism is equipped with technical feasibility and practicality. To validate their claims, the authors provided both theoretical results and numerical experiments with a publicly available LLM."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I wonder if the authors can provide more explanations for why the ref LLM is not performing as well as the context-aware LLM. I understand that the authors have provided some intuitive explanations in Sections 4, but the ref LLM still appears to be an intuitive choice based on Corollary 4.1. Would it be possible to gain more insights into this difference from a theoretical point of view? Does the authors have any preliminary insights for how to introduce contextual information into the current model? \n- The current framework does not consider a number of constraints that could impact the advertisers' decision-making process, such as each advertiser's budget and/or ROI constraints, or the maximum length of the LLM output. I wonder if the authors can comment on whether their mechanism can incorporate any of the above features."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The paper claims that MOSAIC’s allocation rule converges to an optimal distribution as computational resources increase. How does the rate of convergence depend on the number of candidate sequences?\n\n2. The allocation rule is based on importance sampling to estimate the probability distribution over sequences. What is the mathematical form of the variance for this estimator?\n\n3. How does the choice of the hyperparameter \\tau in front of the KL term quantitatively influence the trade-off between advertiser reward maximization and alignment with the reference LLM?\n\n4. The strategyproofness guarantee hinges on honest reporting by advertisers, but how robust is MOSAIC if advertisers engage in complex forms of gaming or misreporting? \n\n5. In scenarios with advertisers holding strongly opposing interests, does MOSAIC risk generating responses that conflict with each other?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper introduces MOSAIC which effectively combines the preferences of self-interested advertisers while maintaining user-centric content. This is achieved without requiring direct access to LLM weights, which adds flexibility and broad applicability.\n\nThe mechanism ensures that truthful reporting is a dominant strategy for advertisers, thanks to its carefully constructed payment and allocation rules. This strategy-proofness is backed by theoretical proofs.\n\nThe design is computationally efficient, using only API access and avoiding expensive fine-tuning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces MOSAIC, an auction mechanism designed for aggregating advertiser preferences within the outputs of large language models (LLMs), particularly for applications in online advertising. The authors address the challenge of balancing the interests of multiple advertisers with user-centric content by creating a mechanism that incentivizes truthful reporting from advertisers. MOSAIC uses an approach that combines an allocation rule based on importance sampling, allowing it to converge to the optimal output distribution as computational resources increase, without requiring direct model fine-tuning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. There is no real-world testing or application with actual advertiser data to validate the effectiveness of MOSAIC in practical applications.\n\n2. Although MOSAIC is designed to be efficient, it may still face scalability challenges as the number of advertisers or candidate replies grows. An analysis of how the mechanism handles these scenarios under different computational limits would strengthen the paper.\n\n3. Given that the mechanism optimizes for advertiser rewards and alignment with a reference LLM, there’s a risk of unintentional bias in the final output.\n\n4. MOSAIC relies on advertisers truthfully reporting their preferences and reward functions, yet it does not fully address how misreporting could impact results. Further discussion on mid-reporting would be important."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "we give a truthful auction mechanism allowing agents to bid to influence LLM outputs, with a focus on advertising applications."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024truthful,\ntitle={Truthful Aggregation of {LLM}s with an Application to Online Advertising},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yCEf1cJDGh},\nnote={under review}\n}"
},
"abstract": {
"value": "The next frontier of online advertising is revenue generation from LLM-generated content. We consider a setting where advertisers aim to influence the responses of an LLM to align with their interests, while platforms seek to maximize advertiser value and ensure user satisfaction. The challenge is that advertisers may misreport their preferences. To address this, we introduce MOSAIC, an auction mechanism that ensures that truthful reporting is a dominant strategy for advertisers, and which aligns each advertiser’s utility with their contribution to social welfare. Importantly, the mechanism operates without LLM fine-tuning or access to model weights and provably converges to the output of the optimally fine-tuned LLM for the platform’s objective as computational resources increase. Additionally, it can incorporate contextual information about the advertisers, accelerating convergence. Via experiments with a publicly available LLM, we show that MOSAIC significantly boosts advertiser value and platform revenue with low computational overhead. While our motivating application is online advertising, our mechanism can be applied in any setting with monetary transfers, making it a general-purpose solution for truthfully aggregating the preferences of self-interested agents over LLM-generated replies."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"mechanism design",
"llm",
"auction",
"online advertising"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f643fac9096b0ed41b118fb55ebfb16cd221e38d.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/0350cb2e1b39da55d4afc94c5cd46dde64cb9b40.zip"
},
"title": {
"value": "Truthful Aggregation of LLMs with an Application to Online Advertising"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yCN4yI6zhH | GPromptShield: Elevating Resilience in Graph Prompt Tuning Against Adversarial Attacks | main | Active | pre-training; prompt tuning; robustness; adversarial attacks. | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;5;6 | 4;3;3 | 2;3;3 | 2;3;3 | 3;2;3 | 4.666667 | 3.333333 | 2.666667 | 2.666667 | 2.666667 | -0.944911 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Refer to the weaknesses above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed mixed multi-layer defense strategy in this paper demonstrates technical innovation by combining various defense mechanisms, such as feature-based prompting and real-time adjustments. This approach significantly enhances the system’s tolerance to adversarial attacks, offering a fresh perspective on the research of graphic prompting adjustment systems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel approach to graph prompt tuning focused on robustness in adversarial attack scenarios. It proposes a highly extensible shield defense system with a hybrid multi-defense prompt and robust prompt tuning strategy, demonstrating theoretical feasibility. Extensive experiments in few-shot scenarios under various adversarial attacks show that their strategies significantly enhance prompt tuning resilience in downstream biased tasks. The use of feature-based prompts allows real-time adjustments, and the paper highlights the effectiveness of their methods in both adaptive and non-adaptive attack scenarios."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "One weakness of the paper is the absence of a comparative analysis with existing systems or defense strategies against adversarial attacks. \n\nThe paper primarily focuses on theoretical analysis and experimental validation in controlled settings. However, the lack of real-world implementation or case studies hinders the practical applicability assessment of the proposed method. To enhance the relevance and impact of the research, future work could involve implementing the system in real-world scenarios and evaluating its performance in practical applications.\n\nThe evaluation section of the paper might benefit from a more comprehensive selection of evaluation metrics to assess the proposed method’s performance accurately. Specifically, incorporating additional metrics such as computational efficiency, scalability, or user experience aspects could provide a more holistic evaluation of the system’s capabilities and limitations. Including a diverse set of evaluation criteria would offer a more thorough understanding of the method’s strengths and weaknesses.\n\nThe paper could further strengthen its impact by discussing the generalizability of the proposed method to different types of adversarial attacks or diverse datasets. Providing insights into how the approach could be adapted or extended to address a broader range of security challenges would enhance the paper’s contribution to the field."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why other methods' results are getting close to the proposed methods in 10-shot experiments? Is it possible that other methods will outperform the proposed methods with even more shots?\n2. Adding the figures to represent prompt design and robust optimization strategies stages could be better."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea and structure is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores enhancing the robustness of graph prompt-tuning methods against adversarial attacks in graph neural networks (GNNs). It identifies a vulnerability in current prompt-based methods, which are highly susceptible to adversarial alterations. The proposed solution is a \"shield defense system\" that enhances robustness through two strategies: Direct Handling and Indirect Amplification. Direct Handling customizes multi-defense prompts based on node-specific attributes, targeting biased nodes introduced by attacks, while Indirect Amplification leverages few-shot learning and selectively focuses on untainted information, thereby circumventing the misleading data introduced by adversaries. Theoretical validation demonstrates that this approach can maintain unbiased outputs despite adversarial perturbations. Experimental results show that the defense system enhances resilience under various attack scenarios, outperforming existing prompts in maintaining accuracy and robustness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some of the tables are confusing, for example, the table 1. Pre-Training Strategies & Prompts appear simultaneously in the top left corner. Should we design the table in a better way or use other charts to represent these results?\n2. Section 2 Related work, if there is no more subsection 2.1, is not necessary.\n3. The whole framework should have a figure to clearly show the stages of prompt design and robust optimization strategies"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please check the question raised in the comments concerning the weakness of this study."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "First of all, this is an interesting work in investigating robust graph prompt tuning to increase the stability of GPL against potential graph poisoning attacks. Especially, if the test graph structure is intentionally manipulated, this method offers a solution to suppress the impact of the poisoning efforts over the decision of GPL."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a robust graph prompt learning method based on adversarial training to defend Graph Neural Networks (GNNs) against adversarial attacks. The method begins by identifying nodes typically targeted in these attacks, such as low-degree nodes, nodes with low central similarity within multi-hop subgraphs, and nodes linked to adversarially perturbed edges. Next, the approach fine-tunes three distinct types of graph prompts, each corresponding to one of the targeted node types. The objective of the prompt tuning process is to ensure: (1) smoothness in node embeddings within multi-hop subgraphs across the three types of graph prompts, (2) consistency between prompt-enhanced and unprompted node embeddings, and (3) smoothness across embeddings from different types of prompts. Finally, the study applies this method to multiple graph prompt learning models, evaluating its defense effectiveness against non-adaptive attacks and its performance in combination with other defense strategies for adaptive and graph poisoning attacks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are several points that need to be strengthened: \n\n1/ This work only focuses on node classification-oriented GPL and does not discuss graph poisoning attacks targeting at manipulating node attributes. It is not clear if the proposed robust prompt tuning method can be applied to edge or graph level learning tasks and can be adapted to both node- and edge-manipulation based attacks. I am asking this as GPL is a multi-task system that can transfer knowledge between different types of GNN learning tasks (node, edge and graph classification). From this perspective, the evaluation in this study is not sufficient. Extending the coverage over more graph datasets and GNN learning tasks should be considered. I'd recommend the author refer to the dataset choice and learning task configurations in [1]. \n\n[1] All in One: Multi-Task Prompting for Graph Neural Networks, https://arxiv.org/pdf/2307.01504. \n\n2/ The theoretical proof in Section 4.3 is not associated with the main claim (robust prompt tuning) in this work. This theoretical analysis should be dedicated to explain how the robustness is improved via the proposed tuning approach. However, the link between Section 4.3 and the algorithmic design is unclear in the current submission. \n\n3/ The ablation study may also need to consider one situation: what if every node belonging to the three types (low degree, low central similarity and out-of-distribution nodes) has the same graph prompting function ? Besides, which types of the nodes should be given more weights in the robust tuning process, or they are handled equally in adversarial attacks ? \n\n4/ What are out-of-distribution nodes in a graph? Citing the paper (Li et al) is not enough. Further discussion regarding what these nodes are and why they are concerned in adversarial attacks should be involved. \n\n5/ How do you choose the threshold values $\\tau_{degree}$ and $\\tau_{sim}$ ? Are they set empirically, or is there any protocol to choose adaptively in different GNN adversarial attack scenarios ? Similarly, how do you choose $\\tau_{tune}$ in Eq.15 ? Does it mean you completely exclude the subgraphs if the embeddings of nodes in these subgraphs meet the condition in Eq.15? Will it also bring harm to the utility of the trained GPL as it excludes information from the training process. \n\n6/ As an adversarial training method, it is not surprised to see the proposed defensive graph prompts can mitigate one attack method when they are trained to be resilient to this attack. I am wondering if the proposed method has the transferability. In particular, if it is trained with the perturbed graph using one of the attack methods (MetaAttack, Heuristic attack, Random attack, DICE) can be also resilient to the other three attacks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024gpromptshield,\ntitle={{GP}romptShield: Elevating Resilience in Graph Prompt Tuning Against Adversarial Attacks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yCN4yI6zhH},\nnote={under review}\n}"
},
"abstract": {
"value": "The paradigm of ``pre-training and prompt fine-tuning\", with its effectiveness and lightweight characteristics, has rapidly spread from the language field to the graph field. Several pioneering studies have designed specialized prompt functions for diverse downstream graph tasks based on various graph pre-training strategies. These prompts concentrate on the compatibility between the pre-training pretext and downstream graph tasks, aiming to bridge the gap between them. However, designing prompts to blindly adapt to downstream tasks based on this concept neglects crucial security issues. By conducting covert attacks on downstream graph data, we find that even when the downstream task data closely matches that of the pre-training tasks, it is still feasible to generate highly misleading prompts using simple deceptive techniques. In this paper, we shift the primary focus of graph prompts from compatibility to vulnerability issues in adversarial attack scenarios. We design a highly extensible shield defense system for the prompts, which enhances their robustness from two perspectives: Direct Handling and Indirect Amplification. When downstream graph data exhibits unreliable biases, the former directly combats invalid information by adding mixed multi-defense prompts to the input graph's feature space, while the latter employs a training strategy that circumvents invalid part and amplifies valid part. We provide a theoretical derivation that proves their feasibility, indicating that unbiased prompts exist under certain conditions on unreliable data. Extensive experiments across various adversarial attack scenarios indicate that the prompts within our shield defense system exhibit enhanced resilience and superiority. Our work explores new perspectives in the field of graph prompts, offering a novel option for downstream robust prompt fine-tuning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"pre-training; prompt tuning; robustness; adversarial attacks."
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/67c8ccfdd3a8c0838aaa8ff4715b94cc399ffaaa.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "GPromptShield: Elevating Resilience in Graph Prompt Tuning Against Adversarial Attacks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yCr55EjC1d | Node Duplication Improves Cold-start Link Prediction | main | Active | Graph Neural Network;Link Prediction;Cold-start;Graph Augmentation | learning on graphs and other geometries & topologies | 3;3;3;5 | 4;4;4;3 | 2;1;2;3 | 1;2;2;2 | 3;2;3;3 | 3.5 | 3.75 | 2 | 1.75 | 2.75 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide a theoretical explanation or analysis to clarify why NODEDUP’s node duplication approach is effective for improving cold-node representation?\n2. Since the authors claim that their method does not compromise warm-node performance, could they provide comparisons with some of the latest graph contrastive learning baselines, such as GCLMI [1], Sp^2GCL [2], POT [3], and GraphACL [4], proposed in 2024?\n\n\n\n[1] Xu, Yuhua, et al. \"Graph contrastive learning with min-max mutual information.\" *Information Sciences* 665 (2024): 120378.\n\n[2] Bo, Deyu, et al. \"Graph contrastive learning with stable and scalable spectral encoding.\" *Advances in Neural Information Processing Systems* 36 (2024).\n\n[3] Yu, Yue, et al. \"Provable training for graph contrastive learning.\" *Advances in Neural Information Processing Systems* 36 (2024).\n\n[4] Xiao, Teng, et al. \"Simple and asymmetric graph contrastive learning without augmentations.\" *Advances in Neural Information Processing Systems* 36 (2024)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. **Comprehensive Experimental Evaluation**: \n\n The paper is well-supported by extensive experimental results, covering a wide range of datasets and comparisons. This thorough empirical analysis demonstrates NODEDUP’s efficacy in addressing the cold-start problem, adding substantial credibility and depth to the findings.\n\n2. **Clear Presentation and Visualization**: \n\n The paper is well-organized and clearly written, with visual aids that effectively communicate the experimental results. The figures and tables are particularly helpful in understanding NODEDUP’s impact across different settings, making the paper accessible and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces NODEDUP, a novel augmentation technique aimed at enhancing the performance of graph neural networks (GNNs) in link prediction (LP) tasks, specifically for cold-start or low-degree nodes. By duplicating low-degree nodes and linking each to its duplicate, NODEDUP provides a “multi-view” representation that improves embeddings for cold nodes while retaining the performance of high-degree nodes. Extensive experimental results demonstrate NODEDUP's ability to achieve significant LP performance gains on cold nodes across multiple datasets, highlighting its potential to address limitations in existing GNN-based LP methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While NODEDUP demonstrates effectiveness in addressing the cold-start link prediction problem, several critical limitations should be considered:\n\n1. **Limited Scalability**: \n\n NODEDUP's design focuses heavily on the \"duplication of cold nodes,\" making it overly specialized for the cold-start problem and potentially too simple for broader link prediction tasks. The technique lacks flexibility to handle other challenges, such as heterophilic graphs or highly dynamic networks, which require adaptable augmentation strategies. This limits NODEDUP’s versatility as a universal approach in graph contrastive learning.\n\n2. **Lack of Theoretical Foundation**: \n\n Despite extensive empirical results, the paper does not provide a theoretical explanation for why NODEDUP improves cold-node representation. A theoretical analysis would strengthen the validity and interpretability of the method, clarifying how and why node duplication facilitates performance gains for cold nodes in GNNs.\n\n3. **Outdated Baselines**: \n\n Although the authors claim that NODEDUP does not compromise overall performance while addressing the cold-start problem, they compare it only against older baselines rather than the latest advancements in graph contrastive learning. This raises questions about how NODEDUP’s performance aligns with state-of-the-art methods and whether it may lag behind the latest graph contrastive learning approaches in overall link prediction performance.\n\n4. **High Dependency on Hyperparameters**: \n\n The proposed NODEDUP method relies heavily on the selection of the hyperparameter δ, which determines the distinction between cold and warm nodes. Although the authors provide a heuristic method and a hyperparameter search experiment, they do not offer a clear rationale for selecting a specific value of δ. This raises concerns that an optimal δ may need to be tuned individually for each dataset, potentially limiting the method’s practical applicability and generalizability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In Section 3.2, you mention that duplicating cold nodes provides additional view for aggregation. However, could you clarify how this extra view differs fundamentally from simply adjusting the weights assigned to a node's own representation? What specific advantages does node duplication offer that cannot be achieved through weight adjustments alone? And how is it related to helping cold-start LP\n\nIn Section 3.2, you argue that more supervision signals for cold nodes can lead to better-quality embeddings. Can you provide a more detailed explanation of how the link between a cold node and its duplicate functions as a meaningful supervision signal? What evidence or reasoning supports the claim that this relationship enhances the learning process in a way that traditional training methods (e.g. self-loop) do not?\n\nIn your paper, you define cold-start nodes using a fixed threshold of 2 for node degrees. Given that the effectiveness of this threshold may vary based on different graph properties, such as node degree distribution, and overall connectivity, how do you justify the choice of this specific threshold? Have you considered the impact of varying this threshold on the identification of cold-start nodes and the subsequent performance of your proposed method?\n\nIs there any reasoning why NodeDup and NodeDup(L) consistently outperforms GSage in the warm nodes and overall settings. As explained in the paper, the data augmentation is done on cold-start nodes only. How does it affect the performance of the warm nodes and overall settings?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposed method stands out for its simplicity and ease of integration with existing GNN architectures. By requiring only the duplication of low-degree nodes and the establishment of connections between these duplicates, it provides a straightforward augmentation technique that can be seamlessly applied to various GNN models.\n\nThe paper has extensive experiments to demonstrate the effectiveness and superiority of the proposed method. By evaluating the approach across multiple benchmark datasets, the authors provide strong empirical evidence supporting their claims."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a simple yet effective graph data augmentation method called NodeDup to improve the performance of Graph Neural Networks (GNNs) in link prediction tasks, particularly for low-degree or \"cold\" nodes. The method duplicates low-degree nodes and creates links to their duplicates, thereby providing a \"multi-view\" perspective during training. It addresses the well-known cold-start problem in recommendation systems and other applications by augmenting the training data without sacrificing the performance of well-connected nodes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper appears to lack sufficient novelty in its contributions to the field of link prediction. The underlying idea of augmenting data through duplication has been explored in various contexts. Additionally, the paper does not convincingly demonstrate how NodeDup outperforms or fundamentally enhances prior methods, leading to concerns about the overall impact of the contribution.\n\nAlthough the paper provides some justification for the proposed method, it could delve deeper into the theoretical foundations of why node duplication is particularly effective for cold-start link prediction. More comprehensive discussions could help solidify the rationale behind the approach.\n\nThe paper lacks a thorough investigation into the node degree distribution of the graphs used in the experiments, which can significantly impact the efficiency of the proposed method. In cases where the graph exhibits a highly skewed degree distribution, the additional complexity introduced by duplicating cold-start nodes could potentially lead to a doubling of the original computational complexity."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see my previous comment."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The paper is clearly written and easy to understand.\n\n2. The authors claim that the proposed method demonstrates significant improvements in performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose NODEDUP, a simple yet effective augmentation technique aimed at improving link prediction (LP) performance for low-degree nodes in Graph Neural Networks (GNNs) while preserving performance for high-degree nodes. NODEDUP works by duplicating low-degree nodes and creating links to their duplicates before applying standard supervised LP training. This “multi-view” approach significantly enhances LP performance for low-degree nodes without negatively impacting high-degree nodes. As a plug-and-play module, NODEDUP integrates easily into existing GNNs with minimal computational cost. Extensive experiments show average relative improvements of 38.49%, 13.34%, and 6.76% for isolated, low-degree, and warm nodes, respectively, compared to traditional GNNs and state-of-the-art cold-start methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The problem addressed in this paper does not align with the experimental datasets used. The cold start issue is a significant challenge in recommendation systems, and while the authors frequently mention the intent to tackle this problem, they rely on publicly available general graph datasets, such as citation networks, for their experiments. Crucially, they do not utilize any recommendation system datasets, such as Movielens-1M or Amazon-Books.\n\n2. The work lacks theoretical guarantees. Although the authors attempt to explain in Section 3.2 how node duplication aids cold start link prediction, I would prefer to see definitive theorems or lemmas presented in the paper. This would provide a more solid theoretical foundation for the work.\n\n3. In Table 18, GCN+NodeDup shows a decline in overall metrics compared to GCN. What is the reason for this phenomenon?\n\n4. There are several works designed to solve link prediction problems using GNN methods, such as references [1], [2], and [3]. However, the authors do not compare NodeDup against these methods in their experiments.\n\n5. The method description lacks clarity. In the NodeDup approach, determining which nodes are cold starts is a crucial step. However, in the core algorithm (Algorithm 1), the authors do not demonstrate how to deterministically obtain the set of cold start nodes V_{cold}.\n\n6. The authors should provide a direct comparison with adding self-loops as an augmentation baseline is missing, which could help clarify the advantages of NodeDup and NodeDup(L) over simpler alternatives.\n\n7. The authors state that OGB datasets were not used because they ``lack a substantial number of isolated or low-degree nodes'' (Lines 892-893). However, even though OGB datasets may primarily include high-degree nodes, they still contain a subset of lower-degree nodes, which would allow for a relevant evaluation of the proposed method's performance across both node types. Additionally, testing on these datasets could strengthen the claim that the method does not compromise warm node performance, as OGB is a widely recognized benchmark.\n\n[1] Bai Q, et al. HGWaveNet: A Hyperbolic Graph Neural Network for Temporal Link Prediction. WWW-23.\n\n[2] Zhu Z, et al. Neural Bellman-Ford Networks: A General Graph Neural Network Framework for Link Prediction. NeurIPS-21.\n\n[3] Cai L, et al. Line Graph Neural Networks for Link Prediction. TPAMI-21."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "**Q1**: What is the key insight and contribution of the proposed heuristic strategy?\n\n**Q2**: Figure 2 shows that NodeDup significantly improves performance compared to GraphSage on cold nodes while achieving results comparable to GraphSage on warm nodes. However, it raises the question of why the overall performances of two models are nearly identical.\n\n**Q3**: The additional time complexity of the decoder should be $O( |\\mathcal{V}\\_{cold}| D)$ rather than $O((M+|\\mathcal{V}\\_{cold}|) D)$?\n\n**Q4**: Whether self-loops are included in the adjacency matrix of the datasets used or not?\n\n**Q5**: Further explanation about the differences in performance between NodeDup and NodeDup(L) on warm nodes is needed.\n\n**Q6**: Additionally, there are minor typos, such as in line 400, where \"did\" is incorrectly used."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "S1: This paper focuses on addressing the cold-start problem in link prediction tasks, a significant issue with numerous practical applications in real-world scenarios.\n\nS2: The authors conduct extensive experiments to demonstrate the effectiveness of their method compared to traditional GNNs, cold-start GNNs, and other data augmentation techniques.\n\nS3: The proposed method is simple and can be easily applied to different GNNs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces NODEDUP, a data augmentation method designed to enhance the performance of GNNs on low-degree nodes in downstream link prediction tasks. By duplicating low-degree nodes and creating links between nodes and their own duplicates, NODEDUP can significantly improve prediction performance on *cold nodes* without compromising overall performance on *warm nodes*. Extensive experimental results demonstrate the effectiveness of NODEDUP compared to base GNNs, cold-start GNNs, and other data augmentation GNNs. Additionally, NODEDUP is a plug-and-play module that can be easily applied to different GNNs with minimal additional cost."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**W1**: The primary concern of this paper is its limited novelty. The main idea of duplicating cold nodes is not a novel strategy within existing GNN frameworks. Additionally, the distinction between NodeDup and the common data pre-processing step of adding self-loops is not clear. When adopting general message passing GNNs, isolated nodes can also treat themselves as neighbors and update node representations using both $\\boldsymbol{W}\\_1$ and $\\boldsymbol{W}\\_2$. Without a clear differentiation, it is challenging to justify the novelty of NodeDup over existing techniques.\n\n**W2**: It is unclear why such a simple strategy significantly enhances overall GNN performance on warm nodes, particularly on the *citeseer* dataset, which contains a large number of isolated nodes with zero degrees. Consider an example in online social networks, where a node $u$ represents a famous individual with millions of followers, and most of $u$’s followers may have few connections with other users, serving as *cold nodes*. In this scenario, adding self-loops using NodeDup(L) on these cold nodes may introduce additional noise in predicting the *warm node* $u$. This could potentially degrade the model's performance rather than improve it. \n\n**W3**: Additional theoretical analysis from a spectral perspective, such as examining changes in the graph Laplacian after self-augmentation, could enhance the understanding of the self-augmentation strategy. Such analysis might provide deeper insights into why and how the self-augmentation strategy works, potentially revealing underlying mechanisms that contribute to its effectiveness or limitations.\n\n**W4**: More experiments on larger datasets, such as collab, ppa, and citation2 from the OGB benchmark datasets [R1], should be included in the main text. The current experimental setup may not be sufficient to generalize the findings across different types of graphs and scales. Including these larger datasets would provide a more comprehensive evaluation of the method scalability and robustness. Additionally, a comparison between NODEDUP and more recent link prediction models, such as MPLP [R2], is necessary.\n\n---\n\n[R1] W. Hu, M. Fey, M. Zitnik, Y. Dong, H. Ren, B. Liu, M. Catasta and J. Leskovec. Open Graph Benchmark: Datasets for Machine Learning on Graphs. 2020. NeurIPS(33): 22118-22133.\n\n[R2] K. Dong, Z. Guo, N. V. Chawla. Pure Message Passing Can Estimate Common Neighbor for Link Prediction. arXiv:2309.00976."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024node,\ntitle={Node Duplication Improves Cold-start Link Prediction},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yCr55EjC1d},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph Neural Networks (GNNs) are prominent in graph machine learning and have shown state-of-the-art performance in Link Prediction (LP) tasks. Nonetheless, recent studies show that GNNs struggle to produce good results on low-degree nodes despite their overall strong performance. In practical applications of LP, like recommendation systems, improving performance on low-degree nodes is critical, as it amounts to tackling the cold-start problem of improving the experiences of users with few observed interactions. In this paper, we investigate improving GNNs' LP performance on low-degree nodes while preserving their performance on high-degree nodes and propose a simple yet surprisingly effective augmentation technique called NodeDup. Specifically, NodeDup duplicates low-degree nodes and creates links between nodes and their own duplicates before following the standard supervised LP training scheme. By leveraging a ``multi-view'' perspective for low-degree nodes, NodeDup shows significant LP performance improvements on low-degree nodes without compromising any performance on high-degree nodes. Additionally, as a plug-and-play augmentation module, NodeDup can be easily applied on existing GNNs with very light computational cost. Extensive experiments show that NodeDup achieves 38.49%, 13.34%, and 6.76% improvements on isolated, low-degree, and warm nodes, respectively, on average across all datasets compared to GNNs and state-of-the-art cold-start methods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Network",
"Link Prediction",
"Cold-start",
"Graph Augmentation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/75ce8b1701660713dee4dc8c7b8f06933413a560.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Node Duplication Improves Cold-start Link Prediction"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yD2JMeKumt | DOTA: Distributional Test-Time Adaptation of Vision-Language Models | main | Active | Test-time;uncertainty;vision-language models | transfer learning, meta learning, and lifelong learning | 5;5;5;6 | 4;4;5;5 | 3;2;2;3 | 2;2;3;3 | 3;3;3;3 | 5.25 | 4.5 | 2.5 | 2.5 | 3 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. **Test Distribution Estimation:** In line 212 and from equations (4) and (6), it is described such that a batch of test samples arrive at each time step. However, prior baselines TPT, TDA perform single image TTA. Further, in Implementation details, in line 706, it is mentioned batch size is 1. Please clarify if single image TTA is done here as well, for fair comparison. If so, what does $n$ refer to in equations (5) and (6). How are $\\mu_k$ and $\\sigma_k$ estimated in single image TTA. Are you storing features, as done in TTA as well, along with the statistics?\n\n2. **Selection of uncertain samples:**\nConfidence is softmax applied over the similarity scores only right? Why is there such a large discrepancy using these two similar metrics (Table 7)? Also, is this similarity and confidence estimated from zero shot classifier or the classifier proposed? And why'd you choose what you choose?\n\n3. **Sensitivity to hyperparameters:** How sensitive is the method on the choice of the parameters $\\sigma, \\eta, \\rho$, as it's not practical to assume access to validation data before actually doing TTA? \n\n4. **Human in the loop TTA:** Please refer to the weaknesses and address the relevant concerns raised."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- They propose to estimate the class distributions in an online manner. \n- The adaptive fusion of zero shot text classifier based predictions and the distribution based feature similarities is simple and intuitive. \n- This being a backpropogation free approach, is very light-weight computationally, which is a great advantage for TTA.\n- The paper is well written and easy to follow, however several clarifications are required."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors address the problem of Test Time Adaptation of Vision Langugage models. They propose to continuously estimate the distribution of test samples, which they leverage through Bayes theorem, to make the final test predictions. They also collect human feedback to receive supervision for uncertain samples."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Distributional Test Time Adaptation:** In a single image TTA setting, the whole section 3.2 is quite unclear. There is no **batch** of samples in this setting. So, how are the class distributions actually updated at each time step.\n\n2. **Samples for distribution estimation:** Are all test samples used for updating the class distributions? Wouldn't the use of low confident/uncertain samples lead to bad parameter estimates in eqn(4)?\n\n3. **TTA with human feedback:** While this is one of the major contribution of the paper, it appears to only result in modest improvements. 5% and 15% is a lot of data to ask labels for, from a human. However, the results improve only of the order of 1-2%. This makes the efficiency of this whole process questionable.\n\n4. **Performance evaluation of TTA with human feedback:** As the test samples arrive in an online manner and based on uncertainty, how is the final accuracy evaluated here? Are these samples inclusive when evaluating accuracy? If so, you should be using the ground truth as predictions for actively labeled samples. Then the accuracy should be up by about 5% or 15%. As this is not the case in the results reported, are the labeled samples excluded from evaluation? This needs to be clarified. For fair comparison, all results should be reported on the complete test set, even when using human feedback.\n\n5. **Need stronger baselines for TTA with human feedback:** To study the role of TTA with human feedback, stronger baselines need to be established, using different selection strategies, like random, confidence, entropy etc. and report the accuracy of complete test set. As all strategies have same amount of labeled samples included, the performance improvement due these strategies as well as the gains wrt no human feedback can be assessed. \n\n6. **Amount of human feedback:** 5% and 15% is a lot of supervision and this may not be feasible during test time. It's more practical to ask labels for about 1-2% of test data. Experiments with stronger baselines, with lesser supervision, with correct evaluation method, is required to actually understand and evaluate the role of human feedback in TTA.\n\n7. **Choice of hyperparameters:**\nIn Implementation details, it is mentioned that validation sets are used to choose the hyperparameters. However, in TTA, one does not have access to any data from the test distribution apriori. Hence, validation data is not accessible in practice. Well, if one had access to validation data for test data, it provides a lot more information and could be used for more than just hyperparameter tuning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. This paper is novel and interesting; would you consider making the code open source?\n2. In Table 1, for ResNet-50, DOTA’s performance is only slightly better than TDA, with an average improvement of just 0.15%. I would like to understand the reason for this marginal gain."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper is well written and easy to follow.\n2. The idea is interesting, the motivation of this paper is clear as well as the novelty of the method.\n3. The proposed method is extensively tested against prior work and outperforms on a variety of tasks/baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The author presents DOTA, a method that adapts to deployment conditions by continually estimating test sample distributions rather than memorizing them. Using Bayes’ theorem, Dota computes posterior probabilities for real-time adaptation. A human-in-the-loop feature also gathers feedback on uncertain samples, enhancing adaptability. Experiments show Dota outperforms current methods with continuous test-time learning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper does not include a detailed case study focused on a particular domain or a challenging dataset.\n2. This paper does not include experiments assessing the model's sensitivity to hyperparameters. A detailed analysis of hyperparameter tuning could offer valuable insights into the robustness and generalizability of the proposed approach.\n3. The paper could benefit from additional visualizations illustrating Test-time adaption with human feedback"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does the performance change w.r.t. the data stream?\n2. What is the impact of the various hyper parameter?\n3. How does the test-time forgetting phenomenon happen?\n4. How does the model compare with other baselines, e.g., DMN?\n5. How does the work relate to existing ones on prototypical networks and the various TTA settings?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. DOTA revisits principles in the literature on continual learning via nearest class mean classifiers (e.g., [a,b]) for improving the performance of VLMs at test time. Overall the approach is easy to implement and can be considered a valid baseline for future works, performing continuous TTA without the need for storing a cache, as in TDA.\n\n2. The article is well-structured and easy to follow, guiding the reader through all the design choices. \n\n3. DOTA is effective (as shown in the comparisons with TDA, e.g., Tab. 1 and Tab. 2) and computationally cheap (as shown in Tab. 3). \n\n4. Fig. 3 provides an analysis of how the performance of the two models (TDA and DOTA) vary w.r.t. the number of samples, showing the advantages of the latter.\n\n**References**:\n\n[a] Mensink, Thomas, et al. \"Distance-based image classification: Generalizing to new classes at near-zero cost.\" IEEE transactions on pattern analysis and machine intelligence 35.11 (2013): 2624-2637.\n\n[b] Bendale, Abhijit, and Terrance Boult. \"Towards open world recognition.\" Proceedings of the IEEE conference on computer vision and pattern recognition. 2015."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The article addresses the problem of test-time adaptation of vision-language models. Differently from previous works (e.g., TDA, Karmanov et al. 2024) that use a cache to store samples, the proposed method, DOTA, stores an online estimate of the statistics (mean and variance) of each class of interest. These statistics are then used during inference to refine standard CLIP predictions. An active learning strategy exploiting this statistic is also proposed to improve the performance of the model further, asking a user to annotate the least confident examples. Experiments on a wide range of tasks show the efficacy of this approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. DOTA continually updates its estimates of the statistics. Those might be affected by various factors linked to the experimental protocol, (e.g., order of the classes in the stream, batch size) as well as hyperparameters choice (i.e., the initial variance value, the shrinkage $\\epsilon$, $\\lambda$'s hyperparameters). Currently, the article does not provide too many insights on these factors, with the analysis mostly limited to the active learning percentile (i.e., Fig. 3). To assess the robustness of the model and provide a thorough study of its performance, it would be interesting to show results across multiple data ordering (i.e., currently it is not clear how many orders have been tested) and whether the performance changes w.r.t. the particular stream considered, even on the edge-cases where the data is non-i.i.d. [c]. Moreover, the hyperparameters may impact the speed of adaptation (e.g., variance initialization, $epsilon$) as well as how much the pretrained model is considered (e.g., $\\lambda$s): studying their impact is essential to fully evaluate the complexity of the approach and potential difficulties in applying it on real-world scenarios. \n\n2. While TPT and DiffTPT are strong models for test-time adaptation, they work on the episodic setting, i.e., where adaptation is held out on a single sample, and then the model is reset to its previous state. The possibility of storing/using test-time data (assuming coherence in the sequence) is a non-negligible advantage that DOTA has (and that it shares with TDA). This makes both the \"continual adaptation\" mark on TPT (Tables 1 and 2) potentially misleading, as well as TDA the only true baseline acting under the same priors of DOTA. To make the results stronger, it would be beneficial to add more baselines, such as DMN [d]. \n\n3. Following on the previous points, adapting to an evolving stream is a much more nuanced problem, where correlation between consecutive data may play an important role. Thus, various TTA settings with different types of stream and data dependencies (e.g., practical TTA [e], universal TTA [f]) could have been considered to further show the effectiveness of the approach. \n\n4. A key motivation behind DOTA relies on the test-time forgetting of TDA (lines 52-56). However, there are no experiments demonstrating this point (beyond the quantitative advantages of DOTA). An analysis clearly showing this phenomenon (and how DOTA is more robust to it) would strengthen the motivation behind the approach. \n\n5. The active learning strategy proposed to refine the performance for uncertain samples (Section 3.3) is a nice addition to make the approach more coherent but it lacks competitors. For instance, also TDA could employ a similar strategy (as the update of the cache is based on the confidence of the predictions). Moreover: (i) the accuracy with random feedback is also very close to those achieved with the proposed strategy (e.g., 0.6% gap on average in Tab. 6, 5% percentile); (ii) for the confidence-based scoring to work, the model is assumed to be calibrated, something not always true and that needs a proper discussion [g]; (iii) in Tab. 7 the accuracy of the random baseline is not reported: this is an important reference to put results into perspective. \n\n6. 
Related work (Section 2) provides a limited discussion on the various types of TTA settings (e.g., [e,f]) as well as on previous methods employing online updates of statistics for continual learning/open world recognition [b] or prototype-based few-shot learning [I,j]. Expanding the discussion would help to better contextualize the work in the current literature.\n\n**Minors**: \n\n- Footnote 1 hints that the model could be applied beyond CLIP. However, there are no experiments confirming this claim. It would have been more thorough to show other models (e.g., SigLIP [h]) to support it.\n\n- Table 4 shows the results only for DOTA. It would be interesting to see the same analysis for the other baselines (e.g., TDA) to contextualize/provide a reference for the results.\n\n\n**References**:\n\n[a] Mensink, Thomas, et al. \"Distance-based image classification: Generalizing to new classes at near-zero cost.\" IEEE transactions on pattern analysis and machine intelligence 35.11 (2013): 2624-2637.\n\n[b] Bendale, Abhijit, and Terrance Boult. \"Towards open world recognition.\" Proceedings of the IEEE conference on computer vision and pattern recognition. 2015.\n\n[c] Gong, Taesik, et al. \"Note: Robust continual test-time adaptation against temporal correlation.\" Advances in Neural Information Processing Systems 35 (2022): 27253-27266.\n\n[d] Zhang, Yabin, et al. \"Dual memory networks: A versatile adaptation approach for vision-language models.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2024.\n\n[e] Yuan, Longhui, Binhui Xie, and Shuang Li. \"Robust test-time adaptation in dynamic scenarios.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2023.\n\n[f] Marsden, Robert A., Mario Döbler, and Bin Yang. \"Universal test-time adaptation through weight ensembling, diversity weighting, and prior correction.\" Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. 2024.\n\n[g] Tu, Weijie, et al. \"An Empirical Study Into What Matters for Calibrating Vision-Language Models.\" International Conference on Machine Learning. 2024.\n\n[h] Zhai, Xiaohua, et al. \"Sigmoid loss for language image pre-training.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023.\n\n[i] Snell, Jake, Kevin Swersky, and Richard Zemel. \"Prototypical networks for few-shot learning.\" Advances in neural information processing systems 30 (2017).\n\n[j] De Lange, Matthias, and Tinne Tuytelaars. \"Continual prototype evolution: Learning online from non-stationary data streams.\" Proceedings of the IEEE/CVF international conference on computer vision. 2021."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see the Weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The writing and figures are good and easy to understand.\n\n2. The DistributiOnal Test-time Adaptation (DOTA) method for Vision-language foundation models without BP is simple yet effective during testing in new target domain, achieving a significant improvement compared to current state-of-the-art methods in most of the datasets.\n\n3. This paper first define the test-time adaptation problem with human feedback, allows the test-time adaptation for uncertain samples with human feedback.\n\n4. An adaptive final fusion probability is introduced to mitigate the potential negative impact when the number of test samples is insufficient."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a Distributional Test-time Adaptation (DOTA) method, which adapt the pretrained Vision-language foundation models (e.g., CLIP) to the test target domain by estimating the distributions of different categories for test samples continually. The authors further introduce a human feedback collaboration method which identifies uncertain samples to further enhance the adaptability. Extensive experiments on diverse datasets validate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed method estimate the data distribution of samples in the current test environment during testing, but there lack some evidence or visualization. Why updating the feature distribution for different categories works better?\n\n2. The DOTA method seems somewhat similar to the T3A method [R1], which continually maintains a memory bank for prototypes during the testing stage. Could the authors clarify and analysis the difference and the advantages of the proposed method?\n\n[R1] Test-Time Classifier Adjustment Module for Model-Agnostic Domain Generalization\n\n3. The proposed method seems works for not only VLMs but also other models in all classification tasks. Is it suitable for traditional TTA or Domain Generalization tasks?\n\n4. There could give more details and explanation about the $f_k(x)$ in Eq.(3).\n\n5. The uncertainty estimation method is simple. Is there any other uncertainty estimation method (like entropy) better?\n\n6. There missing an ablation study for the adaptive fusion probability."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We proposes a new Distributional Test-time Adaptation (DOTA) method, which continuously estimates the distribution of test samples and incorporates human-machine collaboration to handle uncertain samples."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dota,\ntitle={{DOTA}: Distributional Test-Time Adaptation of Vision-Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yD2JMeKumt},\nnote={under review}\n}"
},
"abstract": {
"value": "Vision-language foundation models (e.g., CLIP) have shown remarkable performance across a wide range of tasks. However, deploying these models may be unreliable when significant distribution gaps exist between the training and test data. The training-free test-time dynamic adapter (TDA) is a promising approach to address this issue by storing representative test samples to guide the classification of subsequent ones. However, TDA only naively maintains a limited number of reference samples in the cache, leading to severe test-time catastrophic forgetting when the cache is updated by dropping samples. In this paper, we propose a simple yet effective method for DistributiOnal Test-time Adaptation (DOTA). Instead of naively memorizing representative test samples, DOTA continually estimates the distributions of test samples, allowing the model to continually adapt to the deployment environment. The test-time posterior probabilities are then computed using the estimated distributions based on Bayes' theorem for adaptation purposes. To further enhance the adaptability on the uncertain samples, we introduce a new human-machine collaboration paradigm which identifies uncertain samples, collects human-feedback, and incorporates it into the DOTA framework. Extensive experiments validate that DOTA enables CLIP to continually learn, resulting in a significant improvement compared to current state-of-the-art methods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Test-time",
"uncertainty",
"vision-language models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/261bbc6ea262d382c59592ddefbfea93b7ceb710.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "DOTA: Distributional Test-Time Adaptation of Vision-Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yD7oAhFEtD | Conv-Basis: A New Paradigm for Efficient Attention Inference and Gradient Computation in Transformers | main | Active | Attention Acceleration;Fast Fourier Transforms;Gradient Computation | learning theory | 3;5;6 | 3;2;2 | 2;3;3 | 2;2;3 | 2;2;3 | 4.666667 | 2.333333 | 2.666667 | 2.333333 | 2.333333 | -0.944911 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How would the author expect $k$ to scale with context length and the size of the model?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper investigates the timely question on how to speed up attention calculation and offers a new perspective on when attention calculation can be speed up.\n\n2. While focusing on the theoretical perspective, this paper present empirical validation on the correctness of the presented algorithm and required $k$ to approximate attention matrix."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper examines the possibility to compute attention using Fast Fourier Transformer when attention matrix can be well approximated by the sum of some convolution matrices. They show that when the Attention matrix can be broken down in this way, both inference and training can be done in almost linear time when $kd = n^{o(1)}$."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The algorithm 3 is deferred to appendix while being a key building block of the algorithm. In general, a more detailed explaination on how the binary search is carried out will improve the clarity of the paper.\n\n2. Because the current work is built upon the previous work with low rank assumption, a more direct comparison (either empirically or theoretically) between the current method and previous one should be presented."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- What relationship does the conv-basis rank ($k$) have to the context length?\n- Can your method be applied on top of existing pretrained transformers without any retraining or finetuning?\n- Why is the one provided experiment performed on such a small scale. Given that the method reduces the quadratic complexity of a transformer, shouldn't it be able to scale to huge context lengths without much trouble? It would be interesting to compare this to another method such as Streaming LLM [1] on a streaming perplexity task (like PG19) or other long context benchmark which scales to the order of millions of tokens. \n\n## Overall\n\nOverall, I think the work attempts to make an ambitious change to the attention operation in order to lower the complexity. However, I believe anyone reading this work will be left with many questions about the workings of the algorithms as well as the performance characteristics. I believe adding more experiments, ablations, and answering the questions posed here will aid in understanding the proposed method more fully."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The proposed method offers a way to reduce the prohibitive quadratic complexity of modern transformers.\n- As compared to previous works, the method appears to be much less restrictive on the structure of the attention mask which allows it to be applied to modern LLM settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a k-basis system for convolutional matrices which allows for sub-quadratic inference in attention with fewer constraints on the form of the attention as compared to previous works."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The experiments section is rather light, and contains only one experiment. Given that the proposed method differs quite drastically from previous transformers, I would like to see more empirical validation and ablations on different aspects of the performance.\n\n- It is not clear to me if the experiment which was performed on Llama3 required any training from the proposed method or not.\n - If it did not require training, I would be interested to see the performance of the proposed method which did include training or finetuning.\n\n- The rank $k$ in k-conv is given, however after reading I am curious as to how $k$ relates to performance on long context tasks. Does $k$ need to scale commensurately with the context length?\n\n- I am left confused by the following items in Algorithm 2:\n - What is $u$ and where is it introduced in the method?\n - What is $v$ and where is it introduced in the method?\n - What is the $s$ subscript in algorithm 2 ($\\tilde{H}_s \\leftarrow M_s \\odot (QK^\\top_s)$) supposed to denote?\n - What is happening on line 8 after achieving $\\tilde{H}_s$\n\n- The algorithmic complexity looks compelling, but what is the actual wall clock time of the experiment compared to the quadratic tranformer? Can you run an additional experiment which looks at this, even if it only utilizes random inputs? I suspect that at smaller context lengths such as 2K which is provided in the experiment, the proposed method might perform worse in terms of wall clock time, but it would be interesting to know at what length the crossover happens to where the lower complexity starts to show real gains in performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- As the authors mention, the proposed concept of the conv-basis approximation to sequence models seems quite related to prior work around sub-quadratic alternatives to attention, e.g. SSMs [1], Mamba [2]. Is there some way a conv-approximation of an existing attention matrix can be interpreted as a set of specific kernels for a subquadratic model?\n - In particular, I wonder if the conv-basis concept relates to the semiseparable matrices of Mamba2 [3]?\n- It would be interesting to see how the approximability of attention vs. number of conv-basis elements changes based on the task, e.g. do math reasoning tasks (GSM8K) require more basis elements than simple question-answering?\n\n1. Efficiently Modeling Long Sequences with Structured State Spaces\n2. Mamba: Linear-Time Sequence Modeling with Selective State Spaces\n3. Transformers are SSMs: Generalized Models and Efficient Algorithms Through Structured State Space Duality"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The authors investigate an important problem: how to speed up the efficiency of the self-attention mechanism, the computational bottleneck in Transformers.\n- The theoretical results are interesting, thorough, and clearly presented."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work investigates the problem of approximating matrices using a lower-triangular Toeplitz matrix basis, or \"conv-basis\", focusing specifically on the self-attention mechanism. The authors provide theoretical results about the approximability of lower triangular matrices into such a convolution basis, and provide algorithms for efficiently recovering this basis and using it to efficiently perform the self-attention operation using FFTs. Finally, the authors provide empirical results showing that their proposed method requires few basis elements to recover performance on the IMDB dataset when applied to Llama3 8B."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I think the theoretical results are quite interesting and deserve to be presented. However, the main weakness seems to be that the empirical results are quite limited.\n - The empirical speedup results are limited. Although asymptotically, the authors argue that the conv-basis approximation is more efficient, as far as I can tell, an empirical comparison of their method's speed vs. standard self-attention is not provided. It could be clarifying to provide such a comparison in the case of e.g. Llama3 8B, so that it's clear at what sequence length such an approach becomes more efficient in practice.\n - Additionally, the expressivity vs. efficiency tradeoff inherent to the method could be explored more thoroughly. The main theoretical result in the paper is that any lower triangular matrix can be well approximated in the conv basis, but accurate recovery may take as many as n basis elements in the worst case. The authors further support this result with experimental validation on the IMDB dataset with Llama3 8B, but this is only one task and relatively simple. It could be clarifying to provide additional experimental results on more recent LM benchmarks (e.g. HellaSwag, WinoGrande?)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024convbasis,\ntitle={Conv-Basis: A New Paradigm for Efficient Attention Inference and Gradient Computation in Transformers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yD7oAhFEtD},\nnote={under review}\n}"
},
"abstract": {
"value": "The self-attention mechanism is the key to the success of transformers in recent Large Language Models (LLMs). However, the quadratic computational cost $O(n^2)$ in the input sequence length $n$ is a notorious obstacle for further improvement and scalability in longer contexts. In this work, we leverage the convolution-like structure of attention matrices to develop an efficient approximation method for attention computation using convolution matrices. We propose a $\\mathsf{conv}$ basis system, analogous to the rank basis, and show that any lower triangular matrix can always be decomposed as a sum of structured convolution matrices in this basis. We then design a fast algorithm to approximate the attention matrix via a sum of such $k$ convolution matrices. This allows us to compute the attention {\\it inference} via Fast Fourier Transforms (FFT) in $O(knd \\log n)$ time, where $d$ is the hidden dimension, and thus achieve almost linear time $n^{1+o(1)}$ in the practical scenario where $kd = n^{o(1)}$. Furthermore, the attention {\\it training forward} and {\\it backward gradient} can be computed in $n^{1+o(1)}$ as well. We provide theoretical guarantees on the run time and approximation error and conduct preliminary experiments to evaluate its effectiveness. We hope our new paradigm for accelerating attention computation in transformer models can help their application to longer contexts."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Attention Acceleration",
"Fast Fourier Transforms",
"Gradient Computation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a1c2d21ac4bbe2498d480ea6aa88f38ce9a8cd39.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/34f735500c75cc23eba9063d9a3be5509f9ba160.zip"
},
"title": {
"value": "Conv-Basis: A New Paradigm for Efficient Attention Inference and Gradient Computation in Transformers"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yDICgRUj5s | A Causal Lens for Evaluating Faithfulness Metrics | main | Active | faithfulness;diagonsticity;natural language explanations;interpretability;model editing | interpretability and explainable AI | 3;3;5;5;6 | 4;4;4;3;3 | 2;1;2;3;3 | 2;2;2;2;3 | 4;2;2;2;3 | 4.4 | 3.6 | 2.2 | 2.2 | 2.6 | -0.748455 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above in weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Originality: The paper introduces a novel approach that uses causal model editing to generate faithful-unfaithful explanation pairs, offering a rigorous basis for assessing faithfulness in natural language explanations. This approach combines causality with faithfulness evaluation and tries to get to the model’s true reasoning processes.\n\n2. Quality: The paper is rigorous, with comprehensive experiments across three tasks and multiple language models. The inclusion of alternative model editing techniques and ablation studies further strengthens the evaluation framework.\n\n3. Clarity: The paper is well-organized, with clear, detailed explanations. Tables and figures present complex concepts in easy to understand manner. \n\n4. Significance: This work is significant in that its findings highlight specific improvements needed in LLM explanation fidelity.\n\nIn summary, this paper is a good contribution to the field of LLM interpretability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new framework to assess the faithfulness of natural language explanations generated by large language models (LLMs). To evaluate existing metrics, the framework employs model editing to create pairs of faithful and unfaithful explanations and tests various metrics using a benchmark of three tasks: fact-checking, analogy, and object counting. The study finds that while the CC-SHAP metric consistently outperforms others, many metrics fail to reliably capture faithfulness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The use of synthetic explanations may be limiting, as these pairs might not fully represent actual model-generated explanations. It would be helpful if the authors provided an analysis of how well synthetic explanations align with actual ones.\n2. The focus on three specific tasks (fact-checking, analogy, object counting) may not generalize well to more complex contexts. Adding diverse tasks or discussing broader applicability would be helpful. Have the authors considered experimenting with other complex contexts?\n3. Relying on diagnosticity as a faithfulness measure is overlooking other aspects of reasoning, like consistency and coherence. Including complementary metrics or discussing proposed framework’s limitations would be helpful."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Can the author say more on how do they generate synthetic explanations?\n2. Can the authors justify the use of word \"causal\" in the metric/framework they introduce?\n3. Fig. 6, seems to have a bug in the legend?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written - the motivation of the work is clearly presented, related works are well discussed, proposed approach and experiments are clearly described, and results are well discussed.\n\n2. The topic the paper focuses on is extremly important. Given the widespread usage of LLMs, it is very important to develop faithful methods to explain their predictions, but it is equally important to benchmark them.\n\n3. The experiments are diverse and include ablation studies to understand if and how the conclusion generalises."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents an empirical study to analyse the **reliability of faithfulness metrics for explanations of LLMs**. The authors propose \"Causal Diagnosticity\" as a metric to evaluate seven faithfulness metrics (for both post-hoc and Chain of Thought explanations) on three NLP tasks across four different LLMs. The results suggest that CC-SHAP is the most reliable metric among all the evaluated faithfulness metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper seems very applied to me with limited novelty. The authors expand an existing metric (called diagnosticity) to natural language explanations by arguing that random text cannot work as meaningful explanation (line 188..). However, this argument needs more backing/examples as random text can be considered as unfaithful explanation as done previously by Chan et al. 2022b.\n\n2. Secondly, the authors introduce model editing as a way to generate pair of explanations (faithful and unfaithful), but this might be limiting the analysis as a given model editing method may not work perfectly (because the knowledge may be incorrectly learned), hence, model prediction may change, but the reason is incorrect. The authors argue to use \"synthetic explanations\" to handle this, but it is not clear if insights on synthetic explanations are generalisable to the real world. For e.g., are the syntetic explanations guranteed to not hallucinate?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The framework on evaluating faithfulness metrics for natural language explanations is quite novel.\n\nThe use of model editing to create the three synthetic tasks is also very novel. \n\nExtensive evaluations of several different faithfulness metrics are used."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes Causal Diagnoticity, a benchmark to evaluate metrics that evaluate natural language explanations generated by LLMs. In particular, this benchmark creates carefully edited models for three tasks that lead to unusual reasoning in three tasks: fact-checking, analogy and object counting. For each model response, two explanations are synthetically generated, with one containing the correct reasoning and the other containing the incorrect reasoning. Then different faithfulness metrics are computed to evaluate this pair of explanations, and the score difference between the correct and incorrect explanations is taken as the quality (i.e., diagnosticity) of the metric. Experimental results suggest that CC-Shap is the best performing, but still leaves much room for improvement."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My biggest concern is with the generation of synthetic explanations, and the assumption that one is correct and the other is incorrect. In particular, while the model is edited on the particular fact, it is unclear that the particular editing causes the model to use the \"intended\" reasoning path, or the model is actually using some very different reasoning paths. For example, in the Rihanna example, it could be that the model editing removes \"Rihanna\" entity from the \"singer set\", and hence results in the model predicting no. In this case, the \"correct\" explanation should be something like \"No, because I do not find Rihanna in the singer set\" (though this language may be highly unlikely to be generated by an LLM). How to carefully eliminate such possibilities is, in my opinion, the most crucial issue in establishing the soundness of the framework.\n\nIn addition, for a slightly related question, can we really assess the faithfulness of natural language explanations that are not generated by the models themselves (i.e., having low or almost-zero probability of being decoded), or in other words, is such evaluation fundamentally meaningful? In \"traditional\" interpretability, all the explanations, such as feature attribution, concept and counterfactual, are generated using well-defined algorithms, and hence the \"meaningfulness\" of the explanation is of little doubt. However, with synthetic explanations, I am less sure, so I would hope that the authors could convince me on this aspect.\n\nOf a minor note, there are also alternative ways to generate and evaluate LLM-generated explanations, such as [1] and [2], that could be discussed in related work. Furthermore, there is even work that outright assert that LLMs cannot explain via natural language explantions [3].\n\n[1] https://arxiv.org/abs/2310.11207\n\n[2] https://aclanthology.org/2024.findings-acl.19.pdf\n\n[3] https://arxiv.org/pdf/2405.04382"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The following questions are about perplexity described in Section 5.2:\n - What is perplexity? Could the authors provide a definition and explain how it is calculated?\n - Why can low perplexity indicate a faithful explanation?\n - If low perplexity does indeed correlate with faithful explanations, then perplexity itself could serve as a metric for evaluating faithfulness. Why is it not evaluated with **Causal Diagnosticity**? Without assessing the **Causal Diagnosticity** of perplexity, how can it be sufficient to determine whether an explanation is faithful?\n\n2. Are the synthetically generated explanations produced by the LLMs themselves? In line 203, the authors mention that \"$\\overline\\theta$ generates the explanation $\\overline\\epsilon$ and $\\widetilde\\theta$ generates the explanation $\\widetilde\\epsilon,\" which is confusing.\n\n3. Is the use of $\\widetilde\\theta$ necessary?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Natural language explanations are becoming increasingly relevant due to advancements in LLMs, making them more accessible for end users. This work aids users in selecting a metric to evaluate the faithfulness of natural language explanations.\n- The idea of extending diagnosticity from feature attribution to natural language explanations is straightforward and relevant.\n- The authors conducted empirical experiments and discussed the effects of model knowledge editing methods, explanation types, and the reliability of these edits. The results demonstrate that CC-SHAP outperforms other metrics."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose **Causal Diagnosticity**, a metric designed to evaluate the diagnosticity of existing faithfulness metrics for natural language explanations, where diagnosticity indicates how often a faithfulness metric favors faithful explanations over unfaithful ones.\n\nThrough evaluations of several post-hoc and Chain of Thought (CoT) faithfulness metrics across four tasks and four large language models (LLMs), the authors conclude that CC-SHAP outperforms the other metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In Equation (5), the assumption is made that $\\overline\\epsilon_i$ is faithful to $\\overline\\theta$, while $\\widetilde\\epsilon_i$ is not. However, this is not guaranteed in the experiments. Though the authors discuss this in Section 5.2, it's unclear why perplexity can indicate faithfulness.\n\n\n2. The model editing discussed in Section 2.2 involves modifying the internal weights of LLMs, which restricts the experiments to open-source LLMs, excluding closed-source models like GPT-4.\n\n3. The model $\\widetilde\\theta$ is exclusively used to generate unfaithful explanations and is not incorporated in Equation (5). Since directly modifying $\\overline\\epsilon$ could also yield an unfaithful explanation related to $\\overline\\theta$, the necessity of using $\\widetilde\\theta$ is unclear.\n\n4. The placement of figures throughout the paper is disorganized, which hurts readability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Regarding model editing, is changing a single fact (i.e. “is paris the capital of france”) sufficient to override all pretrained knowledge in the LLM? I ask because this is a key part of the evaluation framework.\n- How might this framework, especially model editing aspects, be adapted to more complex/realistic scenarios?\n- Minor: I may be misunderstanding something here, but do you need to modify two models for causal diagnosticity? Is it not sufficient to change the knowledge in one LLM, and compare the modified LLM to the original?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Tailors existing datasets to the new tasks.\n- Comprehensive evaluation across a range of tasks, with many existing metrics.\n- The paper is well written. The main idea of using two models for causal diagnosticity is emphasized throughout. Separate applications are detailed well.\n- Decent evidence is provided to demonstrate that many existing faithfulness metrics are staggeringly weak, as they fail to pass tests on basic tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a new framework to assess existing faithfulness metrics for natural language explanations (NLEs) from Large Language Models (LLMs). It adapts a previous test for faithfulness metrics, diagnosticity, which uses random feature attributions as unfaithful explanations. The new framework, causal diagnosticity, edits the LLM's knowledge in order to generate explanations that can be determined as unfaithful. Several existing metrics are evaluated on modified datasets across a range of tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper points out that many existing faithfulness metrics from [1] are flawed, which is useful extra evidence, though it is fundamentally unclear to me how this is going to lead to substantial later improvements.\n- While the datasets used are systematic enough to gain insights, a big fear is that they are overly simplistic. The examples given are for very short chain-of-thought responses to basic questions. Moreover, most of the faithfulness metrics in [1] are quite premature and have been criticized in the literature [2, 3]. It makes sense to me therefore that CC-SHAP might perform well, but the performance here does not really shed insight into the performance of the metric on more subtle scenarios (nor is it clear how well the evaluation framework is able to handle real-world tasks such as medical question answering).\n- These are my fundamental issues with the current setup, alongside relatively small/similar models being assessed. The faithfulness problem is due mostly to the fact that we do not have good ground truth of what faithful explanations are internally. This paper offers ground truths but in very simple settings.\n\n[1] Measuring Faithfulness in Chain-of-Thought Reasoning, Lanham et. al., 2023\n\n[2] Chain-of-Thought Unfaithfulness as Disguised Accuracy, Bentham et. al., 2024\n\n[3] On Measuring Faithfulness or Self-consistency of Natural Language Explanations, Parcalabescu and Frank, 2023"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Causal Lens for Evaluating Faithfulness Metrics},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yDICgRUj5s},\nnote={under review}\n}"
},
"abstract": {
"value": "The increasing capabilities of Large Language Models (LLMs) have made natural language explanations a promising alternative to traditional feature attribution methods for model interpretability. However, while these explanations may seem plausible, they can fail to reflect the model's underlying reasoning faithfully. The idea of faithfulness is critical for assessing the alignment between the explanation and the model's true decision-making mechanisms. Although several faithfulness metrics have been proposed, they lack a unified evaluation framework. To address this limitation, we introduce Causal Diagnosticity, a new evaluation framework for comparing faithfulness metrics in natural language explanations. Our framework extends the idea of diagnosticity to the faithfulness metrics for natural language explanations by using model editing to generate faithful and unfaithful explanation pairs. We introduce a benchmark consisting of three tasks: fact-checking, analogy, and object counting, and evaluate a diverse set of faithfulness metrics, including post-hoc explanation-based and chain-of-thought (CoT)-based methods. Our results show that while CC-SHAP significantly outperforms other metrics, there is substantial room for improvement. This work lays the foundation for future research in developing more faithful natural language explanations, highlighting the need for improved metrics and more reliable interpretability methods in LLMs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"faithfulness",
"diagonsticity",
"natural language explanations",
"interpretability",
"model editing"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/cbe029e6bbba7195d53ece5de12df825bebbd5d4.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "A Causal Lens for Evaluating Faithfulness Metrics"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yDlvteYBbF | Differentiable Distance Between Hierarchically-Structured Data | main | Active | Distance;Distance function;Tree-structured data;Heterogenous Graphs;JSONs;Multiple Instance Learning | learning on graphs and other geometries & topologies | 3;3;5;5 | 3;4;3;3 | 2;3;2;2 | 2;3;2;2 | 1;3;3;3 | 4 | 3.25 | 2.25 | 2.25 | 2.5 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "-Please I would like to hear the authors discussi the related works on Hyperbolic spaces for dealing with hierarchical data and Hierarchical clustering, where data also have latent tree structure. The goal here is to compare different trees and assign a loss to each so that lower loss means better tree for the dataset. The approaches there are also differentiable so how do you compare with them?\n\n-For your metrics, is there any hope to prove something about the quality of the metric found?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+the paper studies a natural problem on hierarchies which is how to define suitable metrics that are differentiable and modular. \n\n+the authors present some natural candidate and apply it to different types of hierarchical data\n\n+the authors present experimental results showcasing properties of their proposed metrics and benefits over prior methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies hierarichically structured data and introduces a tree-distance with differentiable parameters weighting the importance of different subspaces. The paper presents experimental evidence that their approach achieves similar performance to SOTA methods based on neural networks while having orders of magnitude fewer parameters, and also has some benefits for heterogeneous Graph Neural Networks compared to prior methods.\n\nThe paper is motivated by the fact that there are many structured data formats such as JSON/XML/Protobuffer but not a good way of defining a reasonable notion of distance between them, which is in contrast with what happens when we deal with more standard objects like vectors in Euclidean space.\n\nThis paper proposes a particular distance called HTD distance, which exploits the recursive nature of the previously-mentioned data formats. The ultimate goal is to have a modular construction by combining potentially different metrics on different levels of the given tree. HTD has weight parameters, which control importance on different parts, is differentiable, and requires orders of magnitude fewer parameters than neural networks with similar guarantees (based on experiments). The authors perform a series of experiments with supervised learning, ianomaly detection, heterogenous GNNs, clustering and UMAP for visualization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-the theory is very straightforward in this paper. In fact, the two theorems stated as Th1 and Th2 could be obserations or propositions as they follow from the basic definitions.\n\n-there have been recently approaches to define differentiable objectives suitable for doing optimization over trees and hierarchies, especially to deal with problems on relational data coming from networks (e.g. facebook or other social networks) with the goal of performing hierarchical clustering. The first such works were 1) Nickel et al. \"Poincaré embeddings for learning hierarchical representations\" and later 2) \"Hyperbolic graph neural networks\" of Nickel et al. and later the works of 3) Chami et al. \"Hyperbolic graph convolutional neural networks\" and 4) \"From Trees to Continuous Embeddings and Back: Hyperbolic Hierarchical Clustering\" and of 5) Monath et al. \"Gradient-based hierarchical clustering using continuous representations of trees in hyperbolic space\" have dealt with similar questions. I am surprised the authors do not cite such works as the problem of optimization over trees was addressed using differentiable methods in all of these works.\n\n-omission of discussion for use of hyperbolic techniques and hyperbolic spaces in the present paper which is known to be suitable for hierarchical relations, much more than euclidean spaces."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is overall well-written and easy to follow.\n2. This paper demonstrates theoretical superiority, as shown in Table 1."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents the Hierarchically-structured Tree Distance (HTD), a novel metric for structured data in formats like JSON and XML. Designed for rooted heterogeneous trees, HTD is modular and differentiable, allowing it to adapt to tasks like classification, clustering, and anomaly detection. Experiments show that HTD-based algorithms perform competitively with neural network methods while using far fewer parameters and are more effective for analyzing heterogeneous Graph Neural Networks than the Tree Mover’s Distance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors demonstrate the effectiveness mainly on distance-based tasks, which shows better performance compare to other distance-based method but does not appear comparable to GNN classifiers.\n\n2. I am also concerned about the contribution and scope of this paper; however, I acknowledge that I am not an expert and am open to other opinions.\n\n3. Although some limitations are mentioned in the submission (e.g., in the caption of Table 4), there is no comprehensive discussion of the proposed method's limitations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the weaknesses above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well-written.\n- The proposed HTD generalizes the tree mover’s distance, making it applicable to both heterogeneous graphs and tree-structured data.\n- Extensive experiments conducted across multiple tasks—classification, clustering, and anomaly detection—show HTD's superiority over state-of-the-art methods for tree-structured data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the Hierarchically Structured Tree Distance (HTD), a metric designed to measure distances between tree-structured data commonly stored in formats like JSON and XML. HTD effectively represents message passing in heterogeneous graph neural networks (GNNs). Experimental results show that this distance metric is capable of addressing various machine learning tasks, including classification, visualization, and clustering."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have several concerns regarding the novelty of the proposed distance:\n- HTD appears to be a straightforward extension of the tree mover’s distance, replacing the optimal transport (OT) distance with the Hausdorff and Chamfer distances, which may introduce cheaper computational complexity.\n- It is unclear why HTD outperforms the tree mover’s distance on homogeneous graph datasets, such as MUTAG and BZR, as shown in Table 3.\n- Even on heterogeneous datasets, it seems feasible to apply the tree mover’s distance by constructing separate computational trees for each node type in the graph. So we can improve the performance of tree mover's distance on heterogeneous datasets like MUTAG and BZR.\n- Similar to the tree mover’s distance, HTD does not meet the criteria for defining a valid kernel for tree-structured data, as it is not conditionally negative definite.\n- Including standard deviations (STD) in the results of Tables 3 and 4 would be beneficial, as the variances are large; for instance, the STD of accuracy values for MUTAG and BZR might be around 5."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- Only until Figure 2 the authors demonstrate examples of Schema. However, the sample 1 and sample 2 are essentially trees, it still remains unclear how HS-Trees to GNN in Section 2.1. Could the author provide an illustrative figure to show the relation?\n- The font size is a bit bigger than usual?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors introduce a new distance for hierarchically-structured trees based on Leaves, Bags, and Dicts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces hierarchically-structured tree distance (HTD) between HS-Trees."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The motivation and explanation of the use of HS-trees and their distance are unclear in the introduction. The authors first say that the properties of HT-Trees are used in existing work. Then, in the next paragraph, the authors mention that the distance on HS-Trees has been studied very little. Among the properties used in previous work, were there HS-Tress distance?\n- In the introduction and background, it’s unclear if the term HS-Trees is from previous work or if the term is first given by the authors. In addition, the background of HS-Trees, sample, and schema are very confusing. It would be clearer if the authors could provide an example of what they are here. Moreover, it’s unclear how bags are assumed to be “permutation invariant; therefore, the position has to be encoded through position encoding” and how the “universal approximation theorem for HS-Trees has been proved in Pevny & Kovarik (2019).”\n- The authors mention the motivation of the work is “measuring the distance between samples emerging from popular data storage formats (e.g., JSON, XML, and ProtoBuffer)”. However, in the experiment section, there is no application in such data format. It is misleading to use the data storage formats as motivation in the introduction, but there is no related experiment\n- In Section 2.1, it is unclear if the authors are trying to claim that GNNs are “hierarchically-structure data”. In the context of GNN, it’s hard to see what sample, schema, and even HS-Trees are.\n- The authors claim that the proposed distance is differentiable, however, there is no theoretical proof\n- The term Leaves is commonly used in tree structure data. It will be important to differentiate the difference between common tree structures and HS trees. In addition, it’s unclear how and why the definition of Leaves in Section 3. 1 is outside of the scope\n- There is no proof for Theorem 1. The justification right after Theorem 1 is hard to follow.\n- It is hard to link the relation between Eq. (5) and the distance in Table 2, even with the brief introduction in Appendix B. A simple proof or derivation could have helped to understand the connection.\n- It’s unclear what the relation between HTD and TMD is. A simple proof or derivation could have helped to understand the connection.\n- The README.md files in the provided link to the code are not sufficient to reproduce the HTD and experimental results.\n- The experimental setup is unclear in the main texts. It’s unclear what the actual classification tasks and anomaly detection are. Also, while Appendix A includes the implementation details, there is no reference in the main text link to the appendix. In addition, in Appendix A, the authors claim that the experiments are repeated five times, and there is no variance or standard deviation reported in the performance in the main text.\n- It’s unclear what the colors represent in Figure 3. It’s unclear why and what “—” represents in Tables 3-5.\n- The paper needs more proofreading: i) additional ) in line 029, ii) line 250 missing a period and there is an additional ), iii) notation is very hard to follow; a notation is given with confusing comma\n\n## Minor\n- There if no reference and introduction when the Mutagenesis dataset is first mentioned in line 317"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024differentiable,\ntitle={Differentiable Distance Between Hierarchically-Structured Data},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yDlvteYBbF},\nnote={under review}\n}"
},
"abstract": {
"value": "Many machine learning algorithms solving various problems are available for\nmetric spaces. While there are plenty of distances for vector spaces, much\nless exists for structured data (rooted heterogeneous trees) stored in popular\nformats like JSON, XML, ProtoBuffer, MessagePack, etc. This paper\nintroduces the Hierarchically-structured Tree Distance (HTD) designed\nespecially for these data. The HTD distance is modular with differentiable\nparameters weighting the importance of different sub-spaces. This allows\nthe distance to be tailored to a given dataset and task, such as classification,\nclustering, and anomaly detection. The extensive experimental comparison\nshows that distance-based algorithms with the proposed HTD distance\nare competitive to state-of-the-art methods based on neural networks with\norders of magnitude more parameters. Furthermore, we show that HTD is\nmore suited to analyze heterogeneous Graph Neural Networks than Tree\nMover’s Distance."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Distance",
"Distance function",
"Tree-structured data",
"Heterogenous Graphs",
"JSONs",
"Multiple Instance Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/66137360ff65a367c9b78659e3cabfa7a4f37f96.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Differentiable Distance Between Hierarchically-Structured Data"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yDy9fZXNJV | The Graph's Apprentice: Teaching an LLM Low-Level Knowledge for Circuit Quality Estimation | main | Active | LLM;Knowledge Distillation;Verilog;Graph Neural Network | applications to computer vision, audio, language, and other modalities | 3;5;5;6 | 4;5;4;2 | 2;3;3;3 | 2;3;3;3 | 1;4;3;3 | 4.75 | 3.75 | 2.75 | 2.75 | 2.75 | -0.473684 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "This has already been addressed in the Weaknesses section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The overall research motivation is well-articulated, and the proposed idea is both interesting and has promising applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The main contributions of this work, as reported, are:\n\nA. Development of a VeriDistill framework: The first truly end-to-end machine learning model that processes raw Verilog code directly, without preprocessing, to accurately estimate circuit area and delay metrics.\n\nB. Innovative Knowledge Distillation (KD): A novel knowledge distillation method is applied during training, transferring low-level circuit insights (as LUT graphs) back into the model for enhanced predictions. Experiments show that this approach surpasses previous state-of-the-art (SOTA) baselines on a large-scale Verilog dataset, with improved generalization to out-of-distribution (OOD) data. The use of both LLM representations and knowledge distillation is critical to the model’s performance, as omitting either component decreases performance below baseline levels."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The manuscript lacks clear description of the proposed approach. Both figure 1 and figure 2 should be improved in both schematic and description. Methodology section 3.1 must be elaborated. I will advice to break the training strategies and steps at first and finally combine them towards final goal/results. It will enhance readability, understanding of the general audience with more detailed explanation of the intermediate training flows. Figure 1 & 2 and section 3.3 description seems disjointed. Reproducibility from this text description is problematic.\n\n2. in Section 4.1 EXPERIMENTAL SETUP, \"The model’s decoder feedforward network depicted\nin Fig. ?? is designed so that the 512-dimensional representations from the...\", Fig. reference is missing.\n\n3. The overall quality of writing and presentation could be improved. The writing should maintain a smoother flow, as some paragraphs feel disjointed. For instance, in the experimental setup section 4.1 , certain details—such as the explanation of 512-dimensional state tokens—are important for understanding the methodology and would benefit from clearer integration.\n\n4. It is recommended to compare the performance of the proposed model with previous models/approaches to better emphasize its novelty, particularly in terms of computational complexity, data preprocessing demands, resource utilization, and the metrics of area and delay. For instance, could you provide an analytical comparison of your approach against the work by Fang et al. (2023/2024b), addressing the computational time for SOG and the complexity of converting linguistic data into bit-level operators using the logic synthesis tool Yosys (Wolf et al., 2013)? This comparison would further clarify the advantages of your approach, especially given that accuracy, precision, and sensitivity are crucial metrics for circuit quality estimation in addition to computational efficiency.\n\n5. In Fig. 4, please clarify the presence of scattered outlier points for both area (lower values) and delay (higher values) in relation to the fitting curve. These sparse data points appear to indicate cases where VeriDistill’s performance aligns with that of other baseline models. Could you explain why VeriDistill performs similarly to the baselines for these specific outliers, and discuss any factors that might contribute to this behavior in terms of model limitations or specific data characteristics?\n\n6. Authors explained \"Finally, in Figure 5, we present the t-SNE projection of the last hidden space representations on the test data from the teacher model (Z_teacher) trained for predicting log-area, alongside those from the LLM-based models. As can be seen, the resulting t-SNE representation of the VerDistill model appears very similar to the one of the LUT-GNN teacher model, albeit slightly rotated by roughly 30 degrees to the right (which is arbitrary and immaterial). The AST-GNN with KD model also can be seen as a rotation of the LUT-GNN plot, roughly 90 degrees to the left. In contrast, the plot of the CodeV + Decoder appears much more like an undefined mass.\" \n\nIt seems hypothetical assumption about the statement \"VerDistill model appears very similar to the one of the LUT-GNN teacher model, albeit slightly rotated by roughly 30 degrees to the right (which is arbitrary and immaterial). The AST-GNN with KD model also can be seen as a rotation of the LUT-GNN plot, roughly 90 degrees to the left.\". 
AST-GNN with KD is quite different than LUT-GNN and VeriDistill, as well it does not seem \"t-SNE representation of the VerDistill model appears very similar to the one of the LUT-GNN teacher model\"...and if the authors knew already \"slightly rotated by roughly 30 degrees to the right\".. please adjust this rotation to make it fit overlapped and compared.\n\"\"\n7. Please be consistent with proposed approach name .......authors mentioned \"VerDistill\" in one place and \"VeriDistill\" in other place(s). Please correct any typos. Also, grammatical errors have been noticed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) Does the knowledge distillation help with a specific subset of Verilog designs (more than others)? Could be interesting to exploit this.\n2) Please elaborate on the training times\n3) There are many ways to code (e.g an adder) in Verilog, does the framework handle this well? \n4) Curious to hear whether you think this approach can scale to handle other RTL (e.g VHDL)"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "As a hardware engineer, I greatly appreciate the contributions in the work as this can provide a lot of benefit to practioners.\n\nI find the knowledge distillation approach between the GNNs and LLMs to be novel. The work showcases strengths of GNNs and LLMs in a creative fashion. The work also shows that LLMs have remarkably learned about the underlying circuit representation underneath the verilog code.\n\nThe results are also impressive, showing improvements over baselines including the out-of-distribution datasets like OpenABCD."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Introduction of a new ML framework: VeraDistill to predict circuit QoR metrics like area/delay bypassing costly logic synthesis processes. The framework uses a verilog-trained LLM (codeV-7B) to produce a representation (final layer) to feed into the FFN to make predictions for the QoR metrics during inference. To train, they take the AIG LUT outputs of the synthesis flow and use GNNs to produce embeddings where the GNN acts as a \"teacher\" resulting in knowledge distillation. The loss metric used for final training is a simple MSE involving not only the FFN predictions and synthesis QoR data, but also the final layer data from the GCN(teacher) and LLM (Student). The LLM is frozen in the training process."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Its unclear to me how reliant the framework is on a particular LLM (CodeV) and how versatile a single instance (trained) will be in accommodating more unseen RTL code. The authors do acknowledge the lack of Verilog/RTL code for training, so while not a weakness per se, it does make me question the generalization capabilities.\n\nNot much details are shared about the training resources (outside of the 8 V100 GPUs used)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. For Table 1, you state that knowledge distillation has almost no effect on the AST-GNN model. But in Figure 5, you show that the knowledge distillation makes the t-SNE representation of AST-GNN more like that of the teacher model. These statements seem contradictory and confusing. Could you provide further clarification?\n2. For Table 2, could you also report the performance of the teacher model on the out-of-distribution dataset? This would help clarify the current gap in performance.\n3. It seems VeriDistill can only handle small Verilog designs due to the context window length constraint of the LLM base model. Is there a way to scale VeriDistill for larger designs? Given that logic synthesis for small designs is usually fast and the error remains significant, the current model may not be very attractive.\n4. When the logic synthesis recipe changes, the delay and area metrics of circuits typically change as well. How can VeriDistill handle these variations?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This work introduces VeriDistill, an innovative end-to-end machine learning model that predicts circuit quality directly from raw Verilog code, which is an important and challenging task in the domain of electronic design automation (EDA). It applies a novel knowledge distillation method, transferring low-level circuit insights via LUT graphs into an LLM-based predictor. And VeriDistill demonstrates robust performance, outperforming state-of-the-art baselines on large-scale and out-of-distribution datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents VeriDistill, the first end-to-end machine learning model that processes raw Verilog code to predict circuit quality metrics. The model uses an innovative knowledge distillation technique, transferring low-level circuit insights via LUT graphs into an LLM-based predictor. Experiments show that VeriDistill surpasses current state-of-the-art methods on large-scale Verilog datasets and demonstrates transferability to out-of-distribution datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are several limitations for VeriDistill. Firstly, as shown in Table 1, VeriDistill’s performance lags significantly behind the teacher model. To improve this, I think a possible way is to unfreeze some parameters in the base LLM and fine-tune them with the decoder. Secondly, the paper only uses CodeV as its base model without exploring other Verilog LLMs that might provide different insights. Conducting more experiments with various models could better demonstrate the method's effectiveness and robustness. Additionally, the paper needs careful proofreading; for instance, there is a missing figure referenced in Line 293."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1.Given the token count limitations of LLMs, which may prevent the framework from handling large Verilog designs, and the relatively quick and low-cost process of transforming smaller designs into netlists, wouldn’t it be more effective to use a GNN to process the netlist representation directly?\n2.Why transform aiger into LUT? What’s the advantage of LUT? And in Advanced integrated circuit (IC) manufacturing process, whether LUT is widely used?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper is the first to implement knowledge transfer of circuit quality estimation from netlist-level to RTL-level. Considering that netlist-level circuit performance and area estimation is more precise and RTL-level circuit quality prediction is faster, this work strike a balance between the precision and inference speed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a framework, which utilize a GNN trained on LUT netlist-level graph as a teacher in order to transfer knowledge to LLM encoder based circuit quality estimator. It enables model to get more accurate performance and area of circuits end-to-end with RTL as input."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In this work, the LLM is used as an encoder, leveraging its embeddings to predict circuit quality. However, a potential limitation arises from the token count restrictions of LLMs, which may prevent the framework from handling large Verilog designs. For smaller designs, which can be processed within these token constraints, transforming them into netlists is relatively quick and incurs minimal computational cost. Given this, why not employ a GNN to process the netlist representation directly? Predictions at the netlist level are generally more accurate than those at the RTL level, making this a potentially more effective approach. Moreover, the authors mainly compare there work with models trained on AST. Such an experiment lacks persuasive power.\nThings to improve: The ‘Fig. ??’ in section 4.1."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024the,\ntitle={The Graph's Apprentice: Teaching an {LLM} Low-Level Knowledge for Circuit Quality Estimation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yDy9fZXNJV},\nnote={under review}\n}"
},
"abstract": {
"value": "Logic synthesis is a crucial phase in the circuit design process, responsible for transforming hardware description language (HDL) designs into optimized netlists. However, traditional logic synthesis methods are computationally intensive, restricting their iterative use in refining chip designs. Recent advancements in large language models (LLMs), particularly those fine-tuned on programming languages, present a promising alternative. This work proposes augmenting LLMs with predictor networks trained to estimate circuit quality directly from HDL code. To enhance performance, the model is regularized using embeddings from graph neural networks (GNNs) trained on Look-Up Table (LUT) graphs, thereby incorporating lower-level circuit insights. The proposed method demonstrates superior performance compared to existing graph-based RTL-level estimation techniques on established benchmarks, such as OpenCores and OpenABCD, while providing instant feedback on HDL code quality."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM",
"Knowledge Distillation",
"Verilog",
"Graph Neural Network"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/64c6dce5011e7098b9ee10b08b7343d0dd636511.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "The Graph's Apprentice: Teaching an LLM Low-Level Knowledge for Circuit Quality Estimation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yEPNPbF8E7 | SEED-Story: Multimodal Long Story Generation with Large Language Model | main | Withdraw | LLM;Story telling;multi-modal generation | applications to computer vision, audio, language, and other modalities | Shuai Yang;Yuying Ge;Yang LI;Yukang Chen;Yixiao Ge;Ying Shan;Ying-Cong Chen | ~Shuai_Yang7;~Yuying_Ge2;~Yang_LI82;~Yukang_Chen1;~Yixiao_Ge2;~Ying_Shan2;~Ying-Cong_Chen1 | 3;3;3;6;6 | 3;3;4;5;4 | 2;2;2;4;3 | 2;2;3;4;3 | 4;3;2;3;3 | 4.2 | 3.8 | 2.6 | 2.8 | 3 | 0.763763 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "At the review stage, I could easily find the corresponding materials of this submission : (1) GitHub repository is publicly available at https://github.com/TencentARC/SEED-Story, and (2) arxiv paper at https://arxiv.org/ab/s2407.08683.\n\nEven it seems rather popular these days, \nuser double-blind review, I think that there is a possibility of making effects on reviewers biased in any forms."
},
"flag_for_ethics_review": {
"value": [
"Yes, Other reasons (please specify below)"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weaknesses"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The samples of generated outputs demonstrated in the manuscript look impressive, especially in long story generation, character consistency, and narrative text quality. \n This work would be significant when these examples objectively represent general performance.\n\n2. The introduction of the StoryStream dataset is a valuable contribution that could advance future research in this field.\n The authors present StoryStream Dataset, which is already downloadable at https://huggingface.co/datasets/TencentARC/StoryStream."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a method, SEED-Story, for the task of image-text multimodal story generation.\nThey design architectures that tokenize images for tuning large language models (LLMs) and detokenize them for image generation. They also employ a multimodal attention sink to facilitate long story generation. \nThey evaluate experimentally the quality of generated images and assess story quality with a baseline method, MM-interleaved. \nAdditionally, a novel dataset, StoryStream, is introduced."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper lacks essential elements such as a clear motivation and problem definition, making it difficult to judge whether the research goal is successfully achieved. \n Also, those make readers confused about whether the evaluation metrics and comparative methods are appropriate or sufficient.\n\n2. In order to validate academic value, the manuscript should provide evidence theoretically or experimentally. \n The manuscript shows the quality of generated images with FID and CLIP score as an ablation study, and relative comparison story quality with a baseline method, MM-interleaved. \n I think the materials above are NOT enough to clarify the validity of improvement to keep fair comparison and reproducibility. \n \nThe results shown are promising, but clearer motivation, explicit research questions, and rigorous comparative experiments would strengthen the paper. The contribution of a new dataset is notable, yet the lack of comprehensive evaluation limits the ability to objectively assess the work's significance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How may participants were involved in user study presented in Section A of Appendix?\n- My understanding is dense attention uses attentions between all tokens, and the proposed multimodal attention sink mechanism uses a subset of the tokens. So I think dense attention may perform (almost) equally well as multimodal attention sink mechanism even though it is more expensive. Can you clarify this?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The proposed dataset has high-quality and high-resolution images with longer story length, which would be useful for research community.\n- The proposed model seems to make sense, and the presented analysis for multimodal attention sink mechanism is nice.\n- The experimental results show that the proposed approach outperforms the baseline on the proposed dataset and demonstrate its effectiveness even though it still has a limitation (please see the weaknesses below)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper tackles the task of multimodal story generation which generates both story text and corresponding images from give input image and text, by proposing SEED-Story model utilizing Multimodal Large Language Model. Additionally, authors extend attention sink mechanism and introduce a multimodal attention sink mechanism to enable the model to generate long sequences in an efficient manner. Finally, a new dataset called StoryStream was proposed for training and benchmarking the task of multimodal story generation with longer story length. In experiments, it has been shown that the proposed approach outperforms the baseline, MM-interleaved (Tian et al. (2024)), in both quantitative and qualitative evaluations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The novelty of proposed approach seems somewhat incremental as it looks to share some ideas with MM-interleaved (Tian et al. (2024)). Also the proposed multimodal attention sink mechanism seems just a slight modification of existing attention sink mechanism even though it includes nice analysis.\n- The proposed approach was only evaluated on the proposed StoryStream dataset. However, as MM-interleaved (Tian et al. (2024)) was also evaluated on both Pororo and Flintstones datasets in their paper, it would make the paper stronger with additional evaluations on more datasets.\n- Another major concern is baseline choice. The authors just chose MM-interleaved, but I think it is still possible to use more natural baselines. e.g., generating story text first and then generate corresponding images with existing story visualization approaches.\n- The experimental result is not so convincing. e.g., in Fig. 7 (bottom), in the generated image sequence, the hat appeared and disappeared, and the color of hat was changed, so its consistency seems still not very good."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. For Figure 8, at what size sequence length does the sliding window approach stop working? I would also like to know for Table 2 what sequence length this is? \n\nIt would be interesting to show the FID/clip score at different sequence lengths. E.g. for seq length = 10 I expect all the methods are equivalent, but I am not sure how the numbers compare with seq length = 20, 30, 40, 50 . Does the multimodal attention sink only help when the test seq length is much larger than the training? \n\n2. For section 5.1 please clarify the differences between MM-interleaved and seed-story (e.g. architecture/base models)\n\n3. Does the mm attention sink affect the quality of the text? Figure 8 / Table 2 are mostly concerned with the quality of the images"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The strongest contribution of the paper is the new dataset (StoryStream). There seems to be a need for this new dataset, given that the existing datasets look quite low-resolution and simple. It will be useful for other papers to use this dataset to train and evaluate.\n\nThe major technical contribution of the paper is the multimodal attention sink. This is quite an interesting non-obvious observation that the model attributes high attention to tokens near BoI / EoI. Then the authors take this observation and propose a multimodal attention sink which seems to be useful for generating longer stories. This contribution could be useful in other problems outside of story generation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a new method for story generation (seed-story), including the generation of images along with text. The model to generate the stories is based on a multimodal LLM. Images are tokenized with a ViT based tokenizer. Then images can be fed into an LLM to predict the next tokens, which could be images or text. A detokenizer, based on SDXL is then used to generate viewable images from the tokens. The image detokenizer and the LLM are finetuned for this specific task, with LORA being used for the LLM.\n\nThe authors introduce a new dataset, StoryStream, which consists of interleaved image/text stories to train their model. To create the dataset, the authors first sample keyframes and corresponding subtitles from children's cartoons. Then a caption model is used to caption frames. Finally GPT-4 is used to generate a consistent story given the subtitles and captions. The final dataset is larger in terms of story length and higher resolution compared with existing datasets.\n\nSeed-story also attempts to generate longer stories. Long stories are difficult to generate given the limited training data. Therefore the authors use a \"train-short-test-long\" approach to generate longer videos. The authors introduce a variation on attention-sink for multimodal data. Attention sink uses a sliding attention window plus a few initial tokens at the beginning of the sequence. The proposed multimodal attention sink also includes the tokens corresponding to the beginning and end of the image tokens. It is shown that this approach generates much higher quality story images.\n\nSeed-story is primarily compared with MM-interleaved, another approach that can be used to generate stories. Compared with MM-interleaved seed-story achieves lower FID, better style consistency, story engagement, and image-text coherence."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The long generated stories don't seem to be very good in terms of storytelling. For example, Figure F does not really suggest any logical story (e.g. with a beginning, middle, end). It seems more like random captions were generated, especially near the end. The llm also starts repeating itself often (e.g. \"A man in a hat and shirt looked surprised\", \"George, the cartoon monkey, stood in a grassy area\". The authors in the paper say that this plot is \"engaging\", but IMO it is not engaging. \n\nThis is a difficult task, but a simple baseline is to have a text only LLM generate a curious george story. The story in Figure F looks quite poor compared to a pure text baseline. Another simple baseline would be to have a text-llm generate a story and then have a diffusion model generate an image for each paragraph. This might lack consistency between images, but overall might be better, especially if rated by humans. It feels like there is potential in this paper but I feel more iteration is needed to generate reasonable stories.\n\nI'm also wondering how good the StoryStream dataset actually is for this task. From figure 5 it actually doesn't seem to have a very coherent story either, which might be why the trained model struggles. Figure 5 looks like a bunch of image captions with very little narrative storytelling. To be fair, it does seem that the other existing datasets have the same issue.\n\nThe main comparison with previous works is with MM-interleaved, but I am concerned about the fairness of these comparisons. MM-interleaved uses stable-diffusion-2-1 and Seed-story use SDXL. SDXL should be much better than stable-diffusion-2-1, and it is possible that the results in Figure 6a,b,d are strongly affected by this. Also, MM-interleaved use ViT-L and seed-story uses ViT-G as the image encoder which might also affect these numbers. MM-interleaved uses Vicuna-13b and seed-story uses llama2 which could affect Figure 6c. The authors do not mention these important differences between MM-interleaved and seed-story in the paper.\n\nNitpicks:\nReferences need to be cleaned up, there are multiple cases of duplicate references. E.g. for Qwen-vl, \"Improved baselines with visual instruction\ntuning\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How do ensure the quality of the proposed dataset, and what are the criteria measured if any?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The tokenizer and de-tokenizer training coupled with multimodal instruction finetuning is neat.\n- The adaptation stage seems useful.\n- The extended multimodal attention sink mechanism is interesting and should be studied more."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work tackles the challenging problem of multimodal story generation – generating coherent textual stories with corresponding images interleaved with them. The work proposes a three-stage procedure, where the first stage trains an image de-tokenizer to regress towards target image features, while the second stage unifies the visual tokenizer and detokenizer with a multimodal LLM to generate simultaneously the image and textual features. Then, the trained model will undergo a final de-tokenizer adaptation step where the diffusion loss will be used to refine the outputs in their pixel quality. The authors also extend the attention sink mechanism with “focused” tokens on several special positions to preserve story length and consistency.\nThe work features additionally a collected multimodal story generation dataset, which is also used to evaluate the above proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the proposed method is evaluated well on the featured StoryStream dataset, the method should still be evaluated on existing ones such as Pororo and Flint Stones.\n- While I appreciate the proposed dataset, the visual consistency test is still lacking and a well defined metric particularly for visual consistency is much needed.\n- The attention sink mechanism is not well-studied yet in this manuscript, for example, how does the performance with its introduction scale with lengths, story complexities, and character consistencies?\n- Some human evaluation is needed for the results comparisons."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "+ Why attention sink always choose the beginning of the text/image tokens, what is the insight behind this. How about long sequence and the current generation is not quite relevant to the beginning.\n+ The evaluation metric is not very comprehensive for the story generation setting. It only shows FID and CLIP score. First of all, CLIP score is not very reliable for identifying the fine-grained vision-language similarity. Except for the human evaluation, is there any metric for demonstrating the image consistency and character accuracy?\n+ The paper targets on multimodal story generation setting. How to demonstrate the text generation follows a coherent and logical storytelling progression?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "+ The paper curated a high-resolution long story dataset StoryStream that contains 257k images with story length up to 30 frames.\n+ It proposes a multimodal attention sink mechanism for efficient long-sequence generation.\n+ The qualitative results are impressive for generating long multimodal stories."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presented SEED-Story, aiming to generate long, coherent stories combining narrative texts with visually consistent images, addressing challenges in multimodal story generation. It proposes a multimodal attention sink mechanism for efficient long-sequence generation, and release the StoryStream dataset to benchmark the model. Through comparison and user studies, the authors claim SEED-Story outperforms existing models in terms of coherence, visual quality, and style consistency in multimodal storytelling."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "+ The model architecture is quite similar SEED, with proposed multimodal attention sink.\n+ The evaluation is not very comprehensive, giving existing literature also evaluates image consistency and character accuracy in the story generation setting."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nyang2024seedstory,\ntitle={{SEED}-Story: Multimodal Long Story Generation with Large Language Model},\nauthor={Shuai Yang and Yuying Ge and Yang LI and Yukang Chen and Yixiao Ge and Ying Shan and Ying-Cong Chen},\nyear={2024},\nurl={https://openreview.net/forum?id=yEPNPbF8E7}\n}"
},
"abstract": {
"value": "With the remarkable advancements in image generation and open-form text generation, the creation of interleaved image-text content has become an increasingly intriguing field. Multimodal story generation, characterized by producing narrative texts and vivid images in an interleaved manner, has emerged as a valuable and practical task with broad applications. However, this task poses significant challenges, as it necessitates the comprehension of the complex interplay between texts and images, and the ability to generate long sequences of coherent, contextually relevant texts and visuals. In this work, we propose SEED-Story, a novel method that leverages a Multimodal Large Language Model (MLLM) to generate extended multimodal stories. Our model, built upon the powerful comprehension capability of MLLM, predicts text tokens as well as visual tokens, which are subsequently processed with an adapted visual de-tokenizer to produce images with consistent characters and styles. We further propose multimodal attention sink mechanism to enable the generation of stories with up to 25 sequences (only 10 for training) in a highly efficient autoregressive manner. Additionally, we present a large-scale and high-resolution dataset named StoryStream for training our model and quantitatively evaluating the task of multimodal story generation in various aspects. \nAll models, codes and datasets are released in https://anonymous.4open.science/r/SEED-Story."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Shuai_Yang7",
"~Yuying_Ge2",
"~Yang_LI82",
"~Yukang_Chen1",
"~Yixiao_Ge2",
"~Ying_Shan2",
"~Ying-Cong_Chen1"
]
},
"authors": {
"value": [
"Shuai Yang",
"Yuying Ge",
"Yang LI",
"Yukang Chen",
"Yixiao Ge",
"Ying Shan",
"Ying-Cong Chen"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM",
"Story telling",
"multi-modal generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "yang|seedstory_multimodal_long_story_generation_with_large_language_model"
},
"pdf": {
"value": "/pdf/21bf8ccf6d62f46403c12cf7f9d54386d665569a.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/fcd4359397ddf2eac3ffe411a8d312171ff0b725.zip"
},
"title": {
"value": "SEED-Story: Multimodal Long Story Generation with Large Language Model"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
yEnJvc7ogD | An Efficient Plugin Method for Metric Optimization of Black-Box Models | main | Active | optimization;black box systems;domain adaptation;distribution shift;classification | transfer learning, meta learning, and lifelong learning | 3;3;5;5 | 4;5;4;4 | 3;2;2;2 | 2;2;2;2 | 4;3;2;3 | 4 | 4.25 | 2.25 | 2 | 3 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Does the order of class pairs impact the iterative process for finding optimal weights?\n2. CWPLUGIN currently does not take class frequency or class imbalance in the validation dataset into account. Could incorporating these factors enhance performance, particularly in label-shift scenarios?\n3. Why was BERT-FT chosen as a baseline for comparison instead of fine-tuning other used black-box models like DistilBERT, RoBERTa, or DistilRoBERTa? For example, section 4.2.1, black-box model is DistilBERT, but we are comparing to BERT-FT."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "**Originality:** While CWPLUGIN shares the concept of post-processing prediction probabilities through reweighting, it is uniquely motivated by the need to adapt black-box models to specific target domains, providing a fresh perspective on model adaptation for restricted-access models.\n\n**Quality:** CWPLUGIN demonstrates strong performance across a range of scenarios without requiring large amounts of validation data. And its design can be adapted to many settings. Their experiment shows promising results.\n\n**Clarity:** The motivation for CWPLUGIN is clear to me. However, the algorithmic section could benefit from further clarification (see below).\n\n**Significance:** This paper addresses a critical and emerging challenge in the field: adapting black-box models for task-specific needs. The proposed method shows effectiveness and efficiency by adapting predictions through simple probability reweighting, providing a practical solution for the community’s growing demand for adaptable AI tools."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Black-box or proprietary models often limit users to accessing only predictions via API calls, making it difficult to adapt the model behaviers to align with specific user preferences. This paper introduces CWPLUGIN, a plug-and-play method that reweights prediction probabilities to match users' desired metrics or target domains. CWPLUGIN uses a coordinate-wise search algorithm that iteratively finds optimal relative weights for each class pair to achieve the desired reweighting. Empirical results show that CWPLUGIN outperforms existing calibration methods and fine-tuning approaches, especially when data size is limited. Additionally, CWPLUGIN proves effective in handling scenarios with label shift and label noise. This framework offers new opportunities for users to adapt black-box models to their tasks, allowing for a degree of customization previously unavailable."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Algorithm 1:** The CWPLUGIN algorithm iterates over each class pair; however, the notation used is unclear. The symbol m denotes the number of classes but is also used as a class index, which creates confusion. Additionally, in line 3 of the algorithm, a single for-loop is shown, which does not accurately reflect the process of iterating over class pairs. Using a nested for-loop structure would better clarify this.\n\n**Class Size:** The experiments were conducted on datasets with a relatively small number of classes. It would be beneficial to include results on datasets with a larger number of classes, such as adapting to specific metrics on CIFAR-100, TinyImageNet, ImageNet, to demonstrate the scalability of CWPLUGIN when m is high.\n\n**API Call Expense for Validation:** The paper could discuss how the cost of API calls increases with the size of the validation dataset, which may be a limitation in scenarios where extensive validation data is needed to reach desired performance.\n\n**Suggested References:** Two relevant papers are suggested to read and include:\n* The first paper leverages relational information in label space to reweight prediction probabilities without requiring validation datasets: https://arxiv.org/abs/2307.12226.\n* The second paper adapts black-box models by steering model outputs using a combination of tuned and untuned models: https://arxiv.org/abs/2401.08565."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "**Clarification regarding Section 4.1 setting**: In Section 4.1, could you elaborate on how the regression model is used for classification tasks? What is the process for converting regression outputs into discrete class labels? Are there predefined thresholds or ranges used to map continuous outputs to specific classes?\n\n**Clarification regarding section 4.2.1 setting**: distilBERTbased is used a the base model while a finetuned version of BERT is used as a baseline. What is the reason behind choosing a different model as a basline? Would it be more appropriate to use a fine-tuned version of distilBERT (distilBERT-FT) as the baseline for a more direct comparison? Alternatively, why not use BERT as the base black-box model to maintain consistency with the baseline (perhaps a more apples-to-apples comparison)? How might these different choices impact the interpretation of the results and the conclusions drawn from the comparison?\n\n**Clarification regarding section 4.2.2 setting**: Why test with different base models for lmemotions and lmemotionsOOD.? how about using the same base model across all tasks? If different base models are necessary, could an additional ablation study be conducted using a single base model across all tasks to isolate the effect of the model choice?\n\n**Clarification regarding section 4.3 setting**: The authors mention on lines 484-485 that ‘For both the label shift and label noise settings, we utilize a model trained on GLUE (Wang et al., 2019) and ANLI as our base, black-box predictor’ but did not mention which model used. What model/LLM is used for this task?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The method is computationally very efficient as it solely relies on output probabilities of the black-box model. \n\nThe authors also propose ways to further save costs when data is balanced across classes by using parallelization or when the metric it is optimizing has a specific quasi-concave shape; these are interesting observations.\n\nThe method is also sample efficient, requiring only a few additional samples to optimize the weights w. \n\nThe paper is well-written and the presentation is good overall."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the author propose CWPLUGIN, a post-hoc method that adapts black-box machine learning models to new target distributions and optimizing specific performance metrics without requiring access to the model's internals (e.g. training details, metric optimizes, hyperparameters used, etc.). CWPLUGIN is a post-hoc method as it takes in as input the set of probabilistic multiclass predictions (e.g. softmax outputs) on a target domain and their corresponding true labels, thereby disregarding any feature information. CWPLUGIN optimizes metrics that are simple functions of the confusion matrix and outputs new class weights, one for each class. The method can be made efficient when the data is balanced across classes or when the metric it is optimizing has a specific quasi-concave shape."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Limited novelty**: The authors mention the “probing classifier” approach Hiranandani et al. (2021) that solves a, global linear system in order to find the weights which optimize a particular metric. CWPLUGIN is instead local in that it considers only pair-wise comparisons between classes. Is this the only difference between 'probing classifier' Hiranandani et al. (2021) and CWPLUGIN? \nIt is unclear if this difference constitutes a significant advancement, especially since the probing classifier already performs very well (e.g. in Figure 6, Probing in fact outperforms in terms of MCC and G-mean. Similarly, Figure 2 shows only marginal improvements in F-measure and accuracy compared to other methods, especially the probing classifier).\n\n**Key literature missing**: The paper could benefit from citing some very important studies in the calibration literature that are relevant to the discussion, including:\n\n1)\tZihao Zhao, Eric Wallace, Shi Feng, Dan Klein, and Sameer Singh. Calibrate before use: Improving few-shot performance of language models. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th ICML 2021.\n\n2)\tAbbas, M., Zhou, Y., Ram, P., Baracaldo, N., Samulowitz, H., Salonidis, T., and Chen, T. Enhancing in-context learning via linear probe calibration. In Proceedings of The 27th AISTATS 2024.\n3)\tZhixiong Han, Yaru Hao, Li Dong, Yutao Sun, and Furu Wei. Prototypical calibration for few-shot learning of language models. ICLR 2023.\n\n4)\tHan Zhou, Xingchen Wan, Lev Proleev, Diana Mincu, Jilin Chen, Katherine Heller, and Subhrajit Roy. Batch calibration: Rethinking calibration for in-context learning and prompt engineering. ICLR 2024.\n\n5)\tZhongtao Jiang, Yuanzhe Zhang, Cao Liu, Jun Zhao, and Kang Liu. Generative calibration for in-context learning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of EMNLP 2023.\n\n6)\tM. Shen, S. Das, K. Greenewald, P. Sattigeri, G. Wornell, and S. Ghosh. Thermometer: Towards universal calibration for large language models. ICML 2024.\n\nThese papers focus on calibration for LLMs and could also serve as valuable baselines, especially because the authors do studies on language tasks. Including them could strengthen the paper's comparative analysis and contextualize its contributions within the broader field of language model calibration. \n\nNotably, Zhao et al. (2021) and Abbas et al. (2024) are especially pertinent, since these are also very simple post-hoc methods that use a handful of samples in the range comparable to the one used by the authors (i.e. on the order of tens or hundreds of examples). While these papers don't directly address the specific metrics used by the authors, they demonstrate high utility in terms of accuracy. It would be valuable to evaluate their performance on the metrics employed in this study, such as F-measure, MCC, and G-mean. Comparing CWPLUGIN against these methods would provide a more comprehensive assessment of its effectiveness.\n\n\n**Marginal improvement over baselines**: Figure 2 shows only marginal improvements in F-measure (e.g. 0.579 vs 0.576) and accuracy (e.g. 0.619 vs 0.617) compared to other methods, especially the probing classifier. Moreover, in Figure 6, Probing in fact clearly outperforms CWPLUGIN in terms of MCC and G-mean.\n\nSimilarly, in Figure 3, on Language tasks, we observe marginal improvement over FullDirich baseline on most metrics except F-measure (e.g. 
0.563 vs 0.562 on accuracy, 0.256 vs 0.255 on MCC).\n\nCompared to the baselines, CWPLUGIN is the worst-performing method in terms of accuracy and MCC for lmemotions (Figure 8) and one of the worst-performing methods on lmemotionsOOD (Figure 9). The results are perhaps good for applications where different evaluation metrics (e.g. F-measure) are more critical than mere predictive accuracy. However, I think even improvements on these metrics are marginal in most cases.\n\n**The Figure 5 table's ANLI-with-label-noise results for G-mean do not match the results in Figure 13 in the appendix. In Figure 13, the Clean baseline always outperforms CWPLUGIN for all validation set sizes, whereas in Figure 5, Clean has a lower mean of 0.528 compared to 0.541 for CWPLUGIN.**\n\n**Reproducibility**: The code for reproducing the results has not been provided, which may raise concerns about the reproducibility of the findings. Making the code available would enhance transparency.\n\n**Minor comments**: \nLine 263: typo: “…linear-diagonal metric Both results…” has a missing full-stop."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The algorithm’s description is confusing. The algorithm is only iterating over k, but the caption shows iteration over class pairs. The symbol m is abused, sometimes indicating number of classes and sometimes a specific class. \n2. What metrics are the original models optimizing? This is important for understanding the results. \n3. The authros should re-evaluate whether a number should be bolded in each table. When there is overlap in the error intervals, the result is not usually not considered significant. \n4. Table captions should be above the table. Figure 5 is a table. Figures and tables should not be combined together as is the case for Figure 3 and Figure 2. Please present them individually. \n5. The abstract on openreview is different from that of the paper. Please converge on a single version of the abstract."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. CWPlugin is a simple and explainable optimization method, without relying on black-box procedures. Non-ML experts could adopt this algorithm. \n2. The algorithm in theory recovers the Bayes optimal classifier. \n3. Empirical performance is consistent across datasets, and equally importantly, across varying validation set sizes. It works on several metrics related to the confusion matrix and under distribution shift settings. \n4. Relvant to LLM practitioners, the method beats fine-tuning on BERT when data is limited."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes CWPlugin, a post-hoc algorithm to optimize a black-box ML model’s performance on a specifci metric. The algorithm iterates over class pairs and optimizes the decision boundaries à la Bayes optimality. The authors proves that the method is theoretically more efficient than brute force search with a binary search implementation in the average case. Empirically, the method is compared against several post-hoc and calibration baselines on prediction tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Novelty is limited in the case of CWPlugin. The method’s core is finding an optimal vector w to reweight a black-box model’s output. The novel contribution within the algorithm is the local search procedure over pairs of classes. The improvement here is rather incremental. As a more subtle point, the baselines used in Section 4 is inadequantely discussed in the related work subsection. Only probing is mentioned earlier in the paper. This results in a weak qualitative comparison with prior work.\n2. A major section of the paper shows CWPlugin’s superior efficiency. This advantage is not demonstrated in the empirical section: there are no concrete runtime comparisons. Speedup from parallelization is also unverified. \n3. The presentation of empirical results is not convincing. The tables adopt a specific validation set size, all different depending on the dataset but lacks justification. Sometimes the tables show 4 metrics, and sometimes only 2. This might indicate cherry-picking. The figures show more comprehensive results, but their presentation is poor. Legends are blocking the lines. Figure and axes titles are not formatted properly, and the color bands adds to the confusion. Based on the color bands, CWPlugin does not have an advantage in performance over the baselines due to overlap. \n4. Further justification of why the datasets are chosen is necessary. Most of them also require proper citations. More importantly, the authors need to show these datasets as standard for previous works in the same field. For example, the income predicion dataset is uncommon in deep learning from a quick search. \nOverall, some of these aspects can be fixed and improve the quality of the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "**Minor concerns:**\n1. Are there any theoretical analyses for the proposed optimization/adaptation procedure if the metric is not linear-diagonal? I realize this might be a big ask, but additional comments on this would be appreciated.\n2. Clarifying question: is the validation set/test set sampled from the same target distribution (different from the set the black-box model is trained on)?\n3. An important naive baseline to include is directly optimizing $\\mathbf{w}$ on some likelihood objective defined over the validation set $\\mathcal{S}$. This does not optimize for the metrics of interest $f$, but it is important to see the effects this would have on performance.\n4. There is a concern for overfitting or reward hacking of $f$ since $\\mathbf{w}$ is tuned solely on (a small) $\\mathcal{S}$. The empirical results from the test set seem to indicate this is not a major concern. But can the authors comment on this point with more detail?\n5. Importantly, what are the effects of different choices of $\\varepsilon$ on post-hoc adaptation performance?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper focuses on an interesting and relevant problem, namely post-hoc adaptation of a black-box classifier, with only a small sample from a target distribution of interest.\n2. The method proposes a novel way to optimize weights to re-weigh predicted probabilities and applies to multi-class classification.\n3. The paper reads well, with intuition and formal depictions interleaved in a nice way."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method to perform post-hoc adaptation of a black-box classifier, by tuning class weights to optimize a metric of interest on a target distribution. Specifically, the authors introduce CWPlugin, which performs a line search to find the best reweighting coefficient for each (m-1) class, based on pairwise comparisons against a reference class."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Main concerns:**\n1. The authors only compare empirically against calibration techniques. The proposed problem of finding a set of weights $\\mathbf{w}$ to adapt a pre-trained model to optimize a black-box metric (which the authors refer to as `query-access only` metrics) is an **instantiation of black-box optimization**. As such, mature BBO techniques, including Bayesian optimization [R1], population-based search (e.g., evolution strategies) [R2], and even random search, should be included in the empirical comparisons.\n2. The proposed line-search method has quite high complexity $\\mathcal{O}(mn/\\varepsilon)$, and scales linearly with number of classes $m$, and number of samples $n$, and inversely with search resolution $\\varepsilon$. Techniques like BO and ES remove this dependence on $m$, $n$ and might be more suitable. They also do not require the pairwise comparison that is the basis of the line search.\n3. Fundamentally and theoretically, what are the benefits of the author's proposed approach over these BBO techniques (including BO, ES)? Based on my understanding, the proposed technique has some theoretical underpinnings for linear-diagonal metrics, but as the authors emphasize that they focus on 'query-access only' metrics, it is not clear whether there are broader theoretical guarantees.\n4. How is the reference class selected? The authors claim that the choice has `little impact to the algorithm`, but this is not justified. More specifically, how does the choice of reference class impact the optimization, since the line search is performed based on pairwise comparisons (against the reference class)?\n\n[R1] Snoek, J., Larochelle, H. and Adams, R.P., 2012. Practical bayesian optimization of machine learning algorithms. Advances in neural information processing systems, 25.\n\n[R2] Salimans, T., Ho, J., Chen, X., Sidor, S. and Sutskever, I., 2017. Evolution strategies as a scalable alternative to reinforcement learning. arXiv preprint arXiv:1703.03864."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A simple coordinate-wise optimization method to adapt black-box models by post-processing their predictions with a scaling vector."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024an,\ntitle={An Efficient Plugin Method for Metric Optimization of Black-Box Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yEnJvc7ogD},\nnote={under review}\n}"
},
"abstract": {
"value": "Many machine learning algorithms and classifiers are available only via API queries as a ``black-box'' --- that is, the downstream user has no ability to change, re-train, or fine-tune the model on a particular target distribution.\nIndeed, a downstream user may not have any knowledge of the training distribution or performance metric used to construct and optimize the black-box model.\nWe propose a simple and efficient plugin method which takes as input arbitrary multiclass predictions and post-processes them in order to adapt them to a new target distribution, while simultaneously optimizing for a particular metric of the confusion matrix.\nImportantly, the plugin method is \\textit{post-hoc}, does not rely on feature information, and only requires a small number of probabilistic predictions along with their corresponding true label.\nWe empirically demonstrate that plugin has performance competitive with related methods on a variety of tabular and language tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"optimization",
"black box systems",
"domain adaptation",
"distribution shift",
"classification"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a5916c6360f04daed29de9c88907533849166537.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "An Efficient Plugin Method for Metric Optimization of Black-Box Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yEox25xAED | Grammar Reinforcement Learning: path and cycle counting in graphs with a Context-Free Grammar and Transformer approach | main | Active | Graph;Reinforcement Learning;Grammar;Cycle Counting | reinforcement learning | 3;5;5;5;6 | 3;3;4;4;4 | 2;2;2;3;3 | 2;2;2;2;2 | 1;2;3;3;3 | 4.8 | 3.6 | 2.4 | 2 | 2.4 | 0.666667 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please discuss the issues mentioned above.\n\nL. 134: in the formula brackets \\{...\\} are missing.\n\nL. 186: How do you define the adjacency matrix A? Do you mean really that A_{i,j} = 1 iff i is connected with j in G?\n\nL. 814: Lemme D.1 --> Lemma D.1"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Counting the number of paths and cycles (of specific lengths) in graphs is an important task in algorithmics and combinatorics with many applications to different fields. The authors show that using the DRL approach in combination with the Monte Carlo Tree Search method allows the discovery of more efficient matrix-based formulae for counting paths (of lengths up to six) than the best known so far. This is a nice achievement of the work.\n\nThe authors present MCTS-based DRL algorithm, termed Grammar Reinforcement Learning (GRL), that started with context-free grammar uses an equivalent Pushdown Automaton (PDA) for generating searching trees. To learn policy and value functions in the GRL a transformer architecture is proposed which models the PDA. This provides an interesting connection between the use of grammars, PDAs and reinforcement learning. Using this approach, the algorithm were able to discover formulae that are more efficient than those proposed by Voropaev and Perepechko (2012)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a Reinforcement Learning based method to discover an algebraic formula involving (adjacency) matrix of an undirected graph and some constant matrices, for counting the number of paths and cycles of specific (short) lengths in the graph. The main experimental achievement is finding (simple) formulas for path lengths l = 2, 3, 4, 5, 6. In the case of l = 2 the discovered formula is equal to the best known formula proposed by Voropaev and Perepechko (2012), and for l = 3, 4, 5, 6 the algorithm found even more efficient alternative expressions than those introduced by Voropaev and Perepechko.\n\nThe algorithm implements a searching strategy to find an optimal formula generated by an appropriate context-free grammar (CFG), or equivalently by a Pushdown Automaton (PDA). To this aim the authors propose a Monte Carlo Tree Search (MCTS) based Deep Reinforcement Learning algorithm, termed Grammar Reinforcement Learning (GRL)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper is largely based on the work of Piquenot et al. (ICLR 2024), that introduced a methodology based on CFGs that led to the construction of a new Grammatical Graph Neural Network model. It is provably 3-WL equivalent. As stated in the submitted work the generative framework of Piquenot et al. (ICLR 2024) already produces all formulae identified by Voropaev and Perepechko (2012). Hence, the innovative aspects of this submission are somewhat limited. Furthermore, it is not clear to what extent the proposed approach is generic. It would be interesting to have a discussions on such applications. It would be also interesting to compare the methods using the obtained matrix-based formulae with other methods for counting paths and cycles of lengths up to six."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How are the proofs of equivalence for the grammar-generated formulae derived? Is it challenging to verify correctness once a candidate formula is generated?\n2. By characterizing various rewriting rules used in these proofs, could it be possible to automate the generation of more compact formulae using traditional methods, such as a rewriting system?\n3. If so, has the proposed approach been compared against exhaustive explorations that utilize such rewriting rules?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper addresses a problem of foundational significance, exploring the automation of mathematical creativity through machine learning. \n- The approach is well-motivated and effectively presented, with a clear architecture and a detailed explanation of generating formulae from a given grammar. \n- The framework successfully recovers several known formulae while also discovering new, more compact ones."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a grammar-based reinforcement learning (RL) approach to synthesize novel formulae that describe path and cycle counts in graphs. Building on prior work, particularly by Voropaev and Perepechko, which provided matrix multiplication and Hadamard product-based formulae for counting expressions, this study defines a context-free grammar (CFG) capable of generating these expressions. Using a reinforcement learning and transformer-based approach, the authors search for more compact formulae with improved time complexity. They report multiple successes with this framework, though they note that training the model is time-intensive."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The importance of path and cycle counting is not sufficiently argued, particularly for an ICLR audience that may require more context on its broader relevance.\n- It remains unclear whether the RL and transformer framework is essential to the solution, as it seems more like a general-purpose tool for exploring terms generated by a given CFG..\n- The framework does not provide a proof of correctness for the generated formulae, requiring a time-consuming, manual derivation for each result. Automating the generation of explanations (a sequence of rewriting rules) to demonstrate correctness would significantly improve the approach's appeal.\n- The method appears more general beyond its current application domain. The paper could be strengthened by exploring additional applications of the proposed framework."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Regarding Proofs:\n- In the proof of theorem 3.1/A.1 it says that $(N \\times \\mathrm{J} \\times \\mathtt{diag}(w)) \\cdot \\mathrm{I} = \\mathtt{diag}((N \\cdot \\mathrm{J}) \\times w)$ but if $n=2$, $N = \\begin{bmatrix} a & b \\\\\\\\ c & d \\end{bmatrix}$, $w = \\begin{bmatrix} x \\\\\\\\ y \\end{bmatrix}$, we have $(\\begin{bmatrix} a & b \\\\\\\\ c & d \\end{bmatrix} \\times \\begin{bmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\end{bmatrix} \\times \\begin{bmatrix} x & 0 \\\\\\\\ 0 & y \\end{bmatrix}) \\cdot \\begin{bmatrix} 1 & 0 \\\\\\\\ 0 & 1 \\end{bmatrix} = (\\begin{bmatrix} b & a \\\\\\\\ d & c \\end{bmatrix} \\times \\begin{bmatrix} x & 0 \\\\\\\\ 0 & y \\end{bmatrix}) \\cdot \\begin{bmatrix} 1 & 0 \\\\\\\\ 0 & 1 \\end{bmatrix} = \\begin{bmatrix} bx & 0 \\\\\\\\ 0 & cy \\end{bmatrix}$ and $(\\begin{bmatrix} a & b \\\\\\\\ c & d \\end{bmatrix} \\cdot \\begin{bmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\end{bmatrix}) \\times \\begin{bmatrix} x \\\\\\\\ y \\end{bmatrix} = \\begin{bmatrix} 0 & b \\\\\\\\ c & 0 \\end{bmatrix} \\times \\begin{bmatrix} x \\\\\\\\ y \\end{bmatrix} = \\begin{bmatrix} by \\\\\\\\ cx \\end{bmatrix}$, hence, $\\mathtt{diag}(\\begin{bmatrix} by \\\\\\\\ cx \\end{bmatrix}) = \\begin{bmatrix} by & 0 \\\\\\\\ 0 & cx \\end{bmatrix}$. Therefore, I do not yet see how the induction can work.\n- In the proof of theorem D.1, it is used that $\\mathrm{J} \\cdot (A\\times(\\mathrm{I} \\cdot (A \\times A))) = A\\times(\\mathrm{I} \\cdot (A \\times A))$ but if $A=\\begin{bmatrix} 1 & 1 \\\\\\\\ 1 & 1 \\end{bmatrix}$, we have $A \\times A = 2A, \\mathrm{I} \\cdot A = \\mathrm{I}, \\mathrm{J} \\cdot A = \\mathrm{J}$ and hence $A\\times(\\mathrm{I} \\cdot (A \\times A)) = A \\times 2\\mathrm{I} = 2A \\neq 2\\mathrm{J} = \\mathrm{J} \\cdot (A\\times(\\mathrm{I} \\cdot (A \\times A)))$.\n\nRegarding PDA:\n- The usage of nondeterministic PDA is a bit confusing as PDAs are usually language acceptors rather than language generators. Still, of course, this is technically just a relabelling of the input as output. This happens in the text without clarification.\n- There is also an inconsistent use of the terms 'transition relation' vs. 'transition function'.\n- The PDAs could be omitted entirely using generative syntax trees induced by context-free grammar as there is no build-up on automata theory. For Gramformers, this would mean defining a variable token for every nonterminal character, a rule token for every production, and a terminal token for every terminal character. This would make the approach much easier to follow."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper introduced an exciting setup of transformer-guided MCTS on context-free grammar in a reinforcement learning setting for generating specific words of the corresponding context-free language.\n- The chosen application of efficient counting cycles is quite versatile in real-world scenarios."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a reinforcement learning setting for the generation of formulae that allow for efficient counting paths and cycles up to length six in directed graphs by implementing a transformer-guided Monte Carlo tree search on a generative syntax tree of a suitable context-free grammar.\n\nWhile the application of efficiently counting cycles is very intuitive, and the method of transformer-guided MCTS on formal grammar is very interesting, the paper misses any discussion of related work concerning RL methods of deep neural network-guided MCTS or MCTS on formal grammar. The paper would also heavily benefit from discussing other use cases of transformer-guided MCTS on formal grammar for GRL such that the paper could have focused more on the method of GRL rather than a specific use case only. The explanations and proofs also need clarity and seem incomplete or partly wrong."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper does not mention any other work related to the used/introduced machine learning method of grammar/language/llm-guided MCTS (the cited work is only about the chosen application of efficiently counting paths/cycles in graphs).\n- The paper could include other GRL applications to strengthen the introduced method's flexibility.\n- The relevance of counting cycles could be better illustrated with a few examples next to the literature reference.\n- Some explanations need coherence and clarity, especially the usage of PDAs in the paper, which could be more accurate or even omitted. (see Questions)\n- At least two proofs must be completed or corrected, and a proper list of necessary conditions/definitions could improve readability in most cases. (see Questions)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "C1. Given the usage of a reinforcement learning approach, the Markov Decision Process (i.e., the state/action/transitions/rewards) should be clearly specified in mathematical terms. The current textual description is ambiguous.\n\nC2. The parameters used in your model (e.g., exploration parameter for MCTS, architectural parameters and optimizer / learning rate etc. for the transformer) should be clearly specified in an Appendix. The reproducibility is limited otherwise.\n\nC3. There are a few clashes in notation because of trying to unite RL and CFG/DFA notations. For example, Q is used to refer to both the set of states of the automaton as well as the Q-value in RL. I'd suggest checking that each symbol is used consistently.\n\nC4. I would suggest discussing whether and how the approach could apply to directed graphs as well (it seems undirected graphs are assumed).\n\nC5. Could you also comment on what would be needed to generalise the approach to cycles of arbitrary length (more than 7)? Is it simply a matter of applying your method more computational power, or is the approach limited in this sense to predefined path lengths?\n\nC6. Typos: \"ouputs\" (Fig 6 caption)"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "S1. The authors treat an important problem with an interesting and novel methodology, and use it to discover demonstrably better path and cycle search algorithms.\n\nS2. The paper is fairly well-written and the technique appears sound."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers the problem of path and cycle counting in graphs. It aims to do so efficiently using formulae that perform a set of pre-defined matrix operations, specified as a context-free grammar. The authors frame this task as a combinatorial optimization problem and propose a reinforcement learning method to solve it. The method is made up of a Monte Carlo Tree Search with neural networks for function approximation. Particularly, the authors propose a transformer architecture that can process tokens from the grammar. The authors demonstrate that their method discovers more efficient formulae for path and cycle counting than those that were previously known."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. The biggest weakness of the work is its very limited evaluation. The paper proposes a very complex methodology for operating in this discrete search space, but does not compare it with meaningful baselines. It is therefore not possible to determine whether the proposed method is indeed a better means of navigating this search space than other more standard methods, especially given the very large reported computational cost. At the very least, in my opinion, the paper should include empirical comparisons with:\n\n- Online Monte Carlo Tree Search without any function approximation (to demonstrate the benefit of using the transformer within the search);\n- The learned transformer policy at convergence;\n- A classic metaheuristic such as simulated annealing;\n- A simple random search that samples from the grammar and is given the same amount of rollouts as MCTS.\n\nW2. Another weakness of the work is that the method does not generate algorithms that are provably correct. Indeed, the authors have to resort to proving the correctness of the algorithms themselves, which they do. This stems from the fact that, to evaluate a given formula, the method needs to compare the output over a limited set of graphs with generated ground truth values. It could be the case that a formula produces the correct outputs for this set of examples while missing some edge cases. This is a limitation that should be acknowledged and discussed.\n\nW3. The literature review omits many related works on reinforcement learning for combinatorial optimization over graphs. I would suggest including at least [1], one of the first recent works to treat this type of problem with RL, and which also uses token-like sequences; [2], a work that made substantial leaps in RL for combinatorial optimization in terms of performance and scalability; as well as other recent works in this space (see [3] for a survey).\n\n[1] Vinyals, O., Fortunato, M., & Jaitly, N. (2015). Pointer networks. Advances in neural information processing systems, 28.\n\n[2] Khalil, E., Dai, H., Zhang, Y., Dilkina, B., & Song, L. (2017). Learning combinatorial optimization algorithms over graphs. Advances in neural information processing systems, 30.\n\n[3] Darvariu, V.-A., Hailes, S., & Musolesi, M. (2024). Graph Reinforcement Learning for Combinatorial Optimization: A Survey and Unifying Perspective. Transactions on Machine Learning Research."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Could you help to answer my concern on the first point of weakness?\n- Could you clarify the purpose of the cubic shapes in Figures 3, 4, and 5? Are they intended to convey a particular meaning beyond their function as symbols?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Great literature work: This paper references existing literature, which demonstrates an awareness of previous work in the area and situates the study within the broader research context.\n- Full details of the proposed method with detailed theoretical justification."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a new approach to efficiently count paths and cycles in graphs. GRL uses a pushdown automaton (PDA) approach to generate and optimize mathematical formulas for path and cycle counting, addressing computational challenges in fields such as network analysis, biology, and social sciences. By framing path/cycle counting as a CFG-constrained search problem, GRL discovers new matrix-based formulas, improving computational efficiency by two to six times over current methods. Key contributions include a generic framework for generating efficient formulas within a CFG, the development of Gramformer to learn policies and values within a PDA model, and the identification of novel, efficient counting formulas for graph substructures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The focus of this work is on re-solving a problem that requires a polynomial-time algorithm, as indicated in Line 99: \"As mentioned in Section 1, path/cycle counting has been extensively tackled in the literature.\" This context renders the proposed methods less impactful. I recommend that the authors provide a more compelling rationale for why this issue is considered challenging within the fields of network analysis, computer science, biology, and social sciences.\n\n- There are no experiments conducted on datasets from network analysis, computer science, biology, or social sciences. Given that the results in Figure 7 indicate an algorithm running in 0.2 seconds, which is already considered \"very fast,\" it is unclear what specific challenge is being addressed in this figure.\n\n- The mathematical notation used throughout the paper tends to create confusion rather than enhance the reader's understanding of the problem."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024grammar,\ntitle={Grammar Reinforcement Learning: path and cycle counting in graphs with a Context-Free Grammar and Transformer approach},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yEox25xAED},\nnote={under review}\n}"
},
"abstract": {
"value": "This paper presents Grammar Reinforcement Learning (GRL), a reinforcement learning algorithm that uses Monte Carlo Tree Search (MCTS) and a transformer architecture that models a Pushdown Automaton (PDA) within a context-free grammar (CFG) framework. Taking as use case the problem of efficiently counting paths and cycles in graphs, a key challenge in network analysis, computer science, biology, and social sciences, GRL discovers new matrix-based formulas for path/cycle counting that improve computational efficiency by factors of two to six w.r.t state-of-the-art approaches. Our contributions include: (i) a framework for generating transformers that operate within a CFG, (ii) the development of GRL for optimizing formulas within grammatical structures, and (iii) the discovery of novel formulas for graph substructure counting, leading to significant computational improvements."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph",
"Reinforcement Learning",
"Grammar",
"Cycle Counting"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3def87a8eba80de2f61a06bb1723ffdbd54d04f1.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Grammar Reinforcement Learning: path and cycle counting in graphs with a Context-Free Grammar and Transformer approach"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yEwakMNIex | Unified Neural Solvers for General TSP and Multiple Combinatorial Optimization Tasks via Problem Reduction and Matrix Encoding | main | Active | Travelling Salesman Problem;Neural Combinatorial Optimization | learning on graphs and other geometries & topologies | 3;5;5;5 | 4;5;4;3 | 3;3;2;3 | 2;2;2;3 | 2;3;3;3 | 4.5 | 4 | 2.75 | 2.25 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. When converting various combinatorial optimization problems to TSP instances, are there cases where the reduction process fails or underperforms due to problem characteristics? How does the method handle these instances?\n\n2. Given that MatDIFFNet has longer inference times due to complex diffusion steps, are there plans to optimize the model architecture or algorithm to improve inference speed and computational efficiency?\n\n3. In multi-task training, does the interaction between different tasks lead to performance drops in any specific task? Is there a clear mechanism in the model to handle task weight allocation and interdependencies?\n\n4. How robust are MatPOENet and MatDIFFNet when the input data contains noise or incomplete information? Were there any robustness tests conducted?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The introduction of MatPOENet and MatDIFFNet, which use Transformer-based and diffusion-based models, respectively, showcases the application of advanced neural network structures to solve matrix-encoded TSP problems.\n\n- The RedCO framework offers a novel approach by unifying different combinatorial optimization (CO) problems through reduction to a general TSP format. This reduction expands the scope of neural solvers to tackle diverse problem types in a single architecture.\n\n- RedCO's capability to handle non-metric, asymmetric, and discrete TSP instances, unlike traditional Euclidean-focused TSP solvers, significantly broadens its applicability.\n\n- The RedCO framework is designed to incorporate various solver types, including existing methods like DIMES, showing the framework's modularity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a unified neural solver framework called RedCO, which uses problem reduction techniques to map different combinatorial optimization (CO) problems to the general Traveling Salesman Problem (TSP) format. Two novel neural solvers, MatPOENet and MatDIFFNet, are introduced to handle matrix-encoded inputs and solve these problems efficiently. This work aims to extend neural combinatorial optimization beyond specific problem types by providing a scalable solution for problems like asymmetric TSP (ATSP), directed Hamiltonian cycle problems (DHCP), and 3-Satisfiability (3SAT)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the framework performs well for medium-scale problems, its efficiency and feasibility for large-scale, real-world instances (e.g., with tens of thousands of nodes) are not thoroughly demonstrated or tested.\n\n- The use of complex neural models like MatPOENet and MatDIFFNet makes it difficult to understand the inner workings and decision-making processes of these solvers. More interpretability features or case studies would be beneficial.\n\n- The paper mainly focuses on synthetic data for testing, with limited discussion on how the models would handle real-world problem instances that could have different statistical properties.\n\n- There is little exploration into how the proposed solvers manage noisy or incomplete data, which is common in practical applications.\n\n- The MatDIFFNet, while powerful for certain problem types, is computationally intensive, which may hinder its use for larger instances or require additional optimization strategies."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Table 2 shows that MatPOENet and MatDIFFNet outperform LKH in solving 3SAT problems, but they tend to produce worse results in most other scenarios. Could you provide some explanations for this?\n2. How does RedCO perform on standard TSPs (Symmetric TSPs)?\n3. In line 268, the POE is based on $f(x) = 1/cosh(100x)$, can you give more introduction of the empirical function?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well-written and easy to understand.\n2. As far as I know, this is the first study attempting to create a general framework for learning various COPs in a unified manner.\n3. The experiments conducted are thorough, and the results effectively showcase the framework's capability to handle arbitrary matrix-encoded TSPs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Authors propose RedCO, to unify a set of CO problems by reducing them into the general TSP form featured by distance matrices. RedCO demonstrates the potential to efficiently train a neural TSP solver using a diverse range of CO instances, and can also be adapted to specialize for specific problem types."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The organization of the experimental section is lacking. With seven research questions (RQs) presented, the lack of clear categorization makes this part of the paper somewhat difficult to navigate.\n2. The results for DIFUSCO and T2T are not included. It is noted that MatDIFFNet performs well on 3SAT problems, which is developed upon DIFUSCO and T2T.\n3. While the specific problem reduction is detailed in Appendix A, it would be helpful to have a more detailed introduction to the reduction principles and the applicable COPs. Specifically:\n - What types of COPs (or what properties must COPs have) can be reduced to a general TSP?\n - What considerations should be taken into account when performing this reduction?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. MatDIFFNet is trained on 8 NVIDIA H800 80GB GPUs with Intel Xeon (Skylake, IBRS) 16-core CPU is super computational resource consuming, I am curious about the ablation experiments on computational resources.\n2. The test questions in the article experiments are too limited, this article mentions applicability for ``P ≤P general TSP or P in matrix format (VC, Clique, VRPs, FFSP, MIS, etc.)`` etc., I would highly recommend to introduce evaluations on more CO problems to respond to my concerns on applicability.\n3. In Line 1137, you mention that ``Also, they generally evaluated their proposed methods on no larger than 100 nodes of TSP/VRP instances, with a major emphasis of methodological innovations rather than eager pursuit of scalability at sheer engineering level.`` What means the sheer engineering level? Also, it seems that this paper also mainly focuses on no larger than 100 nodes. Please provide a clear explanation.\n4. This paper uses a unique test problem design, which I think requires the authors to implement more comparative algorithms (e.g., GOAL, MVMOE) on the problems they cover for experimental validation.\n5. The results in Table 5 are not sufficient to illustrate performance on larger-scaled data, I think you should provide experiments without the aid of an external process to explore whether the model has the ability to scale up. Also, this paper does experiments on scales of 20-100, and I doubt that it makes sense to compare methods that address large-scale CO problems such as GLOP."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This article is well-written and reasonable.\n2. The author has carried out abundant experiments and discussions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an interesting approach to dealing with multi-task CO by transforming several CO problems into equivalent TSPs. This paper also proposes two new solvers, MatPOENet and MatDIFFNet, to solve the following TSP."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I think this article has two major weaknesses that should be considered.\n1. This paper adopts a quite special modeling approach, and its applicability is worrying. I doubt the effectiveness of reducing multi-CO problems into the general TSP form featured by distance matrices. \nAccording to Fig. 1, you show that NP problems can be transformed into SAT, I am concerned if this part can be proved. For some problems, the transformation into TSP is itself an NP problem if you want to maintain the Found Rate ``FR`` (e.g., CVRP, as mentioned in Appendix D.2.4, ``first clustering points and then solving each cluster as a TSP`` will harm the FR), and even if it can be transformed into TSP, the time complexity of such transaction may increase dramatically.\n2. Some of the experiments in this paper are not clearly described. I tried my best to find out but it is still not clear what the exact settings of the * version, single, and mixed in Table 2 are."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In RQ2 you show a comparison of solution times and results with the LKH method. The reason you showed efficiency in this experiment compared to LKH seems to come entirely from superior performance on the 3-SAT problem. I don't think it is fair to take Average L in this case. I am more curious as to why LKH performs poorly on the 3-SAT problem and where MatPOENet and MatDIFFNet excel in this problem. Can you provide a visual example to help me understand this result intuitively?\n2. For solving efficiency, I think this paper should be compared more with Gurobi for efficiency. I am very curious about the results of this part. I also suggest you add time as reference in Table1.\n3. I am having trouble understanding the specific N and d settings for variants in the ii) part of RQ3 and especially in Table 4. I need more explanation about it.\n4. What is the significance of MatDIFFNet? Based on the results so far (ignoring the future work you mentioned) it looks like its lagging behind in performance and efficiency as well as training efficiency. I would suggest deleting this section or putting it in the appendix. Also The authors say in RQ7 that MatDIFFNet has the potential for more accurate solution space for larger scale instances while mentioning in the limitation that ``MatDIFFNet has the potential for direct solving of larger instances but is currently yet to be implemented.`` But I can't find any evidence for this. But I can't find any evidence for this , please explain this."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The Problem reduction of this paper has theoretical support.\n2. The proposed methods of this paper has advantages in terms of effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This article focuses on multi-task CO problems, proposes a solution method that is general for several CO problems and presents two efficient solvers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The RedCO approach proposed in this paper is not intuitively applicable to a wide range of CO problems. I think the value of multi-task CO should be reflected in its applicability to most CO problems.\n2. The contribution of this paper is weak, translating these problems into a TSP is not a new idea and TSP solver is quite well developed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024unified,\ntitle={Unified Neural Solvers for General {TSP} and Multiple Combinatorial Optimization Tasks via Problem Reduction and Matrix Encoding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yEwakMNIex},\nnote={under review}\n}"
},
"abstract": {
"value": "Various neural solvers have been devised for combinatorial optimization (CO), which are often tailored for specific problem types, ranging from TSP, CVRP to SAT, etc. Yet, it remains an open question how to achieve universality regarding problem representing and learning with a general framework. This paper first proposes RedCO, to unify a set of CO problems by reducing them into the general TSP form featured by distance matrices. The applicability of this strategy is dependent on the efficiency of the problem reduction and solution transition procedures, which we show that at least ATSP, HCP, and SAT are readily feasible. The hope is to allow for the effective and even simultaneous use of as many types of CO instances as possible to train a neural TSP solver, and optionally finetune it for specific problem types. In particular, unlike the prevalent TSP benchmarks based on Euclidean instances with 2-D coordinates, our focused domain of general TSP could involve non-metric, asymmetric or discrete distances without explicit node coordinates, which is much less explored in TSP literature while poses new intellectual challenges. Along this direction, we devise two neural TSP solvers with and without supervision to conquer such matrix-formulated input, respectively: 1) MatPOENet and 2) MatDIFFNet. The former is a reinforcement learning-based sequential model with pseudo one-hot embedding (POE) scheme; and the latter is a Diffusion-based generative model with the mix-noised reference mapping scheme. Extensive experiments on ATSP, 2DTSP, HCP- and SAT-distributed general TSPs demonstrate the strong ability of our approaches towards arbitrary matrix-encoded TSP with structure and size variation. Source code and data will be made public."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Travelling Salesman Problem",
"Neural Combinatorial Optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/af162805c8600bab281edcce3e6f0eb428c69872.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Unified Neural Solvers for General TSP and Multiple Combinatorial Optimization Tasks via Problem Reduction and Matrix Encoding"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yFEqYwgttJ | MDSGen: Fast and Efficient Masked Diffusion Temporal-Aware Transformers for Open-Domain Sound Generation | main | Active | vision-guided audio generation;fast inference;open-domain sound synthesis;masked diffusion models;temporal learning;visual sound source localization;generative AI | generative models | 1;5;6;6 | 4;4;4;4 | 1;2;3;3 | 1;2;4;3 | 4;2;4;4 | 4.5 | 4 | 2.25 | 2.5 | 3.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Three models are presented in the paper (Tiny, Small and Base) except the overfitting large model. What is the model presented in the supplementary files?\n\nIs there any reason to use an image VAE instead of audio VAEs? \n\nDid the authors observe any advantage of GLA over a neural vocoder?\n\nIs it possible to run a subjective listening test, and see the consistency between human evaluation and the audio-video alignment accuracy measured by a DNN model?\n\nCould the authors measure the FAD scores on top of the current FID scores?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "Most contributions of MDSGen are about micro design aspects.\n1. Channel selection of Mel-spec\n2. Time-aware masking strategy for generative models\n3. Reduced dimension of the video features\n4. Small model size and fast inference speed"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes \"MDSGen\", an efficient model based on Masked Diffusion Transformer, for video-to-audio generation.\n\nThe challenges of video-to-audio generation are mainly:\n1. Heavy computation and memory usage;\n2. Requirements for the audio quality;\n3. Requirements for the audio-video alignment;\n\nMDSGen reduces the resource consumption by using very light-weight Transformer coupled with fast diffusion samplers such as DPM solver, as well as a dimension reduction module to reduce the size of the video conditioning embeddings.\n\nMDSGen improves audio quality and audio-video quality by introducing a time-aware masking strategy into the mask DiT framework, together with other efforts.\n\nConceptualy, MDSGen looks like a framework that replaces the \"text prompt\" in text-to-audio DiT [StableAudioOpen],[MakeAnAudio2] by a video feature embedding. Hence the technical contributions are more in micro aspects.\n\nHowever, some design choices may have severely affected the audio quality, making the work less solid or reusable to the community. Audio quality observed in the supplementary files is far from the level in modern text-to-audio models such as [AudioLDM], [MakeAnAudio2], [SpecMaskGIT], [StableAudioOpen]."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "## Major issues\n### 1. Improper audio reconstrution pipeline\nMDSGen ustilizes the VAE from Stable Diffusion, which is not trained for Mel-spec. Although the authors carefully discussed how to take the most advantage of this image VAE, the discussion itself is **NOT** reusable for the audio community, as there have been plenty of audio VAE designs, some of which are publicly available such as [AudioLDM], [MakeAnAudio2], [StableAudioOpen], [DAC].\n\nAnother improper choice is that, MDSGen utilizes the Griffin-Lim Algorithm (GLA) to convert mel-spec back to wave forms. GLA has almost been abandoned by audio community, due to the recent advance in neural vocoder, e.g., [HiFiGAN], [UnivNet], [BigVGAN]. I believe the apparent phase distortion in the supplementary files might have been caused by GLA.\n\nThere is a rough comparison on the quality of audio reconstruction pipeline in a recent paper [SpecMaskGIT], I hope it could be useful for the improvement of MDSGen.\n\nI strongly recommend the authors to consider audio-specified reconstruction pipelines for improved audio quality. Even in audio-visual generation community, we can see the usage of such audio VAE for excellent audio quality, e.g., [VisualEchoes]\n### 2. Invalid claims on the result\nBecause the audio quality is far from the baseline in audio generation community, it is improper to claim that MDSGen is better in \"audio-video alignment\".\n\nI believe, the audio-video alignment can be evaluated only when the audio quality is sufficiently good. Given the current audio quality, I don't think the model is ready for further evaluation.\n## Minor issues \n### 1. Evaluation metrics\nThe FID used in this paper comes from the implementation of SpecVQGAN, a pioneer of audio generation. However, the FAD implementation ([AudioLDM],[FAD_github]) has been more widely accepted in audio community. Evaluating with the widely adopted FAD metric can also help the readers to compre the audio quality with other audio generation models.\n### 2. Insufficient ablation study\nMDSGen trains a learnable module to reduce the video feature sequence into a single vector. From Figure 6, we can observe that the learned weights are quite evenly distributed (except the beginning and ending frames).\n\nThe observation posts a question: How much improvement can the learnable reducer bring compared to a naive average pooling?\n\n[StableAudioOpen]: https://arxiv.org/abs/2407.14358\n[MakeAnAudio2]: https://arxiv.org/abs/2305.18474\n[AudioLDM]: https://audioldm.github.io/\n[SpecMaskGIT]: https://arxiv.org/abs/2406.17672\n[DAC]: https://github.com/descriptinc/descript-audio-codec\n[HiFiGAN]: https://github.com/jik876/hifi-gan\n[UnivNet]: https://github.com/rishikksh20/UnivNet-pytorch\n[BigVGAN]: https://github.com/NVIDIA/BigVGAN\n[VisualEchoes]: https://arxiv.org/abs/2405.14598\n[AudioLDMEval]: https://github.com/haoheliu/audioldm_eval\n[FAD_github]: https://github.com/gudgud96/frechet-audio-distance"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper introduces a novel framework for video-to-audio sound generation that effectively combines a temporal-aware masking strategy with a redundant feature removal module. \n\nMDSGen demonstrates significant improvements in model efficiency by using a smaller masked diffusion transformer architecture. The framework achieves high alignment accuracy on benchmark datasets with a fraction of the parameters, memory usage, and inference time compared to baselines. \n\nThe paper provides a structured explanation of MDSGen’s architecture and mechanisms, including the Temporal-Awareness Masking (TAM) and the Reducer module for filtering out redundant features. Extensive experimental results on VGGSound and Flickr-SoundNet datasets clearly validate the method’s effectiveness, with MDSGen achieving superior performance across alignment accuracy and efficiency metrics."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents MDSGen, an efficient framework for vision-guided sound generation that minimizes model size, memory usage, and inference time. Key innovations include a temporal-aware masking strategy to enhance alignment accuracy and a redundant feature removal module to filter unnecessary video information. Using a lightweight masked diffusion transformer, MDSGen outperforms larger Unet-based models on VGGSound and Flickr-SoundNet, achieving high synchronization and alignment with significantly reduced computational costs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of Novelty and Contribution: The paper presents the primary contributions are the Temporal-Awareness Masking (TAM) strategy and the visual Reducer module. However, masking strategies have been widely explored in audio generation research, as seen in works like [1, 2], and the specific concept of Temporal-Awareness Masking has been studied in [3, 4]. The visual Reducer module, primarily a 1x1 convolutional layer (line 181), lacks detailed design innovations, which limits its distinctiveness and impact.\n\n2. Insufficient Exploration of Design Choices: For video-to-audio generation, the choice of video encoder plays a crucial role in understanding the video content. Clarification on the selection of CAVP as the video encoder would add valuable insight. Additionally, the paper could explore using more video encoders, such as CLIP [5], VideoMAE [6], ViVit [7], and TAM [8], which could enrich the technical depth of the proposed method.\n\n3. Presentation and Writing:\nSome claims in the paper lack supporting evidence, such as the statements in lines 183-185 that the proposed method “minimizes redundant features that could lead to overfitting” and in line 224 that setting N_2 = 4 “gives better performance for audio data.” These points would benefit from empirical support to substantiate their validity.\n\n4. Supplementary Material: The quality of generated audio samples in the supplementary material raises concerns regarding the overall quality of results produced by the proposed method, which may affect its effectiveness and appeal.\n\n[1]Pascual S, Yeh C, Tsiamas I, et al. Masked Generative Video-to-Audio Transformers with Enhanced Synchronicity[J]. arXiv preprint arXiv:2407.10387, 2024.\n\n[2]Borsos Z, Sharifi M, Vincent D, et al. Soundstorm: Efficient parallel audio generation[J]. arXiv preprint arXiv:2305.09636, 2023.\n\n[3]Bai H, Zheng R, Chen J, et al. A $^ 3$ T: Alignment-Aware Acoustic and Text Pretraining for Speech Synthesis and Editing[C]//International Conference on Machine Learning. PMLR, 2022: 1399-1411.\n\n[4]Garcia H F, Seetharaman P, Kumar R, et al. Vampnet: Music generation via masked acoustic token modeling[J]. arXiv preprint arXiv:2307.04686, 2023.\n\n[5]Radford A, Kim J W, Hallacy C, et al. Learning transferable visual models from natural language supervision[C]//International conference on machine learning. PMLR, 2021: 8748-8763.\n\n[6]Tong Z, Song Y, Wang J, et al. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training[J]. Advances in neural information processing systems, 2022, 35: 10078-10093.\n\n[7]Arnab A, Dehghani M, Heigold G, et al. Vivit: A video vision transformer[C]//Proceedings of the IEEE/CVF international conference on computer vision. 2021: 6836-6846."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No concern."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. The reducer is designed to have fixed weights, serving as a weighted average of all the frames. However, the sound events of different videos are distinct, so why not adopt a dynamic weighted average strategy, e.g., attention pooling?\n2. What is the exact implementation of the model? How is the visual conditioning (i.e., $p(x|v)$) implemented?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The idea of compressing visual representations into one single vector is bold and intriguing, which reduces significant computing pressure on the DiT side.\n2. The proposed TAM strategy is interesting and makes sense. Previous works about masking audio representations, such as AudioMAE, have drawn conclusions that the unstructured masking strategy is superior, which contradicts the conclusion in this paper. I believe this paper brings more insights into this topic. \n3. The authors have conducted tons of ablation experiments to support their model design and parameter decision, making the conclusions plausible."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces MDSGen, an innovative framework designed for vision-guided open-domain sound generation with a focus on optimizing model parameter size, memory consumption, and inference speed. It features two major innovations: a redundant video feature removal reducer and a temporal-aware masking strategy. Utilizing denoising masked diffusion transformers, MDSGen achieves efficient sound generation without the need for pre-trained diffusion models. On the VGGSound dataset, the smallest MDSGen model demonstrates a 97.9% alignment accuracy while using fewer parameters, consuming less memory, and performing faster inference compared to the current state-of-the-art models. The results underscore the scalability and effectiveness of this approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The major concern is about the modeling of audio representations, as I am familiar with this field. I believe that Mel spectrum is more of a 1D feature rather than 2D, because the spectrum does not satisfy translational invariance (if a formant chunk in a spectrum is moved from the bottom left to the top right, the semantics of the sound are likely to be completely destroyed), and the frequency domain and time domain cannot simply be simulated by spatial coordinates. A relevant observation in this paper is that a complete random masking strategy is underperformed by the temporal-aware masking strategy. Therefore, I believe that considering Mel spectrograms as gray-scale images and modeling them using 2D VAE pretrained with real images is suboptimal, which further prevents modeling sounds with varying lengths. There are already approaches that model audio using 1D VAE, such as Make-an-audio 2. So can the authors provide justifications for choosing 2D rather than 1D? In my view, choosing 1D combined with the TAM strategy could form a more compelling motivation.\n2. The idea of compressing visual representations into one single vector is intriguing. However, I don't understand why this could work. How does one single vector provide accurate information about temporal position? I believe Diff-foley works because it adopts a sequence-to-sequence cross-attention mechanism, which provides rough sequential and positional information for the audio to follow. Could the author provide further analysis and discussion on this point? For example, analyzing the components related to temporal position within that vector, or the relation of the learned weights of reducer between key frames of videos. \n3. Similar concern: the learned weights of reducer seem to be focused more on the head and tail frames of videos. Does this imply that the reducer is more focused on global video information? How can it be determined that it is capable of extracting local positional information?\n4. The alignment classifier proposed in Diff-foley only reaches 90% accuracy on their test set. However, the best performance in this paper reaches 98+. How could this happen? Is the classifier involved during the training process?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. MDSGen achieves strong results with a small model size, making it useful for real-time applications. Compared to larger models, it is faster and more memory-efficient.\n2. TAM is an interesting approach that focuses on time-based information in audio, aiming to improve alignment by using masking based on temporal patterns rather than spatial patterns (commonly used for images).\n3. The paper provides extensive experiments with detailed comparisons against other models. Ablations for each key component further clarify the model’s design choices."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents MDSGen, an efficient and compact framework for generating open-domain sounds guided by visual input. MDSGen uses masked diffusion transformers instead of the usual U-Net architectures, optimizing for faster speed, lower memory use, and parameter efficiency. Key features include a module to remove redundant video data and a Temporal-Awareness Masking (TAM) strategy, both aimed at efficient, high-quality audio-visual alignment. Tests on the VGGSound and Flickr-SoundNet datasets show that MDSGen achieves strong alignment accuracy with much lower computational demands than larger models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I question the decision to use an image-trained VAE (from Stable Diffusion) rather than an audio-specific VAE, such as those in AudioLDM [1]. An audio-dedicated VAE could better capture the temporal and spectral nuances inherent to sound, which are often lost when treating audio as an image. Relying on an image-based VAE reduces the model’s potential to fully leverage audio-specific features and may affect TAM’s performance.\n2. The authors highlight channel selection within the RGB output as a means of optimizing the final mel-spectrogram. While using the G channel showed marginal improvements, I question if relying on such RGB channel selection can sufficiently address the nuances of audio spectrogram representation (similar to 1). A more audio-specific solution that doesn’t require treating spectrograms as RGB images would likely be more consistent with the needs of audio data, as these channels are meant for pixels, not spectral data.\n3. I would suggest that the authors conduct a human perceptual study to better assess audio quality. Relying solely on quantitative metrics may not fully capture perceptual quality, as these measures can sometimes be unreliable.\n4. In Section 5.4, various masking strategies are explored, and TAM shows a clear improvement over random masking and FAM. However, the reasons for TAM’s superiority are not fully explained. It would be beneficial to discuss why TAM outperforms FAM in this context, particularly since FAM is intuitively suitable for audio data.\n5. I noticed a missing citation for SpecAugment [2] and AudioMAE [3], a masking approach relevant to TAM proposed here.\n\nReferences\n\n[1] Liu et al. AudioLDM: Text-to-Audio Generation with Latent Diffusion Models.\n\n[2] Park et al. SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition.\n\n[3] Huang et al. Masked Autoencoders that Listen."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A novel approach is presented for highly efficient vision-guided sound synthesis using masked diffusion models"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024mdsgen,\ntitle={{MDSG}en: Fast and Efficient Masked Diffusion Temporal-Aware Transformers for Open-Domain Sound Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yFEqYwgttJ},\nnote={under review}\n}"
},
"abstract": {
"value": "We introduce MDSGen, a novel framework for vision-guided open-domain sound generation optimized for model parameter size, memory consumption, and inference speed. This framework incorporates two key innovations: (1) a redundant video feature removal module that filters out unnecessary visual information, and (2) a temporal-aware masking strategy that leverages temporal context for enhanced audio generation accuracy. In contrast to existing resource-heavy Unet-based models, MDSGen employs denoising masked diffusion transformers, facilitating efficient generation without reliance on pre-trained diffusion models. Evaluated on the benchmark VGGSound dataset, our smallest model (5M parameters) achieves 97.9% alignment accuracy, using 172x fewer parameters, 371% less memory, and offering 36x faster inference than the current 860M-parameter state-of-the-art model (93.9% accuracy). The larger model (131M parameters) reaches nearly 99% accuracy while requiring 6.5x fewer parameters. These results highlight the scalability and effectiveness of our approach."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"vision-guided audio generation",
"fast inference",
"open-domain sound synthesis",
"masked diffusion models",
"temporal learning",
"visual sound source localization",
"generative AI"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/fada5da88a48955dabddda9ffa955b128b00c16e.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/15cf2434cce953c18de4f78948a1e596b8f8c6b4.zip"
},
"title": {
"value": "MDSGen: Fast and Efficient Masked Diffusion Temporal-Aware Transformers for Open-Domain Sound Generation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yFGR36PLDJ | Simple, Good, Fast: Self-Supervised World Models Free of Baggage | main | Active | Reinforcement learning;World models;Self-supervised learning;Atari 100k | reinforcement learning | 3;6;6;8 | 3;2;3;3 | 3;3;3;3 | 1;3;2;3 | 3;3;2;3 | 5.75 | 2.75 | 3 | 2.25 | 2.75 | -0.080845 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I think it will be useful if the paper can present accuracy numbers in similar training time, or training time numbers at roughly the same accuracy numbers. That will be very useful in positioning this work against existing methods."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "## Presentation\nThis paper is well-written with thorough discussion on the related works, the design philosophy and the precise formulations of the proposed modeling. The discussions are usually precise and insightful. The important elements in building the proposed world-models, such as the POMDP formulation, the representation learning (including sufficient details, such as image augmentations, temporal consistency, covariance regularization), the dynamics learning (the conditional independence assumption and the resultant factorization) are all presented clearly, leaving the readers with no doubts on the technical components and the underlying reasoning.\n\nIn many cases, the discussions have involved clear contrast with prior works, for example in Table 1 where the design choices in a good selection of prior works are presented, and compared with the design choices in the proposed SGF.\n\n## Empirical Studies\nThe paper is tested on the standard Atari 100k tasks that is standard for this kind of work. The empirical study seems sounds and demonstrates a few interesting properties of this work that may be useful for researchers in this area. In particular\n- It has presented through ablation studies in Section 4.2. showing the importance of various design components, such as state (image) augmentations, action/frame stacking, temporal consistency and sample contrastive formulation. Additional details are also included in the appendix, such as in Table 3 in Appendix D.\n- The paper has presented detailed comparisons with other methods, as presented in Appendix D Table 2."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper discusses the design of a world model - a parametric model that predicts the transitions probabilities, reward functions and terminal state distributions to serve as a simulation environment for reinforcement learning. The paper proposes to simplify some existing elements in recent world-models, such as sequence models (i.e. RNNs, transformers), to focus on simple ingredients (e.g. frame stacking). It instead proposes to keep the maximum information and temporal consistency formulation as the most essential properties of effective world models. The resultant model is dubbed \"Simple, Good and Fast World Models\" (SGF) which demonstrates somewhat competitive performance against other existing world models in the Atari 100k benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are a few limitations that seem to limit the contributions of this work.\n\n- As discussed thoroughly in the related work section in the paper, world model in training reinforcement learning agents is not a new idea. In such cases, it is useful to establish that this work is addressing a significant weakness in prior works, without sacrificing other important metrics. In this case, the main motivation of SGF seems to be presenting a simple, fast, yet accurate method to train a good world model. While SGF certainly fit the bill for the first two, it seems to fall short significantly in the third. The result in Table 2 seems to suggest that SGF is much weaker than prior works such as IRIS and DreamerV3, which are all based on learning on imagination and hence are arguably fairly related.\n\n- The choice to drop sequence models appears to be a fairly significant limiting factor. This was acknowledged clearly in Line 462-464, which lists the decision to drop sequence models as a potential reasons why this work does not reach SOTA performance on Atari 100k. This seems like a fairly trivial observation - certain games in Atari 100k requires long term reasoning, and it is indeed one of the most challenging aspects of world models for RL agents. It should be expected that removing a components specifically designed to address this important challenge will result in inferior performance. It seems unlikely that researchers in this field will learn much more about how to design better world models if they are simply presented with results comparing methods with and without a sequence model - they probably already know that it is going to be much worse for certain tasks that require long term reasoning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How do the method perform with long-term dependencies?\n2. How do feature extraction perform if we change the SSL training objective?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. I believe these kinds of works are important. It is easy to just incrementally propose new components to improve the performance of systems while considerably increasing the engineering complexity. This does not give a clear view of the actual importance of the components included in the SOTA of world modeling. Going in a completely different direction is in my opinion a needed move sometimes, and it will help to shape new design choices for world modeling. Hence, the motivation is strong, and the reasoning behind that is coherent.\n2. The results are compelling. I believe that on Atari100K the simple method proposed performs quantitatively well, even compared to baselines that are far more complex, and with considerably lower runtime.\n3. The comparison with existing methods is nontrivial and requires a proper analysis of the literature. Table 1 is also interesting and will be useful for future work.\n4. The paper is well-written and well-motivated, all introduced explanations are useful and the writing is compact enough. The proposed experiments are interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new approach to world modeling for the training of RL-based agents. The current state of literature employs different mechanisms to achieve a correct modeling of long sequences, including reconstruction, recurrent modules, memory, etc. The authors propose to simplify the world modeling and just use self-supervised techniques, inspired by VicReg, and a simple learning strategy for the system dynamics. There is a comparison of existing methods and the proposed one on Atari100k, and appropriate ablations of the components."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I believe that while the proposed method focuses on short-term dependencies, as correctly stated in the limitation, how much performance degrades with an increasingly long-term dependency on actions would be important to quantify. This will allow us to assess the limitations of the proposed method in a more robust manner, for people to build upon.\n2. It is not clear to me why only VICReg is chosen for representation extraction. There are relationships with BYOL and SimSiam as reported in the appendix, but it is unclear how performance would change if these approaches were instead used for representation extraction rather than VICReg."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Although this is partially explained in the Limitations, could you go into more detail about: for more complex tasks than Atari-games, where longer term knowledge needs to be remembered (where orders of magnitude more memory of early events is required), what parts of the models should be scaled up or significantly modified / replaced (e.g. using sequence model: RNN, Transformers, ...) to be optimal in terms of accuracy and training time?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "In this work, authors try to find the most necessary components that are the most optimal in terms of accuracy and training time.\n\n1. The presented SGF approach lies on the Pareto optimality curve on the chart of Accuracy (normalized mean score) and Training time (hours) - Figure 5, where the other points on the Pareto optimality curve are: SPR, DreamerV3, EfficientZero (performs lookahead)\n\n2. The optimal combination of improvements (frame stacking, action stacking, temporal consistency, augmentations, sample-contrastive) has been found to achieve the highest accuracy in five games - Figure 4\n\n3. Optimal sizes of models and training times have been found in Table 6 to achieve the highest possible mean scores\n\n4. The presented experimental results show the necessity of temporal consistency in Figure 7"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Authors introduce model-based method: SGF (a Simple, Good, and Fast) world model.\nThe simplicity of the model is that: it stacks 4 previous frames and actions to capture short-time dependencies, and it uses data augmentation to enhance robustness. It is trained to provide maximum information and temporal consistency.\nFor simplicity, it does not use: image reconstructions, discretization of representations, sequence models (RNN, transformers, ...), probabilistic predictions for deterministic environments.\n\nThey use several loss functions:\n- MSE-loss for temporal consistency (between next embedding and action-conditioned next embedding)\n- Variance and Covariance regularization loss for Information Maximization (use the current and next batches of embeddings as input)\n- Loss for 3 distributions for Dynamics learning: \n - transition - probability of transition to y'-representation if a-action is applied to y-representation (gradient is only propagated to a-action)\n - reward - probability of r-reward if a-action is applied to y-representation\n - terminal - probability of e-terminal-action if a-action is applied to y-representation\n\nThey train several models using AdamW optimizer: \n- Encoder (f computes representations from current and next input image observations): 4 convolutional layers (kernel size 4, stride 2, pad 1), linear layer dim = 512, norm-layer, SiLU activation\n- Projector (g computes current and next embeddings from current and next representations): MLP with 2 hidden layers of dim = 2048\n- Predictor (h predicts action-conditioned next embedding): MLP with 2 hidden layers of dim = 2048\n- Transition network: MLP with 5 hidden layers of dim = 1024, and a residual connection from the input to the output (suggested to use the sequence model for future work)\n- The networks of the reward distribution, terminal distribution, policy, and value function: MLPs with 2 hidden layers of dim = 1024\n\nThis simple approach achieves shorter training times compared to other world models and good performance on the Atari 100k benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While optimal sizes of models and training times have been found in Table 6 to achieve the highest possible mean scores, this may mean that either the scalability of the approach is limited, or the approach must scale in many directions simultaneously to achieve even higher mean scores. Although the approach lies on the Pareto optimality curve on the Accuracy vs Training time chart, i.e. it is one of many optimal options, it is not shown how this approach can be scaled or improved to achieve the highest accuracy with increasing Training time. \nOr it requires more serious architectural changes, f.e. as the authors suggest for future work - to use the sequence model for transition network.\n\n2. The proposed method is optimal for tasks that are probably simple enough and do not require remembering very old events, so it is sufficient to have a stack of 4 previous frames and actions, and it is not shown how this approach can be transferred to more complex tasks where orders of magnitude more memory of early events is required."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Considering its simplicity, it would be nice and help the paper strongly if scalability can be demonstrated. Can it be scaled?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper contains thorough experiments.\n2. The method is simple and fast."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a world model that uses self-supervised representation learning, captures short-time dependencies through frame and action stacking, and enhances robustness against model errors through data augmentation. This paper is based on a partially observable Markov decision process. It stacks the recent observations and actions to capture short-time dependencies. It introduces stochasticity through data augmentation. To build meaningful representations of observations, this paper enforeces information maximization and temporal consistency of the features."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The strategy of stacking observations and actions seem overfitting to the chosen dataset, Atari 100k benchmark, rather than fitting to the real-world, where long-term dependencies are common.\n2. Related to the first weakness, while the games in the dataset are deterministic, real-world can be very stochastic. It is questionable whether the model can be applied in real-world cases."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "What are the essential components of world models? We present a simple, good, and fast world model, and evaluate it on the Atari 100k benchmark."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024simple,\ntitle={Simple, Good, Fast: Self-Supervised World Models Free of Baggage},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yFGR36PLDJ},\nnote={under review}\n}"
},
"abstract": {
"value": "What are the essential components of world models? How far do we get with world models that are not employing RNNs, transformers, discrete representations, and image reconstructions? This paper introduces SGF, a Simple, Good, and Fast world model that uses self-supervised representation learning, captures short-time dependencies through frame and action stacking, and enhances robustness against model errors through data augmentation. We extensively discuss SGF’s connections to established world models, evaluate the building blocks in ablation studies, and demonstrate good performance through quantitative comparisons on the Atari 100k benchmark. The source code will be made available."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement learning",
"World models",
"Self-supervised learning",
"Atari 100k"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ba3b0a1e4449258d1a7a5e67d2c6265328fb8ca9.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Simple, Good, Fast: Self-Supervised World Models Free of Baggage"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yG1fW8igzP | Data-Augmented Phrase-Level Alignment for Mitigating Object Hallucination | main | Active | Multimodal LLMs;Object Hallucination;Vision-language Models | applications to computer vision, audio, language, and other modalities | 3;5;6;8 | 5;4;4;4 | 2;2;3;3 | 2;2;3;4 | 3;2;3;4 | 5.5 | 4.25 | 2.5 | 2.75 | 3 | -0.800641 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weakness part."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The DPA loss function is an innovative approach that mitigates object hallucinations by targeting phrase-level distinctions, offering a focused solution for multimodal hallucination issues.\n\n* The data augmentation method is straightforward yet effective, generating hallucinated-correct response pairs that enable the model to learn nuanced differences with minimal complexity.\n\n* DPA demonstrates significant performance gains"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Data-augmented Phrase-level Alignment (DPA), a novel loss function designed to reduce object hallucinations in multimodal large language models (MLLMs) while preserving their general vision-language capabilities. By generating pairs of hallucinated and correct responses through data augmentation, DPA trains MLLMs to distinguish hallucinated phrases from correct ones. Experimental results show that MLLMs fine-tuned with DPA achieve significant improvements, reducing hallucination rates and enhancing performance on visual question-answering and image description tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The core idea of the paper is to generate correct-hallucinated data pairs through data augmentation. However, I have three questions about this process.\n1. In hallucination-related datasets, object hallucination does not frequently occur, raising questions about the validity of replacing every possible object and attribute with hallucinations. Since models seldom generate such hallucinations, this augmentation strategy might introduce excessive \"non-realistic\" hallucination cases, leading to a mismatch between training and real-world distributions, potentially impacting the model's generalization.\n2. The method’s effectiveness may be limited by the diversity of data augmentation. Since hallucinated data generation relies on a finite set of replacements, it may not fully cover the types of hallucinations that could appear in practical applications, limiting the model’s ability to handle unseen hallucinations.\n3. The data augmentation strategy itself lacks independent experimental evaluation. The experiments mainly focus on improvements in model performance across different benchmarks, without assessing the augmentation strategy’s generalization effect and impact on model training stability across tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does DPA perform on other types of hallucinations beyond object hallucination, such as attribute or location hallucinations?\n2. The reported results for some baseline methods, such as VCD, differ from those in the original papers. Did you directly test VCD, or were the results extracted from their papers? If the latter, the comparison may not be entirely fair, as it mixes experimental results with reported findings from other sources.\n3. This paper augments the training data with hallucinated responses by substituting terms in both open-set and closed-set cases. However, simple substitution could potentially impact fluency and grammatical accuracy. Could this approach compromise data quality and, in turn, affect model performance?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper proposes an effective alignment method that successfully mitigates object hallucinations, showing improved scores across hallucination benchmarks in both discriminative and generative tasks.\n2. DPA reduces object hallucinations without impacting the overall performance on VQA tasks, achieving comparable or even higher scores on VQA benchmarks.\n3. The paper provides a variety of quantitative results across multiple benchmarks, including both generative and discriminative tasks. This breadth of evaluation provides some evidence of the DPA’s effectiveness in certain contexts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a Data-Augmented Phrase-Level Alignment (DPA) approach aimed at reducing object hallucinations in Multimodal Large Language Models (MLLMs). The method centers on generating paired “correct” and “hallucinated” responses using data augmentation, which are then used to train a phrase-level alignment loss that reduces the probability of hallucinated tokens. The authors strive to maintain the model’s overall vision-language capabilities while minimizing hallucinations. Experimental results across multiple benchmarks indicate that DPA effectively mitigates hallucinations and may even improve detailed object coverage in generated descriptions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The DPA approach offers limited novelty beyond existing finetuning and data augmentation techniques. While phrase-level alignment is applied in a new way here, it basicly builds on existing concepts and does not significantly advance the field of hallucination mitigation research.\n2. Although the results include some competitive baselines, such as HA-DPO and EOS, several relevant and recent methods are omitted. A more comprehensive comparison would strengthen the evaluation. Additionally, some detailed results for LLaVA-13B and VILA are missing, and the selection of methods across different benchmarks lacks consistency.\n3. While the paper asserts that DPA preserves general vision-language capabilities, the supporting evidence is limited. A broader evaluation across diverse benchmarks would help determine whether this approach impacts overall performance.\n4. The authors highlight the limitations of existing methods, noting they “require massive training data.” However, the proposed DPA also introduces additional training requirements, which suggests a tradeoff between efficiency and effectiveness."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. VILA is also based on LLaVA-1.5 SFT. Is DPA equally effective on other architectures, such as Qwen?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The writing of this paper is excellent and very detailed. The experiments are comprehensive, covering most of the popular benchmarks.\n\n2. I really like this paper. Many works that use DPO-like methods to reduce hallucinations experience a decrease in VQA capabilities. The authors identified this issue and proposed a specialized loss to maintain the model’s performance while penalizing hallucinated phrases. Additionally, the authors validated their method separately on non-hallucination benchmarks, such as VQA-v2 and TextVQA, demonstrating its effectiveness. I believe this work makes a significant contribution to reducing hallucinations in MLLMs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel method called Data-augmented Phrase-level Alignment (DPA) to mitigate object hallucinations in multimodal large language models (MLLMs) for vision-language tasks. DPA generates pairs of “hallucinated” and “correct” responses to fine-tune the model, reducing the generation of hallucinated phrases. A KL divergence regularization term is added to retain the model’s general capabilities. Experimental results demonstrate that models trained with DPA exhibit significant improvements in hallucination mitigation and maintain strong performance on general tasks across multiple benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. DPA relies on the quality of the generated “hallucinated-correct” response pairs. If these generated data lack accuracy or diversity, it may affect the model’s training effectiveness and generalization capability.\n2. Although the experimental results demonstrate the effectiveness of DPA, the paper lacks a fine-grained analysis of hallucination types (such as objects, attributes, actions). Such analysis could provide a deeper understanding of the method’s performance across different types of hallucinations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No ethics review needed."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Minors:\n1. The construction of dataset is not introduced with details. Details in appendix should be at least provided via cross-reference.\n2. Figure 5 right is not illustrated in the main text. I find it not easy to understand the information it's trying to convey."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. It is critical to study the issue of hallucination in MLLMs as well as the trade-off phenomenon in the mitigation of hallucination.\n2. The proposed method is simple yet effective.\n3. Extensive experiments verify the effectiveness of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose data-augmented phrase-level alignment to mitigate object hallucination in MLLMs. The method mainly involves the generation of negative hallucinated responses and phrase-level fine-tuning. The hallucination issue of models fine-tuned with the proposed method is alleviated and the general performance is maintained."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation to mitigate hallucination at phrase level is not clearly addressed. There is no illustration on why the phrase-level loss can retain the original performance on general multimodal tasks. It seems straightforward that the constraint on the KL divergence can prevent the fine-tuned model from diverging too far. If it is the only reason, the contribution of the method is greatly weakened.\n\n2. The explanation of Figure 2 is likely to be overclaimed. On line 260, it says \"_EOS achieves a slightly lower hallucination rate_\", but the figure shows that the hallucination rate of EOS is around 5.0 and that of HALVA is around 6.5. This difference is noticeable enough to me as the gap of this metric between HALVA and LLaVA-1.5 is similar. Meanwhile, Figure A right demonstrates that HALVA has a much higher F1 score on AMBER, which is natural because neither EOS nor HA-DPO is trained with Yes/No questions.\n\n3. The presentation of results in experiments is inconsistent. The model lists are different in different tables. For instance, EOS-13B is only shown in Table 1, which makes the verification of the effectiveness less convincing.\n\n4. There are other work mitigating hallucinaton with sub-sequence level training [1]. It is recommended to discuss the difference.\n\n[1] Gunjal, Anish et al. “Detecting and Preventing Hallucinations in Large Vision Language Models.” AAAI Conference on Artificial Intelligence (2023)."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce phrase-level alignment method that can be applied to off-the-shelf MLLMs for mitigating hallucinations, while preserving their general vision-language capabilities."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dataaugmented,\ntitle={Data-Augmented Phrase-Level Alignment for Mitigating Object Hallucination},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yG1fW8igzP},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite their significant advancements, Multimodal Large Language Models\n(MLLMs) often generate factually inaccurate information, referred to as hallucination.\nIn this work, we address object hallucinations in MLLMs, where information\nis generated about an object not present in the input image. We introduce Dataaugmented\nPhrase-level Alignment (DPA), a novel loss which can be applied to\ninstruction-tuned off-the-shelf MLLMs to mitigate hallucinations, while preserving\ntheir general vision-language capabilities. To fine-tune MLLMs with DPA, we first\ngenerate a set of 'hallucinated' and 'correct' response pairs through generative data\naugmentation by selectively altering the ground-truth information of the correct\nresponses at a phrase level. The DPA loss is then used to train MLLMs to reduce\nthe likelihood of hallucinated phrases compared to the correct ones. Our thorough\nevaluation on various benchmarks confirms the effectiveness of DPA in mitigating\nhallucination while retaining the out-of-the-box performance of the MLLMs on\ngeneral tasks. For instance, MLLMs finetuned with DPA, which we refer to as Hallucination\nAttenuated Language and Vision Assistant (HALVA), improve F1 by up\nto 13.4% on hallucination visual question-answering and reduce the hallucination\nrate by up to 4.2% on image description tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multimodal LLMs",
"Object Hallucination",
"Vision-language Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/101e55289d27641711778b85d109b1c275b35374.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Data-Augmented Phrase-Level Alignment for Mitigating Object Hallucination"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yGnsH3gQ6U | Image and Video Tokenization with Binary Spherical Quantization | main | Active | quantization;visual compression;visual generation | applications to computer vision, audio, language, and other modalities | 5;6;6;6 | 5;4;3;4 | 2;3;3;3 | 2;3;3;2 | 2;3;3;3 | 5.75 | 4 | 2.75 | 2.5 | 2.75 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see the weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper presents an innovative quantization method (BSQ) that addresses the limitations of existing vector quantization approaches by offering a more efficient and scalable solution.\n2. Extensive experiments on benchmarks such as ImageNet and UCF-101 demonstrate that BSQ-ViT significantly improves reconstruction quality, outperforming prior methods in terms of speed and fidelity.\n3. The methodology is clearly explained with detailed comparisons to related work, and the theoretical basis of BSQ is well-supported with mathematical derivations and experimental validation.\n4. BSQ-ViT's ability to handle both image and video tokenization and perform well on diverse tasks such as compression and generation showcases its generalizability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel image and video tokenizer, BSQ-ViT, based on Binary Spherical Quantization (BSQ) integrated with a transformer architecture. The proposed method achieves state-of-the-art performance in image and video reconstruction with significant improvements in computational efficiency. It introduces a block-wise causal masking for video tokenization, supports variable-length videos, and delivers competitive results in visual compression against standards like JPEG2000 and H.264. BSQ-ViT also shows promising results in image generation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the transformer architecture is explored, the paper does not demonstrate the effectiveness of BSQ within a CNN-based model.\n2. The paper provides limited comparative data in video reconstruction, reducing the robustness of the comparison. Additionally, while block-wise causal attention is noted to impact performance, the study lacks experiments on BSQ without this causal masking. \n3. The reported image and video compression results are better on MS-SSIM, potentially due to the inclusion of perceptual losses like LPIPS and GAN loss in the loss function. However, the lack of subjective evaluation metrics limits the strength of the comparison.\n4. When BSQ and VQ use the same number of indexes, BSQ performs less effectively, and codebook utilization decreases as the bits increase.\n5. In the ablation experiments, the reproduction of the LFQ, which serves as the basis for this work, is too poor in terms of behavior and code usage. And it is premature to conclude that LFQ is less compatible with transformer-based codecs."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weakness part"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The Binary Spherical Quantization seems to show more effective training of the qunatization bottleneck. Analysis shows that the proposed method can provide fast speed and good performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper propose a transformer-based image and video tokenizer with Binary Spherical Quantization, which projects the high-dimensional embedding to lower-dimensional hypersphere and applies binary quantization. BSQ is parameter-efficient without an explicit codebook, scalable to token dimensions and compact. The experiments show that the proposed method achieves comparable compression performance with JPEG2000/WebP for images and H.264/H.265 for videos. And it enables masked language models to achieve competitive image synthesis quality to GAN and diffusion methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Lack of comparison at different bitrate range for visual compression results. Table 4 only provides BPP, PSNR and MS-SSIM for one bitrate point. However, visual compression tasks usually require showing a Rate-Distortion curves and compare at different bitrate points. Your can use BD-Rate metric for more reasonable comparison and analyze the results at low bitrate and high bitrate.\n\n- Test settings for ablation study. Please provide more experiment setting details. In Table 5, do VQ, LFQ and BSQ use the same tokenizers? I observed that the metrics vary significantly across different quantization methods. To alleviate the influence of training and focus on the quantization bottleneck, it is better to: 1. use the same tokenizer 2. freeze the other network parts and only train the information bottleneck parts.\n\n- Complexity. It is better to provide the computation complexity and encoding/decoding time for comparison."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. For image compression, why not use arithmetic coding to improve compression performance? Is intra-frame information used for video compression? If not, why not?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The idea of projecting high-dimensional visual embeddings onto a lower-dimensional hypersphere is straightforward yet effective.\n\n2. The motivation is clear, and the overall presentation is coherent and easy to follow. The experiments are comprehensive and provide convincing evidence to support the approach.\n\n3. The BSQ-ViT model achieves competitive performance in diverse tasks such as image/video reconstruction, generation, and compression."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a transformer-based image and video tokenizer that uses Binary Spherical Quantization (BSQ). By projecting high-dimensional visual embeddings onto a lower-dimensional hypersphere and applying binary quantization, BSQ achieves a bounded quantization error. The authors demonstrate that the proposed BSQ outperforms previous methods in image and video reconstruction, image generation, and compression tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This method uses a transformer encoder and decoder, which limit the flexibility in resolution. How do the authors address this issue?\n\n2. For image and video compression results, it would be beneficial to include an LPIPS comparison to assess perceptual performance.\n\n3. There are a few minor issues. 1) In Eq. (7), $\\hat{q}(c|u) = \\frac{\\exp(\\tau c^{T}u)}{\\sum_{c \\in C_{BSQ}} \\exp(\\tau c^{T}u)}$ might need to be revised to $\\hat{q}(c|u) = \\frac{\\exp(2 \\tau c^{T}u)}{\\sum_{c \\in C_{BSQ}} \\exp(2 \\tau c^{T}u)}$ ? 2) On page 16, line 837, $\\frac{e^{\\tau u_d \\hat{c}_d}}{e^{\\tau u_d \\hat{c}_d} + e^{\\tau u_d \\hat{c}_d}}$ should be corrected to $\\frac{e^{\\tau u_d \\hat{c}_d}}{e^{\\tau u_d \\hat{c}_d} + e^{-\\tau u_d \\hat{c}_d}}$. 3) On page 3, line 118, it would be clearer to use different symbols for the downsample factor $q$ and the bottleneck module $q$."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "My major concerns in the weakness part can be summarized as the direct comparison between BSQ, VQ and LFQ on representative image and video tasks are not adequate. As a general tokenizer for image and video, it is important to make direct comparisons with strictly controlled variables."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Visual tokenizers are critical for visual modeling. The proposed Binary Spherical Quantization (BSQ) for unified image and video tokenization is novel and important. \n\nTheoretical contribution is good. It is proved that the proposed BSQ has bounded quantization error. \n\nThe manuscript is well written. BSQ is well placed agains previous VQ and LFQ. \n\nExtensive experiments on visual reconstruction, compression and generation show the better performance of BSQ."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a visual tokenizer based on a Vision Transformer and Binary Spherical Quantization (BSQ). \nThe Transformer-based encoder-decoder leverages a block-wise causal mask and uses only visual tokens from the current or past timestamps for reconstruction. \nBSQ first projects the high-dimensional visual embedding of the encoder to a lower-dimensional hypersphere and then applies binary quantization. \nThe transformer encoder, decoder, and BSQ are seamlessly integrated into the VQ-GAN framework and trained end-to-end. \n\nThe proposed visual tokenizer has several advantages in trading off visual reconstruction quality and computational efficiency and supporting variable-length input. Experiments on visual reconstruction and compression are conducted to verify the performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My major concern is the fair comparison with VQ and LFQ. Though the ablation study is provided in tab 5 with image reconstruction task on imagenet-1k, the resolution is only 128, and more tasks should be tested: \n\nIt seems to me the results on image is not as obvious as video. For example, in image reconstruction (tab 1), the proposed method has best image quality, but the parameter number is also larger. It is better to add Ours-VQ for better comparison, as done in tab 2 for video reconstruction. \n\nIn image compression, no VQ or LFQ based method is compared. \n\nFor image generation in tab 3, more steps are used for BSQ, what if we use the same steps as VQ and LFQ? \n\nIn L389, we get a direct comparison between VQ and BSQ, BSQ has more bits (18 vs 14) and the video quality is only comparable with VQ. \n\n \n\n\n\nMinor \nThe derivation for eq. 13 is not given. Please provide a proof or give a reference here. \nThere exist some typos in the derivations. \nIn Line 878, the p shuld be Q. \nIn line 886, the p should be q^hat."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024image,\ntitle={Image and Video Tokenization with Binary Spherical Quantization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yGnsH3gQ6U},\nnote={under review}\n}"
},
"abstract": {
"value": "We propose a new transformer-based image and video tokenizer with Binary Spherical Quantization (BSQ). BSQ projects the high-dimensional visual embedding to a lower-dimensional hypersphere and then applies binary quantization. BSQ is (1) parameter-efficient without an explicit codebook, (2) scalable to arbitrary token dimensions, and (3) compact: compressing visual data by up to 100×\nwith minimal distortion. Our tokenizer uses a transformer encoder and decoder with simple block-wise causal masking to support variable-length videos as input. The resulting BSQ-ViT achieves state-of-the-art visual reconstruction quality on image and video reconstruction benchmarks with 2.4× throughput compared to the best prior methods. Furthermore, by learning an autoregressive prior for adap-\ntive arithmetic coding, BSQ-ViT achieves comparable visual compression results with commonly used compression standards, e.g. JPEG2000/WebP for images and H.264/H.265 for videos. BSQ-ViT also enables masked language models to achieve competitive image synthesis quality to GAN and diffusion approaches."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"quantization",
"visual compression",
"visual generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/1b455b6708f0254eb7cbde86577b974c8632a9a4.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Image and Video Tokenization with Binary Spherical Quantization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yGv5GzlBwr | Diffusion Auto-regressive Transformer for Effective Self-supervised Time Series Forecasting | main | Active | Self-supervised Learning;Diffusion Model;Time Series Forecasting | learning on time series and dynamical systems | 3;5;5;8 | 4;3;4;3 | 2;3;3;3 | 2;2;3;3 | 2;3;3;3 | 5.25 | 3.5 | 2.75 | 2.5 | 2.75 | -0.70014 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Check Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+ Combining self-attention for inter-patch dependencies, diffusion mechanisms for intra-patch dependencies, and auto-regressive optimization is an interesting and innovative approach to time series forecasting.\n+ The diffusion-based reverse process for reconstructing the sequence is novel in the context of time series forecasting.\n+ Good writing and organization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces TimeDART (Diffusion Auto-Regressive Transformer for Time Series Forecasting), a novel self-supervised learning framework designed to enhance time series forecasting. TimeDART addresses key challenges in the field, particularly capturing both long-term dependencies and local features within time series data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "+ The combination of Transformer-based attention mechanisms with the denoising diffusion process introduces substantial computational overhead. The paper mentions running experiments on a single NVIDIA RTX 4090 GPU, but it doesn't provide detailed insights into the time and memory consumption required for training. How scalable is TimeDART for larger datasets or real-time applications? \n+ The experiments conducted on noise scheduling, the number of diffusion steps, and the number of layers in the denoising network highlight a significant degree of sensitivity to hyperparameter selection. How the model will perform well in practical settings with minimal tuning?\n+ The cross-domain evaluation shows strong results on energy datasets but weaker performance on datasets like Exchange. Is TimeDART overly sensitive to the type of data it is pre-trained on? For instance, does it struggle with financial or highly volatile datasets because of the lack of shared characteristics between domains (e.g., energy vs. finance)? Could this method benefit from domain adaptation techniques to make the cross-domain transfer more robust?\n+ The model uses a denoising diffusion loss, which may not be the most suitable for every type of forecasting task. How does TimeDART perform with other loss functions (e.g., Quantile Loss, Huber Loss) that are often used in time series forecasting tasks where the goal is to forecast confidence intervals or robustly handle outliers?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see weaknesses."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. We propose a novel generative self-supervised learning framework, TimeDART, which in tegrates diffusion and auto-regressive modeling to effectively learn both global sequence dependencies and local detail features from time series data, addressing the challenges of capturing comprehensive temporal characteristics.\n 2. We design a cross-attention-based denoising decoder within the diffusion mechanism, which enables adjustable optimization difficulty during the self-supervised task. This design significantly enhances the model’s ability to capture localized intra-patch features, improving the effectiveness of pre-training for time series forecasting. Diffusion models and autoregressive attention mechanisms are rare collaborations in temporal tasks, and this field brings new ideas. \n3. The experimental results show that the combination of Mamba and propagation mechanism is very effective, and it also exceeds the predictive performance of supervised learning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Recently, effectively capturing both the global sequence dependence and local detail features within time series data remains challenging. To address this, the authors propose a novel generative self-supervised method called TimeDART, denoting Diffusion Auto-regressive Transformer for time series forecasting."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The reviewer is concerned about the computational overhead associated with this approach. The reviewer's core concern stems from the introduction of diffusion mechanisms, which can lead to a substantial increase in training and inference overhead for the model as a whole. If the model is expensive, the computational conditions required to solve the real task will be severe. Therefore, the authors need to report the actual time of the training and inference phase of other baseline models in the future and report the GPU and server model.\n2. The motivation for choosing to use a causal mechanism in Transformer requires further explanation. After all, time series data is encoded with more complex patterns. In particular, there are random changes caused by extreme weather events in the meteorological data, and this causal relationship is strong. But many things cause and effect is unclear, so whether such a component is appropriate needs to be used for the specific task."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Well-written\n2. The author provides the code\n3. The performance of the model is proved by experiments"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "To effectively capture both the global sequence dependence and local detail features within time series data, this paper proposed a novel generative self-supervised method called TimeDART, denoting Diffusion Auto-regressive Transformer for Time series forecasting. Extensive experiments demonstrate that TimeDART achieves state-of-the-art fine-tuning performance compared to the most advanced competitive methods in forecasting tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In the original paper [1], the performance of patchTST seems to be better, and it is suggested that the author should evaluate it more fairly.\n2. Why didn't the author consider evaluating the performance of classification tasks? Currently, aside from forecasting tasks, most self-supervised learning models [2] also focus on the performance of classification tasks. This is because the main purpose of self-supervised learning is to enhance the quality of representations generated by the model and to uncover key semantic features, which is especially important for classification tasks.\n3. A crucial role of self-supervised learning is to improve the performance of backbone models [3], but the author only uses Transformer as the backbone. I am curious whether the proposed method would still be effective if MLP or TCN were used as the backbone.\n4. The author's core motivation remains to capture both global and local dependencies, which is similar to most existing works [1] [4]. In other words, this paper lacks a central challenging problem, making the contribution at the motivational level somewhat limited.\n5. Considering that the core motivation of this paper is to capture global and local dependencies, I suggest the author evaluate the model's performance on datasets with stable patterns, such as PEMS04. This is because datasets like ETT and Exchange have inherent issues with distributional shifts [5].\n\n[1] A time series is worth 64 words: Long-term forecasting with transformers\n\n[2] SimMTM: A Simple Pre-Training Framework for Masked Time-Series Modeling\n \n[3] Cost: Contrastive learning of disentangled seasonal-trend representations for time series forecasting\n\n[4] Segrnn: Segment recurrent neural network for long-term time series forecasting\n\n[5] Exploring Progress in Multivariate Time Series Forecasting: Comprehensive Benchmarking and Heterogeneity Analysis"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "**Question 1:** \n\nThis paper uses an autoregressive prediction paradigm, however autoregressive approaches generally suffer from error accumulation and high inference time overheads. To the best of our knowledge, TimeDART has not designed a special training-inference strategy or model architecture to mitigate these problems. Further discussion of TimeDART's limitations in the autoregressive prediction paradigm is urgent and necessary.\n\n**Question 2:** \n\nAs the authors say ‘we adopted the channel-independence setting’. However, for datasets with a very large number of channels, such as ECL and Traffic, how can a channel-independent design establish connections between multiple channels? Meanwhile, the experimental results of Crossformer[10], SAMformer[11] and iTransformer[12], show that cross-channel modelling is crucial for multivariate time series, and we believe that a discussion on inter-channel modelling for multivariate time series is necessary. However, to the best of our knowledge, TimeDART does not consider these factors in its modelling.\n\n**Question 3:** \n\nSee Weakness 2,3 for our concerns about the experimental results, and we believe that the introduction of competitive and up-to-date baselines is considered necessary. In addition, the authors are expected to explain the phenomena in the ablation experiments, details of which can be referred to Weakness 4.\n\n**Reference:** \n\n10) Zhang, Yunhao and Junchi Yan. “Crossformer: Transformer Utilizing Cross-Dimension Dependency for Multivariate Time Series Forecasting.” ICLR 2023.\n\n11) Ilbert, Romain et al. “SAMformer: Unlocking the Potential of Transformers in Time Series Forecasting with Sharpness-Aware Minimization and Channel-Wise Attention.” ICML 2024.\n\n12) Liu, Yong et al. “iTransformer: Inverted Transformers Are Effective for Time Series Forecasting.” ICLR 2024."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors outline the main approaches to self-supervised learning in the time series domain, including mask reconstruction and contrast learning, and analyses the shortcomings of the two existing self-supervised learning paradigms separately, e.g., the mask reconstruction-based approach introduces a huge gap between pre-training and fine-tuning, and the contrast-learning-based approach prioritizes the capture of discriminative features, which lead to a huge discrepancy between pre-training tasks and fine-tuning tasks.\n\nIn addition, the authors raise two issues critical to the self-training paradigm, 1) how to narrow the gap between the pre-training target and the downstream fine-tuning task, and 2) modelling both long-term dependencies and local pattern information in the self-supervised pre-training phase. However, we believe that TimeDART is not the best solution to these problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes TimeDART, a novel self-supervised learning paradigm that hopes to learn transferable generic representations from unlabeled data through pre-training and then fine-tune them to different downstream tasks, e.g., forecasting tasks, etc. In this paper, a unified self-supervised learning framework is established by fusing the diffusion-denoising process and autoregressive modeling. By learning global sequence dependencies and local detail features in multi-domain time series data, the model's ability to capture comprehensive time series features and cross-domain generalization ability are improved. Specifically, TimeDART designs a cross-attention based denoising decoder in the diffusion mechanism, which improves the effectiveness of time series pre-training by significantly enhancing the model's capability to capture features within local blocks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Weakness 1:** \n\nAs a Diffusion-based model, we would like to introduce more valuable metrics to fully examine the performance of the proposed TimeDART. Specifically, in order to evaluate the prediction accuracy and generalization capability of TimeDART from both forecasting and generation perspectives, existing studies often include the following four metrics (including Context-FID, Correlational Score, Discriminative Score, and Predictive Score) for comprehensively evaluating the performance of Diffusion methods. \n\nIn addition, the proposed TimeDART is not compared with advanced Diffusion-based approaches, such as Diffusion-TS[1], mr-Diff[2], and MG-TSD[3]. We believe that the introduction of more competitive and up-to-date approaches can demonstrate the effectiveness of the proposed method more objectively.\n\n**Weakness 2:** \n\nIn the in-domain setting (Table 2), there are the following weaknesses: \n\n* The performance improvement of the proposed TimeDART over SOTA methodologies SimMTM and PatchTST is less than 5%, which indicates that the performance improvement of the model is not obvious.\n\n* In addition, we note that the performance of \"Random Init\" shown in Table 1 is slightly worse than Supervised PatchTST. Does this indicate that in the supervised setting of a single domain, the proposed model architecture exhibits worse performance compared to PatchTST? If the modeling capability of the TimeDART is poor in small datasets, the model will often show worse generalization ability in cross-domain pre-training, which leads to the rationality of the model architecture being questioned.\n\n**Weakness 3:** \n\nIn the cross-domain setting (Table 3), there are the following weaknesses that can be improved: \n\n* The model is pre-trained on only two power domain datasets (ETT and Electricity). As a result, only the single domain information is included in the model, which limits the generalization ability of the model under cross-domain challenges. Recent unified time series forecasting models include two paradigms. The first is unified models based on LLM fine-tuning, such as OneFitsAll[4] and TimeLLM[5]. This is followed by pre-training on a multi-domain hybrid Time series dataset followed by fine-tuning on specific downstream tasks, e.g., Timer[6], Moriai[7] and MOMENT[8]. In conclusion, we believe that introducing information from more domains during pretraining can improve the cross-domain generalization of the model, and TimeDART is expected to be pretrained on a wider range of datasets. \n\n* Table 3 only shows the performance comparison of TimeDART under different Settings; however, it lacks the comparison with the latest baseline. In fact, recent UniTime[9] have achieved joint pretraining across multiple domains. Therefore, we expect the authors to introduce more advanced baselines to compare with TimeDART, which will help us get a comprehensive understanding of TimeDART's performance.\n\n**Weakness 4:** \n\nIn ablation experiments (Table 4), there are the following drawbacks: \n\n* There is a lack of detailed descriptions specific to ablation experiments, such as in-domain Settings or cross-domain Settings. When we compare the results in Table 4 and Table 2, we can speculate that the ablation experiment is only carried out in the in-domain setting. However, in the cross-domain setting, the reader is eager to know whether the proposed autoregressive diffusion model is effective. 
\n\n* In Table 4,\" The \"W/o AR\" model obtained the improved \"W/o AR\" model after introducing the Diffusion-based decoder; However, the performance of the latter was slightly degraded on the ETTh2 and Electricity datasets. This may indicate that the predictions of the model become worse when the diffusion model is introduced. This casts doubt on the rationality of introducing a Diffusion-Denoising process in the Decoder.\n\n**Reference:** \n\n1) Yuan, Xinyu and Yan Qiao. “Diffusion-TS: Interpretable Diffusion for General Time Series Generation.” ICLR 2024.\n\n2) Shen, Lifeng et al. “Multi-Resolution Diffusion Models for Time Series Forecasting.” ICLR 2024.\n\n3) Fan, Xinyao et al. “MG-TSD: Multi-Granularity Time Series Diffusion Models with Guided Learning Process.” ICLR 2024.\n\n4) Zhou, Tian et al. “One Fits All: Power General Time Series Analysis by Pretrained LM.” NIPS 2023.\n\n5) Jin, Ming et al. “Time-LLM: Time Series Forecasting by Reprogramming Large Language Models.” ICLR 2024.\n\n6) Liu, Yong et al. “Timer: Generative Pre-trained Transformers Are Large Time Series Models.” ICML 2024.\n\n7) Woo, Gerald et al. “Unified Training of Universal Time Series Forecasting Transformers.” ICML 2024.\n\n8) Goswami, Mononito et al. “MOMENT: A Family of Open Time-series Foundation Models.” ICML 2024.\n\n9) Liu, Xu et al. “UniTime: A Language-Empowered Unified Model for Cross-Domain Time Series Forecasting.” Proceedings of the ACM on Web Conference 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A novel generative self-supervised learning framework for time series forecasting that simultaneously models global sequence dependencies and captures local detail features effectively."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024diffusion,\ntitle={Diffusion Auto-regressive Transformer for Effective Self-supervised Time Series Forecasting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yGv5GzlBwr},\nnote={under review}\n}"
},
"abstract": {
"value": "Self-supervised learning has become an essential and popular approach for enhancing time series forecasting, enabling models to learn universal representations from unlabeled data. However, effectively capturing both the global sequence dependence and local detail features within time series data remains challenging. To address this, we propose a novel generative self-supervised method called TimeDART, denoting Diffusion Auto-regressive Transformer for Time series forecasting. In TimeDART, we treat time series patches as basic modeling units. For one thing, we employ an self-attention based Transformer encoder to model the dependencies of inter-patches. For another, we introduce diffusion and denoising mechanisms to capture the locality features of intra-patch. Notably, we design a cross-attention-based denoising decoder that allows for adjustable optimization difficulty in the self-supervised task, facilitating more effective self-supervised pre-training. Extensive experiments demonstrate that TimeDART achieves state-of-the-art fine-tuning performance compared to the most advanced competitive methods in forecasting tasks. Our code is publicly available at https://anonymous.4open.science/r/TimeDART-2024."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Self-supervised Learning",
"Diffusion Model",
"Time Series Forecasting"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/30eae5f9d0f0fc3dced313dfde7fd34367baef46.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Diffusion Auto-regressive Transformer for Effective Self-supervised Time Series Forecasting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yHVjncoGSp | Deep Learning Aided Broadcast Codes With Feedback | main | Active | Deep Learning;Wireless Communication;Feedback Coding;Error Control Coding;Federated Learning | other topics in machine learning (i.e., none of the above) | 3;3;3;5 | 5;3;3;5 | 2;2;1;3 | 1;2;1;2 | 1;3;1;2 | 3.5 | 4 | 2 | 1.5 | 1.75 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Questions:\n1. In some numerical results, RPC-BC shows an error floor in the high SNR region but LightBC does not, which is described by \"LightBC behaves somewhat like a linear feedback code\" (Line448). The reviewer wonders the reason of the fact because RPC-BC must be more flexible than LightBC. A possible reason is the hardness of learning RPC-BC. Is there any explanation of the fact? \nAnother question: why LightBC can behaves like linear codes? Does learned LightBC really have some linearlity like a linear code? How to examine it? \n\n2. Is there any possible reason that LightBC outperforms RPC-BC in some settings? Is it related to the hardness of learning models? \n\n3. In Fig.3, why LightBC by federated learning performs close to LightBC by global learning? \n\n4. What is the benefit of fully-learnanable AWGN-BC code? \n\n5. Are the training parameters in Tab. 1 optimized for AWGN-BC? The reviewer is concerned that the use of parameters \"consistent with the training parameters proposed in the single user versions\" (Line 408) may be unsuitable for the AWGN-BC case, resulting in poor performance of RPC-BC.\n\nSuggestions:\n1. The size of trainable parameters should be described. It will be helpful to show how much LightBC is lighter than RPC-BC. \n2. In Line 232, $\\mathbb{R}^{N_s,2}$ should be $\\mathbb{R}^{N_{s,2}}$. \n3. In Line 394, \"The decoder sends the length $|W_\\ell|$−length vector...\" is somewhat confusing.\n4. In Fig. 2(a), the term \"SU\" is confusing because it is denoted by \"TDD\" in the main text."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "* The proposed models are the first fully-learnable architectures for an AWGN-broadcast channel (AWGN-BC). They show excellent performance compared with a conventional concatenated linear code. \n* The authors investigate a federated learning strategy for training weights in the models. In particular, they examine the effect of noise in transmitting gradients in the training process.\n* These issues are of importance in terms of future wireless communications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This manuscript proposes two classes of deep-learning assisted encoder and decoders for a broadcast channel. The first is an extension of the RNN-based architecture, and the second is a lighter MLP-based architecture. The authors compare them with conventional concatenated codes in various settings, indicating that the proposed architectures outperform conventional codes. In addition, they consider the federated learning scheme for the models and examine the performance when the gradient of trainable weights is transmitted over noisy channels."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the reviewer agrees with the importance of the issues, there are several flaws in this manuscript.\n1. **Poor technical contributions** \n\nThe contributions of this manuscript are twofold: the proposal of deep-learning architectures for AWGN-BC and the proposal of federated learning for these models. However, the proposed architectures are simple extensions of existing models for single-user cases. In short, the modifications are only learning multiple decoders and changing the loss function. As for federated learning, the concept is familiar and the modifications of the learning process are straightforward. Including not providing any theoretical analyses, the reviewer cannot avoid judging that the contributions of this manuscript are poor. \n\n2. **Lack of comparison between proposed models**\n\nThe authors proposed two models for AWGN-BC. It is claimed that LightBC is lighter than RPC-BC, but the authors do not explicitly compare the number of trainable weights and/or training costs of these models. This will be important because the comparison of model size is a performance metric other than an error rate. In addition, it will be related to the analysis of the numerical results, e.g., Fig. 3.\n\n3. **Lack of comparison with other models**\n\nAnother issue is that the numerical results do not contain those of other learning-based AWGN-BC codes such as [Li et al. 2022] in the manuscript. Even if Li's model is not fully learnable, as stated in Sec. 1.1, there is no reason to omit the model from comparison in this manuscript. \n\n4. **Possibly insufficient numerical experiments on federated learning**\n\nThe authors state, \"We choose to explore the uncoded downlink versus a quantized method as it has been shown that there is better convergence behavior in federated learning with noisy downlinks Amiri et al. (2021).\" in Line 126-129. However, Fig. 3 only contains the results of uncoded transmission (Line 490). The manuscript seems to be flawed.\n\n5. **No theoretical analysis nor interpretation of numerical results**\n\nThe manuscript contains no theoretical analysis of the proposed models or federated learning process. At the very least, I believe that the interpretation of the numerical results should be described in the manuscript. However, the manuscript only describes the facts from numerical results. See the \"Question\" section for more details."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can the scheme be applied to a block transmission with feedback? I think this scenario is of high importance because cellular systems operate in this regime (hybrid ARQ).\n2. Please provide a more detailed comparison with orthogonal (TDM) schemes when considering noiseless feedback channel. Comparing results of the LightCode paper (arXiv 2403.10751) and Fig. 5 (rate K=3/N=9, and BLER=1e-9 at approximately -1 dB SNR), and results presented in Fig 1. (BLER 1e-6 at the same SNR for K=3/uers and N=18 channel uses).\n3. Please address the problem of error-floor appearing in Figure 1. (RPC, K=3/N=18)."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors consider the case of a multiuser broadcast channel with feedback. Feedback is known to improve the capacity region of the multiuser channel without feedback. Moreover, the behavior of this setup in the finite blocklength regime is not addressed in the literature. Thus, the application of machine learning in designing codes for broadcast channels with feedback seems quite reasonable and interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper considers a broadcast channel with feedback and considers a two-user symmetrical AWGN case. Feedback is known to improve the capacity region of this channel, and the authors consider a machine learning-based approach to construct good codes for this setup. The authors consider both noiseless and noisy feedback.\n\nThe proposed training methods are based on the extension of previous works devoted to a single-user channel:\n - Robust Power-Constrained Deep Learning Algorithm (RPC)\n - LightCode\n\nThe authors consider a simulation setup with small block length equal to 9 or 18 channel uses. The authors consider three different scenarios:\n- noiseless feedback,\n- noisy feedback and comparison with an orthogonal (TDD) scheme,\n- noisy feedback and comparison between different coding rates."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper lacks some discussion related to the broadcast channel with feedback. The authors just say that «the use of feedback in the AWGN-BC channel can far exceed the capacity of the AWGN-BC channel without feedback». I think that more discussion on this topic may significantly improve the understanding of the problem and numerical results analysis. The questions that I suggest to address may be as follows:\n- What are existing theoretical results on capacity region?\n- How exactly the capacity region can be improved and in which setups? \n- What about successive interference cancellation that is widely used in communications? Is it applicable to feedback channels? \n- What about finite block length regime? Were there any attempts to address it?\n\nNext, in the final section, the authors mention that \"... numerical studies indicated that there appears to be not much advantage over using the simple extension of LightBC in the broadcast setting versus utilizing TDD with the single user LightCode in the high rate, noisy regime\", which seems a bit confusing. It seems that the improvement can be achieved exactly when a non-orthogonal transmission scheme is used.\n\nThe authors consider numerical results by addressing a finite (N = 18 channel uses) block length, which is too far from any coding scheme used in practice. I suggest adding some discussion on how the block length can be increased and which results one should expect in this case.\n\nThe main numerical results are presented in Fig. 1 (noiseless feedback), and this figure lacks a comparison with any known theoretical bounds (like orthogonal TDM mentioned later when considering noisy feedback channels or a finite block length random coding bound for multiple access channels without feedback) and practical schemes. More references will also improve the understanding of the provided results."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- Can you please better motivate the FL approach? Is the scenario a distributed communication setting with unknown channels? I believe estimating the channels before training the code would be a better approach in general (as is done in practice). Can you please better motivate the underlying scenario? \n\n- RPC-BC seems to do worse than time-division transmission of point-to-point RPC codes. Then, what is the point of considering these codes? LightBC seems to improve compared to time-division, does that hold for longer codes as well? I expect the training will get harder quickly with the code length, and the gains with respect to TD may quickly diminish. Can you please provide additional simulation for longer code lengths? \n\n- Why consider the code rate of 2/3 in Fig. 1 given that the error probabilities are quite high? \n\n- Can you please provide a comparison of these schemes with the linear coding approaches proposed in the literature? Even though you argue that non-linear coding can help, you do not provide any evidence for that. \n\n- How practical these codes in practice? If I understand correctly, a different neural network decoder is trained for each receiver. In a practical setting, where users roam from cell to cell, does it mean that each user has to have all the decoders as they can be assigned as any user i. Similarly, it seems that a separate encoder/decoder are trained for each SNR, which further increases the memory requirement."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The main strength of the paper is to study a communication scenario that has not received much attention, particularly in terms of the application of recently developed neural network based code designs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies error correction coding over a broadcast channel in the presence of feedback. The goal of the transmitter is to convey independent messages to different receivers using the same channel resources. Assuming that the transmitter can see the channel outputs received by the receivers perfectly or with some noise, its goal is to transmit signals in an iterative fashion to gradually improve the decoding reliability of the receivers of their own messages. The author review the literature in this domain, and emphasize the lack of prior work on deep learning aided code design for this problem, although some works have appeared for the single-user channel with feedback.\n\nThe authors extend two approaches recently proposed for the single-user problem to the broadcast channel setting. They also propose a federated learning based training approach for the code."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The novelty of the paper is very limited. The authors mainly take two existing designs and train these models by considering multiple receivers. This is rather trivial, and as such the paper does not introduce any new concept, architecture or tool. \n\n- The federated learning (FL) approach is not well motivated or explained. Why would a vertical FL approach make sense here? As long as the channel model is available, what prevents the encoder from training all the decoders centrally? Is it a complexity issue? The proposed uncoded transmission of gradients, as also observed by the authors, is limiting, and not well motivated. \n\n- Presentation can be improved. Especially in the numerical results part, there are some confusing sentences. \n\n- Comparison is limited to a single relatively weak code from Li et al. It seems that the state of the art for point-to-point channels with feedback (GBAF codes) is not considered in this scenario. Although complexity is argued against these codes, there is no presentation of complexity for the presented schemes."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Included everything in the weakness section"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Relatively unexplored problem space. While deep learning based feedback schemes are well studied for single user AWGN channels, utility of the same for broadcast channels is not well studied and open for improvements. \n2. The paper is well written overall and easy to follow for someone familiar with the problem space. \n3. The concept of federated training of encoder and decoders is novel and interesting and relevant to practical settings. \n4. Performance gains over existing linear schemes is non-trivial and compelling. \n5. The takeway on LightBC being a better choice in low-noise scenarios and RPC-BC being better in high-noise scenarios is sn interesting observation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a new feedback coding scheme for AWGN broadcast channels (AWGN-BC) by extending two of the exising feedback coding schemes for feedback channels, a RNN based scheme and a lightweight-MLP scheme. Further, a new vertical fedearted learning based training scheme is proposed, which reflects a real world scenario more closely. Finally the authors peroform an emoirial study to demonstrate the benefits of each coding scheme under different channel conditions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited novelty from an architecture and training perspective, as both RPC and LightCode schemes are being reused directly from existing works. \n2. No system model included in the main body of the paper, making the problem setup hard to grasp.\n3. Throughout the paper, there was the mention of LightCode being simpler than RPC. But there is not study on the complaexity, memory, and run-time comparison between the schemes makaing it harder to understand the difference between the schemes in terms of resource overhead. \n4. Authors mention in introduction that the capacity of AWGN-BC channel increases in presence of feedback. In the experiemnts section, it would be interesting to see some discussion on this and how the feedback is impacting the performance. Specifically, a reference line indicating the capacity of the channel would make it easier to comprehened the efficiency of the proposed scheme compared to the theoretical limits. \n5. Captions to the figures are rather short and very informative. I suggest updating all the captions to reflect the key takeaway. \n6. Experimental section is written poorly. It's not immediately apparent what authors mean by \"Lin.\" in the plots. More discussion should go into explaining the figures well.\n7. Lack of proper baselines. Since the authors propose modifying two schemes that were originally proposed for single user case, there should be atleast one more learning-based baseline that was designed for 2-user case. The reference Li. et al. \"Deep learning-aided coding for the fading broadcast channel with feedback\" would be a good baseline (despite the BPSK modulation).\n\nMisc.\nMinor typos and grammatical errors should be corrected throughout the paper. ex. in line 147 - \"receives\""
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Deep Learning is applied to broadcast channel codes to leverage the performance of non-linear codes. A global and federated model are trained and results are shown for an RNN-based scheme and lighter-weight scheme."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024deep,\ntitle={Deep Learning Aided Broadcast Codes With Feedback},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yHVjncoGSp},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep learning aided codes have been shown to improve code performance in feedback codes in high noise regimes due to the ability to leverage non-linearity in code design. In the additive white Gaussian broadcast channel (AWGN-BC), the addition of feedback may allow the capacity region to extend far beyond the capacity region of the channel without feedback, enabling higher data rates. On the other hand, there are limited deep-learning aided implementations of broadcast codes. In this work, we extend two classes of deep-learning assisted feedback codes to the AWGN-BC channel; the first being an RNN-based architecture and the second being a lightweight MLP-based architecture. Both codes are trained using a global model, and then they are trained using a more realistic vertical federated learning based framework. We first show that in most cases, using an AWGN-BC code outperforms a linear-based concatenated scheme. Second, we show in some regimes, the lightweight architecture far exceeds the RNN-based code, but in especially unreliable conditions, the RNN-based code dominates. The results show the promise of deep-learning aided broadcast codes in unreliable channels, and future research directions are discussed."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Deep Learning",
"Wireless Communication",
"Feedback Coding",
"Error Control Coding",
"Federated Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/22dcf25f244cb5c843b93abd6f956fa180f17eaf.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/e25fcbd9076322f37c03aa910421b336c26c386d.zip"
},
"title": {
"value": "Deep Learning Aided Broadcast Codes With Feedback"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yHj6EunfVQ | Contextual Self-paced Learning for Weakly Supervised Spatio-Temporal Video Grounding | main | Active | spatio-temporal video grounding;weakly supervised learning | applications to computer vision, audio, language, and other modalities | 3;5;6;6 | 4;4;4;4 | 1;2;3;3 | 2;3;3;3 | 2;3;3;3 | 5 | 4 | 2.25 | 2.75 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please consider the weaknesses section. Essentially, my concerns are regarding the impact of the video encoder and the impact of the choice of tracker. I wish to understand how sensitive the method is to these two components, and whether the improvements are coming from the overall proposed design or certain specific components. I will further discuss this with fellow reviewers before making a final decision on the rating."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The idea is innovative and well motivated.\n2. The paper is well-written and easy to follow.\n3. The method achieves strong performance improvements with respect to the baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces CoSPaL (Contextual Self-Paced Learning), a novel approach to Weakly Supervised Spatio-Temporal Video Grounding (WSTVG), which aims to locate objects in videos based on text descriptions without requiring detailed bounding box annotations during training. CoSPaL proposes three main components to overcome limitations of existing methods: Tubelet Phrase Grounding (TPG) for improved object tracking, Contextual Referral Grounding (CRG) for better query comprehension, and Self-Paced Scene Understanding (SPS) for progressive learning of complex scenarios. Results are reported on common grounding benchmarks such as VidSTG and HC-STVG-v1/v2."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have a few major concerns regarding the proposed method:\n\n1. Regarding the Video Encoder: Existing works such as STCAT and TubeDETR utilize a Resnet-101 backbone to encode each frame. Even recent works such as VGDINO use the Grounding-DINO frozen image encoder. With this method using I3D as a separate video encoder, it could be an unfair comparison with existing works. How does this method perform if the authors utilize the same features as produced by the Grounding-DINO backbone as the video features? To make it consistent with previous works.\n\n2. The method uses a pretrained tracker in the pipeline. I am concerned regarding the impact of this tracker, and how the performance changes if a different tracker is used? There does not seem to be any ablations regarding this."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The problem addressed in this paper is of certain significance and is interesting.\n2. The introduction of self-paced learning is reasonably justified.\n3. Experimental results on different datasets indicate that the algorithm has achieved certain improvements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Targeting the issues of inconsistent predictions in time judgment, difficulty in comprehending complex queries, and the complexity of application scenarios faced by weakly supervised Spatio-Temporal Video Grounding (STVG), this paper introduces the self-paced learning method, which has achieved certain performance improvements on two conventional datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation of introducing self-paced learning should be illustrated more clearly. Are there other methods that can address the challenges presented in this paper? It is suggested to provide a more detailed explanation in the related work section.\n2. In Section 3.2.1, the tracking algorithm in the TPG module seems to play a vital role in learning the whole module. It makes a good start with GroundingDINO in the whole model, and it would be beneficial to add an analysis of the ablation of the tracking algorithm.\n3. Also, in section 3.2.1, the part of the Spatial Grounding Module, the formula description seems confused. It does not give key dimension information, such as in the similarity calculations: $\\text{SIM}(f_{w_m},f_{\\tilde{T_k}})=(\\mathrm{MLP_{q}}(f_{w_{m}})^{T}\\mathrm{MLP_{k}}(f_{\\tilde{T_{k}}}))/\\sqrt{d}$, where $f_{w_m}\\in \\mathbb{R}^{1\\times768}$ and $f_{\\tilde{T_{k}}}\\in\\mathbb{R}^{T\\times{256}}$ obtains from the above, so the $MLP_q(f_{w_m})\\in\\mathbb{R}^{1\\times{D}}, MLP_{k}(f_{\\tilde{T_{k}}})\\in\\mathbb{R}^{T\\times{D}}$, where $D$ is the MLP output dimension, but if it is like this, then matrix multiplication is failed. Besides, in $\\mathrm{A_T}(f_{T}, f_{w_{m}})=\\sum_{k=1}^{K}\\operatorname{softmax}\\left(f_{\\tilde{T}_{k}}, f_{w_{m}}\\right) \\mathrm{MLP}_{v}\\left(f_{\\tilde{T}_{k}}\\right)$, what is the mean about $f_T$? Is $\\sum_{k=1}^{K}$ meant to be a matrix addition of all tubelet features? Then it will give $ A_\\in\\mathbb {R}^{1\\times{D}}$; at this point, how to get the distribution of scores between different tubelets? This part of the equation is confusing.\n4. The comparison parameters of GPU memory use and training time in Figure 5 are ambiguous because the fully supervised models compared in the figure are experimented on different resolutions. However, the paper does not list the information about the resolution of the fully supervised models compared at the time of statistics and the resolution of their own models and whether the hardware parameters (e.g., information about CPU, GPU, memory) are unified across the different models, which I think are essential settings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThe paper presents a method CoSPaL to address the key issues in WSTVG.\n2.\tThe paper includes comprehensive quantitative analysis and visualization, providing empirical evidence to support the proposed method.\n3.\tThe proposed CoSPaL model shows strong performance gains on multiple datasets, with notable improvements of 3.9% on VidSTG and 7.9% on HCSTVG-v1."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes CoSPaL (Contextual Self-Paced Learning) for Weakly Supervised Spatio-Temporal Video Grounding (WSTVG). The method introduces three key components: Tubelet Phrase Grounding (TPG), Contextual Referral Grounding (CRG), and Self-Paced Scene Understanding (SPS). CoSPaL aims to improve spatio-temporal video grounding by enhancing the model’s ability to understand complex queries and progressively adapt to more difficult scenarios. The effectiveness of CoSPaL is validated on three benchmark datasets, with significant performance improvements over baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe technical contribution and motivation is unclear. For example, TPG module uses cross-modal attention, contrastive learning, and feature reconstruction to facilitate interactions between words and tubelets, these techniques are common in the field. Why this particular implementation contributes to improved spatial and temporal grounding?\n2.\tThe SPS component is not clearly described. It is unclear how sample complexity is determined within the curriculum learning framework, is the object number? But intuitively, videos with more objects, faster changes, and more interactions are more complex. \n3.\tWhat does “Self-Paced” mean? This concept is not clearly explained in the paper.\n4.\tIn Table 5, the introduction of SPS in the TPG module leads to a performance drop in [email protected] (TPG+SPS vs TPG). This raises concerns about the efficacy of SPS in certain settings. The authors should provide a deeper analysis of why this occurs.\n5.\tIn page 10, the part of Impact on actor localization, the corresponding quantitative results for this claim are missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please address the weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "Three WSTVG benchmarks are used for performance evaluation.\n\nThe compared methods are state-of-the-art. CG-STVG and VGDINO are published in 2024.\n\nThe figures and tables are clear.\n\nVarious figures are used to present the performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper targets the Weakly Supervised Spatio-Temporal Video Grounding (WSTVG) task, which tries to retrieve specific objects and segment by sentence queries without relying on labeled data. The spatial and temporal grounding modules are used to mine the spatio-temporal information. Also, a contextual referral grounding module is utilized to extracts contextual information from query. Finally, a self-paced curriculum learning strategy is adopted for optimization. Some experiments are conducted."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In Abstract, authors state that “we first explore the potential of state-of-the-art object detection models for WSTVG”. In fact, it is wrong since many STVG methods use object detection models, e.g., [1].\n\nWhat is the difference between “self-paced scene understanding” and “self-paced curriculum learning”. In Section 3.2.3, authors use “self-paced scene understanding” as the section title, but all the contents describe the self-paced curriculum learning strategy. Besides, the strategy has been used in many works (Wang et al., 2022a; Soviany et al., 2022). Why you treat it as your third contribution and your title?\n\nThe technological novelty is not enough for ICLR. Most modules in the presented network in Figure 3 are popularly used. For example, cross attention in spatial grounding and temporal grounding.\n\nAuthors should conduct the main abalation study in the main paper.\n\nThe start and end timestamps need to be added in Fig 4.\n\nSome grammatical errors. For example, \"three benchmark WSTVG datasets\" in Abstract.\n\n[1] Weakly-Supervised Spatio-Temporal Video Grounding with Variational Cross-Modal Alignment"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024contextual,\ntitle={Contextual Self-paced Learning for Weakly Supervised Spatio-Temporal Video Grounding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yHj6EunfVQ},\nnote={under review}\n}"
},
"abstract": {
"value": "In this work, we focus on Weakly Supervised Spatio-Temporal Video Grounding (WSTVG). It is a multimodal task aimed at localizing specific subjects spatio-temporally based on textual queries without bounding box supervision. Motivated by recent advancements in multi-modal foundation models for grounding tasks, we first explore the potential of state-of-the-art object detection models for WSTVG. Despite their robust zero-shot capabilities, our adaptation reveals significant limitations, including inconsistent temporal predictions, inadequate understanding of complex queries, and challenges in adapting to difficult scenarios.\nWe propose CoSPaL (Contextual Self-Paced Learning), a novel approach which is designed to overcome these limitations. CoSPaL integrates three core components: (1) Tubelet Phrase Grounding (TPG), which introduces spatio-temporal prediction by linking textual queries to tubelets; (2) Contextual Referral Grounding (CRG), which improves comprehension of complex queries by extracting contextual information to refine object identification over time; and (3) Self-Paced Scene Understanding (SPS), a training paradigm that progressively increases task difficulty, enabling the model to adapt to complex scenarios by transitioning from coarse to fine-grained understanding."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"spatio-temporal video grounding",
"weakly supervised learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e246594b6969ef5fd36f0a1b3a3804e60d30c17e.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Contextual Self-paced Learning for Weakly Supervised Spatio-Temporal Video Grounding"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yI60yhMQ7L | Diversity Helps Jailbreak Large Language Models | main | Withdraw | Attack;Large Language Model;Safety | alignment, fairness, safety, privacy, and societal considerations | Weiliang Zhao;Daniel Ben-Levi;Junfeng Yang;Chengzhi Mao | ~Weiliang_Zhao2;~Daniel_Ben-Levi1;~Junfeng_Yang1;~Chengzhi_Mao2 | 0 | 0 | 0 | 0 | 0 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": {
"value": "We present a novel jailbreaking strategy that employs an attacker LLM to generate diversified and obfuscated adversarial prompts, demonstrating significant improvement over past approaches."
},
"_bibtex": {
"value": "@misc{\nzhao2024diversity,\ntitle={Diversity Helps Jailbreak Large Language Models},\nauthor={Weiliang Zhao and Daniel Ben-Levi and Junfeng Yang and Chengzhi Mao},\nyear={2024},\nurl={https://openreview.net/forum?id=yI60yhMQ7L}\n}"
},
"abstract": {
"value": "We have uncovered a powerful jailbreak technique that leverages large language models' ability to diverge from prior context, enabling them to bypass safety constraints and generate harmful outputs. By simply instructing the LLM to deviate and obfuscate previous attacks, our method dramatically outperforms existing approaches, achieving up to a 62\\% higher success rate in compromising nine leading chatbots, including GPT-4, Gemini, and Llama, while using only 12\\% of the queries. This revelation exposes a critical flaw in current LLM safety training, suggesting that existing methods may merely mask vulnerabilities rather than eliminate them. Our findings sound an urgent alarm for the need to revolutionize testing methodologies to ensure robust and reliable LLM security."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Weiliang_Zhao2",
"~Daniel_Ben-Levi1",
"~Junfeng_Yang1",
"~Chengzhi_Mao2"
]
},
"authors": {
"value": [
"Weiliang Zhao",
"Daniel Ben-Levi",
"Junfeng Yang",
"Chengzhi Mao"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Attack",
"Large Language Model",
"Safety"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "zhao|diversity_helps_jailbreak_large_language_models"
},
"pdf": null,
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Diversity Helps Jailbreak Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
||||||||||
yIN4yDCcmo | INS-MMBench: A Comprehensive Benchmark for Evaluating LVLMs' Performance in Insurance | main | Active | large vision-language model;insurance;multimodal | datasets and benchmarks | 3;5;5;5 | 4;3;4;5 | 1;3;3;3 | 1;3;2;3 | 2;3;3;3 | 4.5 | 4 | 2.5 | 2.25 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "The benchmark primarily focuses on insurance, specifically insurance laws from the United States. This focus may introduce bias into the evaluation process, posing a risk for models developed in different country contexts."
},
"flag_for_ethics_review": {
"value": [
"Yes, Discrimination / bias / fairness concerns"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Check Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Comprehensive Benchmark: The paper presents INS-MMBench, which is the first comprehensive benchmark tailored for evaluating LVLMs in the insurance domain. This benchmark is extensive, covering 8,856 multiple-choice visual questions across 12 meta-tasks and 22 fundamental tasks, providing a robust framework for assessing LVLM capabilities in various insurance scenarios.\n\n2. Systematic Framework: The authors have developed a systematic and hierarchical task definition that ensures the tasks are closely aligned with real-world applications in the insurance industry. This bottom-up approach to task construction enhances the benchmark's relevance and practicality, making it a valuable tool for both research and practical applications.\n\n3. The paper also includes an extensive evaluation of multiple representative LVLMs, offering detailed performance analysis across different insurance types and meta-tasks. This analysis not only validates the effectiveness of the INS-MMBench benchmark but also provides actionable insights into the current capabilities and limitations of LVLMs in the insurance domain, guiding future research and development efforts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces INS-MMBench, a comprehensive benchmark designed to evaluate the performance of LVLMs in the insurance domain. It is the first initiative to systematically review multimodal tasks within the insurance sector and establish a specialized benchmark for it."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Multi-Choice Format Limitations: This benchmark follows a similar style to MMBench and MME in the general multimodal domain, all of which formulate their questions into multiple-choice formats. While this is an effective method for evaluating model performance, it has limitations that prevent generalization to open-ended question answering, which is more representative of real-world applications.\n\n2. Static Benchmark and Data Leakage: The benchmark is static, which does not mitigate the data leakage problem. This will likely render the benchmark less effective in future developments.\n\n3. Focus on US Insurance Law and Potential Bias: The benchmark primarily focuses on insurance, specifically insurance laws from the United States. This focus may introduce bias into the evaluation process, posing a risk for models developed in different country contexts."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Regarding the first limitation, could you share your perspective on how the current selective tasks directly align with the actual stages in the insurance process? For example, specific insurance stages like underwriting or claims processing?\n\nI would consider slightly increasing the score if convinced that the benchmark specifically addresses key insurance stages, rather than being a collection of VQA tasks merely related to the selected insurance categories(auto, property, health, and agriculture)."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The motivation behind establishing an insurance benchmark is worthwhile. Evaluating LVLMs' capabilities on core insurance stages like underwriting and claims processing is practical and meaningful.\n2. The benchmark covers a reasonable range of core insurance types relevant to key areas in everyday insurance applications.\n3. The study provides an insightful error analysis, highlighting the current limitations of LVLMs in interpreting insurance-specific visual content."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The potential for Large Vision Language Models (LVLMs) to impact the insurance industry is substantial, yet largely unexplored. This study establishes a benchmark to evaluate LVLM capabilities within the domain, focusing on four main insurance types: auto, property, health, and agriculture. To create the benchmark, the authors gathered multimodal data for each insurance category from public sources and converted it into multiple-choice questions using GPT-4o. They then evaluated popular LVLMs on this benchmark to provide an initial assessment of LVLM performance and reveal current limitations in handling insurance-related content by an error analysis. Finally, the authors try to address gaps in insurance knowledge and reasoning skills by adding insurance-related information to the prompt."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Misalignment between Intent and Implementation**: While the authors claim the benchmark includes 12 meta-tasks and 22 fundamental tasks across stages like underwriting and claims processing in the Introduction section, the tasks illustrated in the paper are only loosely related to these stages. For example, meta-tasks in auto insurance such as “vehicle information extraction” and “vehicle damage detection” focus heavily on general computer vision tasks rather than directly addressing insurance-specific stages. This makes the benchmark feel more like a vision task set than an insurance task set. \n2. **Limited Accessibility for Reproducibility**: Although the authors promise to release the code and dataset, the GitHub repository has not been updated in four months, containing only a readme and a few diagrams. This lack of resources limits my ability to further assess the benchmark’s true rationality and effectiveness.\n3. **Limited Novelty**: Some conclusions, such as “performance of closed-source LVLMs varies by training data size and methods,” are too general and widely understood, offering little new insight. The paper would benefit from focusing on more specific findings directly related to the insurance domain."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can you provide more details on how the benchmark tasks were selected?\n\nIt would be helpful to understand the criteria used to determine which tasks were included in INS-MMBench. Specifically, how did you ensure that the tasks accurately reflect real-world insurance challenges and not just general visual recognition problems?\n\n2. Have you considered including more complex, multi-step reasoning tasks?\n\nGiven the importance of decision-making in insurance, would it be possible to expand the benchmark to include tasks that require multi-modal integration and reasoning (e.g., verifying a claim using images, text descriptions, and numerical data)? This could better showcase the strengths and weaknesses of LVLMs in handling real-world scenarios.\n\n3. How do you envision improving the interpretability of model evaluations?\n\nSince explainability is critical in insurance, have you considered adding tasks that require LVLMs to provide justifications or rationales for their answers? This could allow for a deeper evaluation of how well models understand and explain their decisions, which is crucial for real-world applications.\n\n4. Do you have insights on the performance gap between open-source and closed-source models?\n\nThe results indicate a narrowing gap between open and closed-source models. Can you elaborate on specific factors contributing to this trend, and how future benchmarks might encourage more competitive open-source solutions?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) Originality: INS-MMBench is the first benchmark tailored to evaluate LVLMs in the insurance domain. The authors' approach to defining tasks using a bottom-up hierarchical methodology is innovative and ensures that the benchmark aligns with real-world insurance scenarios, making it a pioneering effort in applying LVLMs to this new domain.\n\n(2) Quality: The authors systematically identify and organize multimodal tasks across four types of insurance, and their comprehensive evaluation of ten LVLMs provides some insights. The inclusion of detailed error analysis and the exploration of prompt engineering techniques to mitigate common issues further strengthen the paper, offering practical suggestions for improving model performance.\n\n(3) Clarity: The authors explain each step of their methodology in detail. \n\n(4) Significance: The introduction of INS-MMBench contributes to the field, as it enables a more nuanced evaluation of LVLMs in a domain with substantial practical applications. The benchmark could lead to improved automation in insurance-related tasks, such as claims processing and fraud detection, thus enhancing efficiency and accuracy in the industry. Moreover, by highlighting the narrowing performance gap between open-source and closed-source LVLMs, the paper encourages further research and development, potentially driving advancements in accessible and effective AI solutions for the insurance sector."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a benchmark in the systematic evaluation of LVLMs in this field by introducing INS-MMBench, a domain-specific benchmark designed to assess these models across various insurance-related tasks.\n\nKey contributions:\n\n(1) INS-MMBench is the first comprehensive benchmark tailored for the insurance domain. It covers four representative types of insurance: auto, property, health, and agricultural insurance, reflecting real-world insurance scenarios such as underwriting and claims processing.\n\n(2) The authors used a bottom-up hierarchical task definition approach to identify and refine relevant tasks for each insurance type. They collected and processed datasets to create visual question-answer pairs, ensuring that the benchmark aligns with practical applications in the insurance industry.\n\n(3) The paper evaluates different LVLMs using INS-MMBench. The results highlight the challenges these models face and give some insights."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Benchmark Definition Lacks Depth in Insurance Scenarios\n\nWhile INS-MMBench introduces tasks related to insurance, many are more aligned with general, common-sense VQA rather than specialized, nuanced scenarios seen in real-world insurance applications. To better reflect practical needs, the benchmark should include more complex tasks, such as multi-step reasoning or risk assessment based on a mix of visual and contextual data.\n\n2. Overemphasis on Basic Tasks\n\nSome tasks, like license plate recognition, are too basic and can be handled by smaller, specialized models. Evaluating LVLMs on these tasks does not showcase their strengths. Instead, the benchmark should focus on tasks requiring more advanced reasoning, such as verifying claims by cross-referencing multiple data points, to highlight the real capabilities of LVLMs.\n\n3. Limited Emphasis on Reasoning and Higher-Order Tasks \n\nThe benchmark lacks tasks that test higher-order reasoning, which is crucial for insurance scenarios. Tasks involving contextual understanding, complex decision-making, and multi-modal integration would better evaluate how well LVLMs can handle real insurance industry challenges.\n\n4. Lack of Focus on Interpretability \n\nInsurance applications require transparency, yet INS-MMBench primarily uses multiple-choice questions, limiting the ability to assess whether models can explain their decisions. Future benchmarks should include tasks that require LVLMs to provide rationale, enabling evaluation of their interpretability, which is critical for building trust in automated systems.\n\n5. Clarification Needed on Table 1\nIt appears there might be an error in the labeling of the last two rows in Table 1. Currently, \"OmniMedVQA\" is described as domain-specific for math, and \"Mathvista\" as domain-specific for medical. Given the names and typical use cases, it seems like these two may have been accidentally switched.\n\nRecommendations:\n1. Integrate more complex, domain-specific scenarios that mimic real-world tasks.\n2. Replace basic tasks with challenges requiring higher-order reasoning and contextual analysis.\n3. Add tasks that require models to explain their answers, enhancing interpretability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How do LVLMs perform on insurance tasks that related to temporal reasoning for example analyzing claim patterns over time? The work evaluates static image understanding, but many insurance tasks require understanding temporal relationships and changes over time, and it seems that those samples are missing from the current benchmark."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-written and well-organized. \n- This is the first systematic benchmark specifically designed for the LVLMs evaluation in the insurance domain and fills a gap in the current benchmark that often overlooks domain-specific applications.\n- The experiments are comprehensive, and thorough error analysis categorizing different types of model failures is provided in the paper."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces INS-MMBench, the first comprehensive benchmark designed to evaluate LVLMs in the insurance domain. INS-MMBench includes four insurance types: auto, property, health, and agriculture, and includes 8856 multiple-choice questions across 12 meta-tasks and 22 fundamental tasks. It is designed to evaluate LVLMs in practical insurance tasks, such as vehicle damage detection and health risk monitoring, combining real-world visual information with insurance-specific questions. Through the experiments, the authors show the current limitations of LVLMs in insurance domain and suggests targeted data and domain knowledge for improving the performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The human baseline experiments only involve 3 graduate students specialized in insurance, which is a small sample size. This might not accurately represent the range of expertise and variability in the real-world insurance evaluations. I would suggest bringing in more experts from the industry to help perform the human evaluation. \n- This work does not discuss potential biases in the data sources or methods for mitigating them, which means that there is a risk that the benchmark may favor certain model behaviors or fail to generalize to different insurance scenarios."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024insmmbench,\ntitle={{INS}-{MMB}ench: A Comprehensive Benchmark for Evaluating {LVLM}s' Performance in Insurance},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yIN4yDCcmo},\nnote={under review}\n}"
},
"abstract": {
"value": "Large Vision-Language Models (LVLMs) have demonstrated outstanding performance in various general multimodal applications such as image recognition and visual reasoning, and have also shown promising potential in specialized domains. However, the application potential of LVLMs in the insurance domain—characterized by rich application scenarios and abundant multimodal data—has not been effectively explored. There is no systematic review of multimodal tasks in the insurance domain, nor a benchmark specifically designed to evaluate the capabilities of LVLMs in insurance. This gap hinders the development of LVLMs within the insurance domain. In this paper, we systematically review and distill multimodal tasks for four representative types of insurance: auto insurance, property insurance, health insurance, and agricultural insurance. We propose INS-MMBench, the first comprehensive LVLMs benchmark tailored for the insurance domain. INS-MMBench comprises a total of 2.2K thoroughly designed multiple-choice questions, covering 12 meta-tasks and 22 fundamental tasks. Furthermore, we evaluate multiple representative LVLMs, including closed-source models such as GPT-4o and open-source models like BLIP-2. This evaluation not only validates the effectiveness of our benchmark but also provides an in-depth performance analysis of current LVLMs on various multimodal tasks in the insurance domain. We hope that INS-MMBench will facilitate the further application of LVLMs in the insurance domain and inspire interdisciplinary development."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large vision-language model",
"insurance",
"multimodal"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a8564e8547b762c3d4ddd5f30ba9d184a07ff782.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/ce43e2c17282ed6b8f8737b19b9bc6be05415252.pdf"
},
"title": {
"value": "INS-MMBench: A Comprehensive Benchmark for Evaluating LVLMs' Performance in Insurance"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yIRtu2FJvY | A Matrix Variational Auto-Encoder for Variant Effect Prediction in Pharmacogenes | main | Active | variant effect prediction;variational auto-encoder;transformer;deep learning | applications to physical sciences (physics, chemistry, biology, etc.) | 3;3;3;3 | 4;4;3;4 | 3;2;1;3 | 1;1;1;2 | 2;1;1;4 | 3 | 3.75 | 2.25 | 1.25 | 2 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- How much does performance change if attention maps in self-attention layers are learned as opposed to derived from AF2 predicted contacts?\n\n- Based on my understanding of Fig. 1, the hyperparameter $d$ should be 20, corresponding to the number of amino acids. Why do you not embed amino acids in a higher dimensional space as you get deeper into the VAE?\n\n- Instead of using a dimension-wise FC layer, do you think it would be valuable to have an attention pooling layer to reduce the number of parameters (# of parameters in attention pooling does not scale with the length of the sequence)? It also automatically allows you to handle sequences of different lengths. \n\n- For matENC-DMS, are variants at the same position either all in the training set or the testing set? Training on some varaints and testing on other variants at the same position has been shown to be a form of data leakage that inflates performance.\n\n- Does difference in performance between matENC-DMS and matVAE-MSA depend on the number of variants assayed in the DMS? Could you include that In Fig. A8?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors explore novel architectural innovations to the DeepSequence/EVE family of models. In particular, they use self-attention layers where the attention map is determined from predicted contacts in AF2 structures. They also place more expressive priors on the latent space in matVAE-MSA. These are innovative ideas that have not yet been considered in the field. \n\n- The authors clearly benchmark their method to other state-of-the-art methods and clearly show the impact of their architectural modifications on model performance. This is one of the clearest papers I have read."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces two new methods for variant effect prediction: (1) matVAE-MSA which is a VAE trained on sequences from the protein family of interest and (2) matENC-DMS which is trained on DMS data from a specific protein. matVAE-MSA is similar to DeepSequence and EVE but the authors introduce architectural changes (i.e. self-attention layers) and experiment with more complex priors: mixture of diagonal Gaussians and VAMP. On pharmacogenes in ProteinGym, matVAE-ESM underperforms DeepSequence and ESM models. However, matENC-DMS, whose architecture is exactly the encoder portion of the DMS data and is trained on functional activity scores from a DMS assay, outperforms all other methods. Given that models trained on DMS data do significantly better than models solely trained on sequences from a MSA, the authors conclude that DMS data is valuable to train variant effect predictors on pharmacogenes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The primary weakness of this paper is that their VAE model does not outperform existing unsupervised variant effect predictors (like ESM or DeepSequence). They find that using a more complicated prior does not improve performance and that self-attention layers with attention maps defined using AF2 contacts does not help. \n- The authors do go on to say that their encoder-only model trained on DMS data does outperform unsupervised variant effect predictors, but they do not compare to unsupervised variant effect predictors fine-tuned on DMS data. Their are many ways this fine-tuning has been proposed in the past and the authors should benchmark matENC-DMS to those methods: https://pubmed.ncbi.nlm.nih.gov/35039677/, https://www.nature.com/articles/s41467-024-51844-2, and https://arxiv.org/abs/2405.06729. \n- The authors should try pre-training a VAE on MSA data and then fine-tuning the encoder of the MSA on DMS data. \n- A central goal of the paper seems to be to identify the settings in which DMS data is useful for improving variant effect predictions. In Fig. A8, the authors are unable to find any correlations between metadata of the protein and performance difference between the DMS-trained and MSA-trained models. However, they don't consider structural features of the protein itself. Analysis along those lines would be interesting."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See \"Weaknesses\""
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "* **Novelty**: To the best of my knowledge, the authors' proposed framework is novel.\n* **Impact**: The authors' motivation (i.e., assessing how well evolutionary pressure corresponds to fitness and the corresponding impact on variant effect prediction) is solid, and such studies would likely be of interest to the machine learning for proteins community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Here the authors propose a framework based on representation learning via VAEs for use on downstream deep mutational scanning tasks. The authors discuss their design choices and then proceed to benchmark their method against standard baseline methods for these tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Despite the potential impact of the authors' work, I believe that the authors' submission has significant issues that prevent me from recommending acceptance at this time. I provide details on the major issues below:\n\n* **Unclear motivation for model design choices**: The authors spend a significant amount of time experimenting certain modeling/architecture choices (e.g. using a mixture of gaussians prior rather than a unimodal prior), which in the end don't have an impact on model performance. Indeed, this is listed as one of the authors' main contributions in the introduction. Could the authors comment on their rationale for exploring these modeling choices? Did previous results for this task find that more expressive priors led to improved performance? Or was there a more principled reason to assume that these specific priors would lead to better performance? Without more context it's hard for the reader to understand why these results are being presented. On a related note, it would be great to see an ablation study assessing the impact of training a predictor on DMS datasets using representations from previous methods (e.g. DeepSequence) compared to the same task with the authors' proposed architecture. Without this information, it's difficult for the reader to understand whether any boosts in performance for the models trained on DMS data can be attributed to the authors' proposed encoder network or if the results are solely due to training on DMS data.\n* **Unclear significance of experimental results**: Perhaps most importantly, it's not clear to me that any meaningful conclusions can be drawn from the experimental results (e.g. those presented in Table 3). In particular, given the large error bars it's difficult for the reader to assess if the provided results are statistically significant. Could the authors provide results from e.g. a t test? Moreover, it's not clear to me how the authors selected their final model hyperparameters (e.g. learning rates). The authors mentioned that their choices \"preserv[ed] stability and convergence\", but without more details it's hard to tell if these values were cherry-picked. Were these parameters e.g. chosen via cross-validation/performance on a held-out validation set? Given these issues, it's thus unclear whether the authors' claims are supported by their experimental results.\n* **Not self-contained/writing issues**: Given that ICLR is a general machine learning conference (as opposed to a more biology-focused venue), it would greatly improve the manuscript for the authors to spend more time in the introduction describing the problem setup and significance. Indeed, the introduction section to the manuscript feels extremely rushed, with little time spent on introducing the problem setting tackled by the authors. For example, providing a gentler introduction to domain-specific terms like deep mutational scanning, On the other hand, a significant amount of space is spent describing hyperparameters (e.g. sections 3.1/3.2) or details of individual datasets (e.g. section 2.3 + Table 1), which could be relegated to the Appendix. I would thus recommend that the authors restructure the manuscript so that the main text is self-contained for a general ICLR reader, with ancillary experimental details moved to the appendix to make space as needed. 
On a related note that could save some space, it's unclear to me why the authors spend a significant amount of time introducing certain mathematical/machine learning concepts which are subsequently not used in the method (e.g. introducing matrix decomposition before stating that transformer layers are used)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. If pharmacogenes experience low evolutionary pressure, why is MSA included? (line 18)\n2. What is the significance of the statement \"sets a new benchmark for 2 out of 26 proteins\"? (lines 21-22)\n3. Regarding the \"5-fold cross-validation framework,\" what is the rationale for training a model to fit a DMS dataset rather than conducting zero-shot predictions? For a new protein, if a DMS dataset is already available, it is likely that users have already identified favorable mutants.\n4. Are there any differences between the MSA sequences used and those provided by the ProteinGym benchmarks? If so, why not utilize ProteinGym's versions?\n5. Why is there a manual selection of a threshold to binarize the DMS target scores? What are the specific criteria for this selection and what are the resulting values?\n6. Could you clarify the statement \"For instance ESM2 (150M), ESM2 (15B) and ESM-1v (ensemble) are all flavors of ESM\"?\n7. How should we interpret the statement \"We compared with those models in ProteinGym which perform the best on at least one pharmacogene-related protein DMS dataset according to SpearmanR\"? Does \"perform the best\" refer to zero-shot prediction or supervised learning tasks? What are the candidate models and their respective scores? Have all models on the ProteinGym leaderboard been considered in this comparison?\n8. While the algorithm does not apply any specific designs to pharmacogene, it seems like a general framework that can be applied to any mutation effect prediction tasks. In this case, it is suggested to test the model on the complete ProteinGym benchmarks of all 217 assays."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- A clear presentation on the new transformer-based module for the encoder and decoder.\n- A comprehensive investigation and analysis on the impact of different designed modules to the prediction task."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an encoder-decoder language model similar to DeepSequence for the task of mutation effect prediction and tests it on 33 drug-related DMS datasets from ProteinGym."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The presentation of the motivation is unclear (Q1, 3).\n- The justification for the experimental design and significance of the results is not clearly articulated (Q2, 3, 4, 6).\n- The design of the prediction tasks appears to be questionable (Q3, 5, 8).\n- The comparison with baseline methods is incomplete (Q7)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Can the authors more clearly explain what they feel are the contributions of the paper?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The experimental results are cleanly laid out and the authors do a nice job of not over-selling their results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper first proposes an unsupervised variant effect predictor (VEP), matVAE-MSA. The model is a VAE with transformer layers for the encoder and decoder with a sparse attention mask derived from the WT AlphaFold-predicted structure. Additionally the authors propose a supervised model, matENC-DMS which uses the same encoding architecture as matVAE-MSA, but maps the encoded vector to a scalar to predict the fitness label. matVAE-MSA does not consistently outperform existing unsupervised approaches, but does provide the best results for two proteins, MK01 and RAF1. matENC-DMS, which is trained on family-specific DMS data, consistently improves performance over unsupervised approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Unfortunately I see a number of weaknesses with the current paper. First, the use of a transformer as a VAE for modeling protein families is not new and exists in ProT-VAE. Nonetheless, given that this new architecture does not outperform existing simple VAE architectures it is not clear what contribution this paper is making with this? While the matENC-DMS results are interesting, there is no benchmarking with other supervised approaches. In particular, the paper \"Learning protein fitness models from evolutionary and assay-labeled data\" addresses combining evolutionary data with DMS data. At a minimum the authors should compare to this approach. Overall, I wasn't able to see what contributions the authors are making. The new VAE model doesn't seem to add any new insights into how to design better unsupervised generative models and it does not outperform existing approaches. The supervised model is not benchmarked against any similar approaches and does not provide any new insights."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Matrix Variational Auto-Encoder for Variant Effect Prediction in Pharmacogenes},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yIRtu2FJvY},\nnote={under review}\n}"
},
"abstract": {
"value": "Variant effect predictors (VEPs) are designed to predict the impact of protein variants on cellular function, traditionally using data from multiple sequence alignments (MSAs). This assumes that natural variants are fit, a premise challenged by pharmacogenomics, where some pharmacogenes have low evolutionary pressure. In this context, deep mutational scanning (DMS) datasets are of particular interest since they provide quantitative fitness scores for variants. In this work, we propose a transformer-based matrix variational auto-encoder architecture and evaluate its performances on $33$ DMS datasets corresponding to $26$ drug target and absorption-distribution-metabolism-excretion (ADME) proteins available in the ProteinGym benchmark. Our model trained on MSAs (matVAE-MSA) outperforms a model similar to the widely used VEPs in pharmacogenomics, and sets a new benchmark for $2$ out of $26$ proteins. We compare matVAE-MSA with matENC-DMS, a model with similar capacity, but trained and evaluated on DMS data in a 5-fold cross-validation framework. matENC-DMS outperforms both the best available model for $15$ out of $33$ DMS datasets and matVAE-MSA for all ADME, and certain drug target proteins. Our results shed new light on the role of evolutionary pressure for the validity of the premise of VEP design. In turn motivating the development of DMS datasets to improve VEPs on pharmacogene-related proteins."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"variant effect prediction",
"variational auto-encoder",
"transformer",
"deep learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/1f7b99f9edc86487ebb5ec005085bfc7fd2e9931.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "A Matrix Variational Auto-Encoder for Variant Effect Prediction in Pharmacogenes"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yIbSXuLoO1 | Set-Size Dependent Combinatorial Bandits | main | Active | Combinatorial Multi-armed Bandit;Set-Size Dependent;Online learning | learning theory | 3;3;5;6 | 4;4;3;3 | 2;2;2;3 | 2;3;2;3 | 2;3;2;4 | 4.25 | 3.5 | 2.25 | 2.5 | 2.75 | -0.96225 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Can SSD-CMAB be reduced to CMAB problem by setting some constraint to the super arms?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "a new setting"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a new bandit setting, \ncalled Set-Size Dependent Combinatorial Multi-Armed Bandits (SSD-CMAB). The key difference between SSD-CMAB with the classical CMAB is the reward distribution associated with each base arm depends on the size of the played super arm in that round.\nThey propose a novel algorithm, SortUCB, for solving SSD-CMAB.\n\nThe writing is not that clear. I suggest adding a learning protocol for your proposed learning problem. Also, using more than half a page for notations is not that space efficient. It can be simplified."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main concern is I am not convinced of the definition of regret. In my understanding, different rounds $t$ have different optimal arms, as the reward distributions depend on the size of the pulled super arm in that round. However, in (1), the optimal super arm is fixed. \n\n\nAlso, It is not fair to compare regret bounds for different settings, stated in the abstract.\n\nRegret lower bound: partially tight? What does it not mean? Also, the derived regret lower bound is asymptotic. In addition, this work never specifies how to characterize a SSD-CMAB problem instance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Some questions for the authors : \n\n- **Pivoting paper around SD-CMAD**: Observing that SD-CMAB is the more generic version of SSD-CMAB, why did the authors not decide to pivot the entire discussion around SD-CMAB and provide SSD-CMAB as a special case with better performance?\n\n- **Real-world datasets**: Is it possible to have more real-world dataset experiments? this connects with requirements for more practical/ real-world problem setups similar to SSD-CMAB. \n\n- **($n_1, n_2$)-efficiency oracle**: I can understand the need for such an oracle for comparison purposes. Is this a novel idea of the paper, or has this been used in essence in other literature? In either case, a thorough discussion on the same seems to be missing in the paper."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The following would contribute to the strengths of the paper:\n- **Clear Writing**: The paper is well written, precise, and to the point. The paper does a good job going slowly and explaining the inner workings of the algorithms and intuitive understanding of the paper\n\n- **Innovative Algorithmic Solutions**: The novel proposed methods SortUCB and SortUCB-SD provide attractive regret upperbounds and their performance is further bolstered using the synthetic experiments.\n\n- **Theoretical performance guarantees**: The paper provides theoretical proof of both the regret upper bound and the partial tightness to the fundamental lower bound for the SSD-CMAB problem. \n\n- **Experiments**: The paper provides synthetic implementation with multiple baseline methods and showcases the prowess of the SortUCB method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a variant of the standard CMAB that allows arms to change reward means based on the set size of the super arm, which is pulled, provided that the *order* of the arms in terms of their reward means is preserved. \n\nThe paper tackles this new problem setup using a novel proposed algorithm SortUCB which splits the bandit process into three stages: Elimination, Sorting, and UCB Phase. SortUCB focuses on eliminating \"bad\" super arms fast and then focusing on a curated set of good super arms. The paper provides regret upper bounds as well as instance-dependent lower bounds on the set-size-dependent CMAB. The paper provides experimental synthetic evaluations on several baseline algorithms and showcases the improvement over them. \n\nThe paper then extends the current algorithm to set dependent combinatorial bandit where the base arms are allowed to have different reward distributions even with the same set size and the set of super arms can be dictated by further constraints. This broadly generalizes the initial setup and makes it much more applicable. They provide another algorithm SortUCB-SD to tackle this setup using a ($n_1, n_2$)-efficiency oracle and prove regret upper bound for the same."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are very few loopholes in the paper. The following are some points to work on further:\n\n- **More justification on the Problem Setup**: The paper provides some justification on why this particular variant of CMAB is interesting and worth looking at, but it is not enough. A few dedicated paragraphs of potential application here would be a great addition. \n\n- **Extension to broader class seems out of place**: The paper provides a great in-depth explanation and theoretical and experimental support to SortUCB, but the same is missing for SortUCB-SD.\n\n- **A niche class of perfect lower and upper-bound matching**: The paper does attempt to explain in words how the regret bounds are tight. It might be better to phrase it as a corollary or a lemma on the small class of problems where it the upper and lower bound expressions are tight. Or is there a gap between the two fundamentals?\n\n- **Experiment Replicability**: (Apologize if I missed this) I do not see any code files or URLs where I can run and verify the experimental evidence provided."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Why is not the CTS algorithm from S. Wang et al. (ICML2018) compared with the SortUCB algorithm in the Experiments section? I believe this algorithm performs empirically well in semi-bandit problems.\n- In Section 5 (about SD-CMAB), there is a notion of \\emph{class} whose intuition is ambiguous. Taking the example of paths, what are the $K$ classes of paths?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The problem setting seems novel since the ordinary CMAB assumes that the rewards of each base arm do not depend on the super arm. The SSD-CMAB framework can model some online advertising where the click rate for each ad will decrease when a large number of ads are shown to a user at once."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new framework of combinatorial multi-armed bandits named Set-Size Dependent combinatorial multi-armed bandit (SSD-CMAB). In SSD-CMAB, the reward distribution of each base arm depends on the size of the chosen super arm. As a key assumption, there is an order preservation, which means that the order of the reward means of base arms is independent of the set size. This work shows upper and lower bounds of regret and shows that their proposed algorithm, SortUCB, is partially tight. Finally, it numerically evaluates SortUCB."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- It seems that Algorithm 1 has some issues in computational complexity. $N_{S, t}$ maintains the number of times super arm $S$ has been selected. However, in general, the size of the set of super arms is exponentially large with respect to the number of base arms. Therefore, this algorithm potentially consumes an enormous amount of memory to maintain $\\\\{N_{S, t}\\\\}_{S = 1, \\ldots, |\\mathcal{A}|}$. In addition, finding the super arm with the highest value for (4) is a nonlinear optimization problem with combinatorial constraints, which is computationally heavy.\n- In my understanding, combinatorial MAB means that there is a combinatorial structure in the set of super arms. However, SSD-CMAB defines a set of super arms as a subset of base arms whose cardinality is no more than a certain number. I am not sure if we can say this as a combinatorial structure."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How does the SortUCB compare, in practice, against submodular bandit works, given that they consider a variable arm distribution when selected within different sets? \n\nThe remainder of the algorithm (UCB Phase) focuses solely on exploitation within the set A of L main arms. It is not clear why the considered subsets of A are only L^2 and not 2^L? The subsets that can be composed with L arms are 2^L. If so, the regret should be exponential with L and not quadratic!"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors derive a lower bound for the studied problem. \n\nThe authors derive a regret upper bound for the proposed SortUCB algorithm. \n\nThe authors conduct some numerical analysis of the proposed approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce and study Set-Size Dependent Combinatorial Multi-Armed Bandits (SSD-CMAB) where each base arm is associated with a set of different reward distributions, and the reward distribution of each base arm depends on the set size. The authors propose the SortUCB algorithm, leveraging the order preservation property to reduce the exploration space and provide theoretical upper bound regret guarantees. Moreover, a lower bound is derived showing that SortUCB is relatively tight. Finally the authors conduct some numerical experiments, showing good performance of SortUCB."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Missing discussion/contrast/comparison with closely related works:**\n\nIt is fine that a related work section is put in the appendix. However, the main paper should cite related works at least briefly and then refer for detailed discussion in the appendix. Hence, closely related topics should not be omitted in the main paper. \n\nA very closely related research to this work are submodular bandits, which are a sort of CMAB, yet with submodular (more general) rewards. In such settings, the reward of arms indeed depends on the set (including its size and more, hence generalizing their setting). While some of these works were mentioned briefly in the Appendix A. We believe, due to its high relevance to their considered setting, it must be clearly discussed and highlighted in the paper with key differences and motivations.\n\nSeveral works have been published tackling the CMAB with general rewards including semi-bandit (like this work) [1, 2] and even for (more general) bandit feedback [3]. These have to be discussed and the work should contrast their results to these works (theoretically and/or empirically). \n\nThe authors should compare their work (at least empirically) to recent works on CMAB where arms depend on the set, such as submodular bandits. For example, given the consideration of a fixed cardinality constraint, recent algorithms, like [3] can be considered for comparison. \n\n[1] Chen, L., Harshaw, C., Hassani, H., & Karbasi, A.. (2018). Projection-Free Online Optimization with Stochastic Gradient: From Convexity to Submodularity. In Proceedings of the ICML.\n\n[2] Takemori, S., Sato, M., Sonoda, T., Singh, J., & Ohkuma, T. (2020). Submodular bandit problem under multiple constraints. In Proceedings of the UAI. \n\n[3] Fourati, F., Quinn, C. J., Alouini, M. S., & Aggarwal, V. (2024). Combinatorial stochastic-greedy bandit. In Proceedings of the AAAI.\n\n**Motivation:**\n\nThe proposed problem can be solved using the same CMAB frameworks which estimates the super arm reward directly but considering LM arms instead of M arms. While the problem shows a linear increase with increased constraint (L). The increase remains linear and in general L is not large. In recommender systems such a constraint is usually much smaller than M (L<<M). Furthermore, submodular bandits can be used to tackled these variable distributions base on the set. Hence, the motivation of the work remains unclear. \n\n**Approach limitations:** \n\nAssume a linear reward function and semi-bandit feedback, unlike other works which tackles non-linear and/or bandit feedback, which limits the applicability of these approaches. (Minor: the authors should be clear about this earlier in the paper, possibly from the introduction).\n\n**Complexity Analysis Missing:**\n\nThe authors do not provide a time and space complexity analysis (even though the algorithm has three different phases, each requiring different complexities, and in some of these requires several comparisons with other arms and super-arms). \n\n**Numerical Analysis Limitations:**\n\nCompare only with two benchmarks. CMAB (2015) and MPMAB (1985). Several combinatorial bandit algorithms have been suggested which includes their proposed setting as a special case, such as submodular bandits (mentioned in the above discussion). \n\nThe number of arms remains limited. L=8 is fine, but larger M should be considered for more realistic comparison. \n\nDiverse synthetic and real-world datasets should be considered. 
With limited datasets/settings and a few considered methods it is very hard to justify the practicality and good empirical performance of the method. \n\n\n**Minors:**\n\nThere is an error in the abstract for the regret upper bound. Should be delta_L^2 based on their proposed theoretical results. \n\nAlgorithm 1, line 11, mentions delete any base arm. However, it is unclear from where, R or B? I assume from R."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper raises a variant model of traditional CMAB model and gives new algorithms based on sorting."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024setsize,\ntitle={Set-Size Dependent Combinatorial Bandits},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yIbSXuLoO1},\nnote={under review}\n}"
},
"abstract": {
"value": "This paper introduces and studies a new variant of Combinatorial Multi-Armed Bandits (\\CMAB{}), called Set-Size Dependent Combinatorial Multi-Armed Bandits (\\SDMAB{}). In \\SDMAB{}, each base arm is associated with a set of different reward distributions instead of a single distribution as in \\CMAB{}, and the reward distribution of each base arm depends on the set size, i.e., the number of the base arms in the chosen super arm in \\CMAB{}. \\SDMAB{} involves a much larger exploration set of the super arms than the basic \\CMAB{} model. An important property called order preservation exists in \\SDMAB{}, i.e. the order of reward means of base arms is independent of set size, which widely exists in real-world applications. We propose the \\SUCB{} algorithm, effectively leveraging the order preservation property to shrink the exploration set. We provide theoretical upper bound of $O\\left(\\max\\left\\{\\frac{M\\delta_L}{\\Delta_{L}},\\frac{L^2}{\\Delta_S}\\right\\}\\log(T)\\right)$ for \\SUCB{} which outperforms the classic \\CMAB{} algorithms with regret $O\\left(\\frac{ML^2}{\\Delta_S}\\log(T)\\right)$, where $M$ denotes the number of base arms, $L$ denotes the maximum number of base arms in a super arm, $\\delta$ and $\\Delta$ are related to the gap of arms. We also derive a lower bound which can be informally written as $\\Omega\\left(\\max\\left\\{\\min_{k\\in[L]}\\left\\{\\frac{(M-L)\\delta_{k}}{\\Delta_{k}^2}\\right\\},\\frac{L^2}{\\Delta_S}\\right\\}\\log(T)\\right)$ showing that \\SUCB{} is partially tight. We conduct numerical experiments, showing the good performance of \\SUCB{}."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Combinatorial Multi-armed Bandit",
"Set-Size Dependent",
"Online learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/737ff5fb9cc6339f7deb3a2dcd4bfd9c667b1f45.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Set-Size Dependent Combinatorial Bandits"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yIdCQFvbYe | Bayesian Learning of Adaptive Koopman Operator with Application to Robust Motion Planning for Autonomous Trucks | main | Active | Koopman Theory;Motion Planning;Autonomous Systems | applications to robotics, autonomy, planning | 3;5;5;5;8;8 | 2;3;3;3;4;4 | 2;2;3;3;3;2 | 2;3;3;2;3;3 | 2;3;3;3;3;4 | 5.666667 | 3.166667 | 2.5 | 2.666667 | 3 | 0.99083 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. I assume the truck dataset would be heavily biased towards data of the vehicle driving straight with almost constant velocity, which may affect the quality of the model. If there are any effort to combat dataset imbalance, it would be beneficial to discuss.\n2. I’m not sure if the use of variational encoder to simplify online sampling is completely novel, but it would certainly be if this is the authors’ original idea. Otherwise maybe more related literature discussion is needed. I’ll leave this part to be answered by the authors and fellow reviewers during rebuttal phase.\n3. Some ablation studies may be needed. For example, BLAK without adaptation/bayesian learning. I’d like to know how much of the performance of the proposed method comes from the transformer + Koopman, versus how much is from bayesian learning.\n4. Additionally the authors could compare design choices with recent paper on transformer for adaptive vehicle dynamics prediction/control, such as details of how state/action are tokenized, encoding/decoding details, etc. Such as https://arxiv.org/abs/2409.15783, and https://arxiv.org/pdf/2310.08674\n\nOther details\n1. Figure 1 caption is hard to follow. Suggest putting reference symbols (step A, B, C, etc.) on the plot.\n2. The authors start to use “BLAK” to refer to their method rather late into the paper (line 466) without first introducing what the term means."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. In 3.3 the effort to make the algorithm real-time in motion planning by using a variational encoder for action encoding such that sampling can be directly drawn from gaussian normal distribution is interesting and novel.\n2. The writing of the paper is clear and easy to follow.\n3. Combining Koopman operator with transformer-based encoding and adaptive control with Bayesian learning is an interesting paradigm."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose to learn a dynamics prediction model that can adapt to different dynamical environment parameters for autonomous truck. The model leverages a transformer-based encoder of state and actions, plus Koopman-operator-based Bayesian learning for online adaptation. The method demonstrates SOTA performance compared to previous Koopman-operator-based approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. More baselines outside of Koopman-based methods may be desired to connect the paper with other adaptive control and dynamics model learning paper, including but not limited to, models like neural ODE, PINN, or other uncertainty-aware approaches such as MC dropout.\n2. No ablation study presents in the paper. See comments below."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Line 145/146 has a typo.\n\nLine 331: why does reducing the prior variance incur a broader posterior? Intuitively, the opposite is true. Do the authors mean increasing instead of reducing?\n\nIs the method comparable to an approach that uses a nonlinear function to propagate the dynamics in the latent space, e.g., \"Dream to Control: Learning Behaviors by Latent Imagination\"?\n\nHow accurate are model predictions using out-of-distribution data? An assessment with a corresponding distinction would be useful.\n\nCan the proposed approach be used for reinforcement learning?\n\nHow does the approach compare to predictive approaches other than Koopman? It would be interesting to see how a one-step predictive method using a Bayesian neural network or Gaussian process performs.\n\nIn the appendix, the authors state that the data is collected using a TD3 agent. I feel that this is relevant and should be mentioned in the main body of text.\n\nThough it only uses a Gaussian process instead of a transformer, the paper \"Gaussian Process-Based Representation Learning via Timeseries Symmetries\" also provides a measure of model uncertainty. How does this compare to the proposed approach?\n\nHow well does the approach perform if the collected data is poor? How does the model perform out of distribution?\n\nHow well does the method scale? Eq. (11)-(13) indicate that the Gram matrix of the data needs to be inverted to compute the posterior, which scales cubically with the amount of data."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well written and easy to read. The problem is well motivated and the literature overview is adequate."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a Bayesian framework for learning a Koopman operator-based predictive model. The model takes states and actions as inputs, allowing it to be used for planning. The authors use a transformer architecture to map the state to an embedding vector. They then use a Bayesian approach to formulate the distribution of the Koopman operator and that of the mapping from latent to state space. The posterior can be computed analytically given the data. To be able to sample efficiently during planning, the authors use a variational auto-encoder, which allows the action to be sampled in the latent space directly, as this corresponds to sampling from a Gaussian."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The proposed method has no theoretical guarantees. It would be interesting to at least have a discussion on what to expect without any formal result.\n\nThe algorithmic contribution is not very significant, as it consists of building blocks taken from existing methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How valid is the prior being used here? Would there also be ways to incorporate the underlying physics more than isotropic Gaussians?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Indeed, Koopman theory is one of the widely examined topic in robotics, and there, one needs to account for varying dynamics and incorporate uncertainty."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper deals with Koopman theory, where physical dynamics are modelled with data. To account for adapting dynamics, this paper proposes a Bayesian formulation. For example a model of a truck on desert or in snow results in different dynamics due to the distribution shifts, and the paper tries to incorporate uncertainty measures. The method is tested on a real data of a truck, validating the proposed framework."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "However, the paper needs to distinguish better between existing works. There has been active learning paradigms for Koopman theory, and how is this work better or differs? Those aspects should be taken into account for the list of contributions.\nWhen compared to papers in robotic conferences, I think the paper fall short in terms of experimental evaluation, e.g., the paper uses a relatively small data set for the truck dynamics, while Koopman theory is more for learning complicated dynamics like soft robotic manipulators. Moreover, real world experiment should be there to indicate that the proposed method works in practice.\nThe paper’s topic might not also perfectly fit ICLR but rather IROS and ICRA."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Weakness 2: Is this assumption still valid? What is the general problem with a dynamical input? Do you have an idea how to deal with it?\n\nWeakness 3: Is there a theoretical justification for the multistep approach? Does it improve performance even for noise-free datasets? Could you provide more evidence on the benefit of this extension?\n\nWeakness 4: Why is the inverse mapping model considered a linear function? Can you elaborate on the fact the inverse might even not exist?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- In general, the paper is well written, the approaches are well motivated, and the presentation of the methods is clear.\n\n- Based on my knowledge, the modeling of the approximated Koopman operator via Bayesian methods, resulting in a Wishart distribution for the posterior is novel and original. Furthermore, be combining the Koopman operator theory with transformers and the variational autoencoder for motion planning seems to be a smart and beneficial way to address the problem of robust motion planning. \n\n- The simulations results indicate that the proposed method can outperform other Koopman based methods and a standard MLP approach. In this way, it seems to be a significant improvement for this scenario based on this dataset."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose a Koopman-based framework for robust motion planning for trucks. For modeling and prediction of nonlinear dynamics, an uncertainty-aware Koopman operator is introduced. The main idea is that a Bayesian regression model is utilized for the approximated operator, enabling uncertainty quantification based on its posterior distribution. Furthermore, distribution shifts (such as changing road conditions) are addressed by introducing a “changing variable” which detects potential shifts via a likelihood ratio test. Finally, the framework is integrated in a sampling-based motion planer. \n\nThe main contributions are i) a novel uncertainty-aware data-driven Koopman operator (using multi-steps) based on Bayesian methods, and ii) real-time adaptation via distribution shift detections."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors provide a detailed overview about the related state of the art. Reading this section, it seems that man of the “open challenges” that this paper addresses are already solved in some way in the existing literature. I assume that the authors do address existing gaps, but they missed to cleary point out these gaps. Me recommendation: Add some sentences in section 2 explaining the remaining research gaps.\n\n2. 177: “Koopman […] under the assumption that the controls do not evolving dynamically” I’m not sure if I understand this statement correctly. My assumption is that “dynamically” refers to an input that depends on the state, i.e., a feedback controller. Even thought that might be a valid assumption, the title of the paper indicates the framework is for “autonomous trucks” where I assume we do have the feedback loop. \n\n3. Eq (4): From a data-driven perspective with a noise data set, etc, the extension to a multi-step input and output seems to make sense. However, based on the original Koopman theory, the extension seems to be unnecessary. \n\n4. While reading the paper, I stumble across Eq. (5). From my understanding, we do a non-linear mapping from the original non-linear dynamics to a high-dimensional but linear space. However, in eq (4), the authors propose to do a linear mapping form the lifted space to the original space. Maybe it is a misunderstanding, but that makes no sense to me. Furthermore, it is a general challenge to design the mapping that the inverse also exists. I do not see any evidence here that the inverse mapping might exist.\n\nMinor:\n- Citation style is often not correct (citep instead of citet, or the other way around)\n-460: “Finally, While” -> while"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Under the assumption that the noise vectors are i.i.d sampled from a multivariate Gaussian distribution, the learning of the Koopman operator using Bayesian LR model proceeds. \n- How important is this assumption? Is it valid realistically for autonomous driving?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed Koopman-based approach leveraging Bayesian learning for dynamic systems and distributional shifts in such systems is original. The paper is well written with clear presentation. Extending Koopman-operator based modeling to adapt to ucertainties is solving an improtant problem in plannig and control. \n\nThe evaluation of the method is providing a comparison with state of the art methods in the field. A realistic dataset and some other simulated environments are used to benchmark the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a Bayesian Koopman operator for modeling of dynamical systems, that incorporates uncertainty quantifica-\ntion. The goal is to make it flexible enough to deal with distributional shifts, which is achieved with an online adaptation mechanism, ensuring the operator remains responsive to changes in system dynamics. Distributional shifts are detected with a specific change variable. \n\nThe approach is applied to motion planning and evaluated via a dataset of real-world truck dynamics data under varying weather conditions, and on other simulated environments. The original approach (using a transformer-based encoder), and a variant optimized for computational efficiency (variational encoder) are compared to several other Koopman operator-based approaches incorporating uncertainty quantification. The proposed approach shows best performance in the simulations, when it is not reduced for computational performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) I believe there is a piece of information which needs to be added, in particular computational times of the proposed approach. It would be interesting to directly compare the gain in computational efficiency from applying variational encoder. A table showing the corresponding computational times will help understanding the potential of the approach for realistic application. Especially for path planning, the assumptions, the time windows (how long is the sequence), the values of the tempering parameter, and the computational time will help demonstrating the efficiency of the algorithm.\n\n2) Some justification in picking the transformer-based encoder will be helpful. Is there a real benefit of using it, given the overhead and the large amount of data needed to train it? Why using it, if afterwards they have to be reduced to variational encoders with lower performance, and with tempering parameters?\n\n3) It would be useful to see a comparion in the path planning comparison with an approach which is not based on Koopman-representation, such as hierarchical planning (A* and RRT) or policy optimization, or any other approach.\n\n\nMinor:\nL.145 - repetition\nL.200 - adopt --> adopt"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why is \\mathcal{K} of dimension \\eta \\times d?\n\n2. Any comparison with adaptive Koopman operator methods? For example, the papers below.\nhttps://arxiv.org/pdf/2202.09501\nhttps://arxiv.org/pdf/2211.09512\n\n3. How do you deal with error from finite dimensional approximation? This following paper considers it.\nhttps://arxiv.org/pdf/2410.00703"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is in general clearly written, and the idea of incorporating Bayesian learning into adaptive Koopman operators seems novel. The authors also demonstrate improved results for state prediction and motion planning under uncertainties compared to several solid baseline methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers the uncertainty and temporal distributional shift issues in the Koopman operator framework, and proposes to incorporate Bayesian learning to form adaptive Koopman operators. Experiment results on predicting truck dynamics and motion planning are shown to prove effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the idea of the paper seems novel, several key details are missing.\n\n1. The pipeline of the proposed method does not seem clear. For instance, In Fig. 1, how is the embedding from the trajectory encoder combined with the embedding from the action encoder? How does the total loss for training look like given equation (6)?\n\n2. Several assumptions in the theoretical part need justification, for example, in Lemma 3.2, “under the assumption that the number of datapoints N is large”, how is large defined and in practice can this condition be satisfied?\n\n3. The experiment description is not very comprehensive. How are the baseline methods implemented? What is the goal and setting of the motion planning problem? Why were 200 and 300 chosen to be the epoch numbers? None of such information is presented in the main paper or the appendix.\n\nA few typos in the paper:\n1. ‘To mitigate these challenges, To address these challenges’ are repetitive in the Distribution Shift paragraph on page 3.\n2. z_t should be \\tilde z_t on page 4 before equation (3)?\n3. In Table 2, BLAST is not consistent with BLAK that is used elsewhere for the proposed method?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A Bayesian approach to learning Koopman operators is introduced to tackle uncertainty estimation and temporal shifts in dynamical systems, allowing for fast, accurate, and dynamically aware motion planning."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024bayesian,\ntitle={Bayesian Learning of Adaptive Koopman Operator with Application to Robust Motion Planning for Autonomous Trucks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yIdCQFvbYe},\nnote={under review}\n}"
},
"abstract": {
"value": "Koopman theory has recently been shown to enable an efficient data-driven approach for modeling physical systems, offering a linear framework despite underlying nonlinear dynamics. It is, however, not clear how to account for uncertainty or temporal distributional shifts within this framework, both commonly encountered in real-world autonomous driving with changing weather conditions and time-varying vehicle dynamics. In this work, we introduce Bayesian learning of adaptive Koopman operator to address these limitations. Specifically, we propose a Bayesian Koopman operator that incorporates uncertainty quantification, enabling more robust predictions. To tackle distributional shifts, we propose an online adaptation mechanism, ensuring the operator remains responsive to changes in system dynamics. Additionally, we apply the architecture to motion planning and show that it gives fast and precise predictions. By leveraging uncertainty awareness and real-time updates, our planner generates dynamically accurate trajectories and makes more informed decisions. We evaluate our method on real-world truck dynamics data under varying weather conditions—such as wet roads, snow, and ice—where uncertainty and dynamic shifts are prominent, as well as in other simulated environments. The results demonstrate our method’s ability to deliver accurate, uncertainty-aware open-loop predictions for dynamic systems."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Koopman Theory",
"Motion Planning",
"Autonomous Systems"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7f36186230531360b26163176485f3ced3dced45.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bayesian Learning of Adaptive Koopman Operator with Application to Robust Motion Planning for Autonomous Trucks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yIlyHJdYV3 | A Unified Framework for Forward and Inverse Problems in Subsurface Imaging using Latent Space Translations | main | Active | Machine Learning;Inverse Problems;Full-Waveform Inversion;Seismic Imaging;ML4Science | applications to physical sciences (physics, chemistry, biology, etc.) | 3;5;5;8 | 4;5;3;4 | 2;3;2;3 | 2;2;2;3 | 2;3;3;4 | 5.25 | 4 | 2.5 | 2.25 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Could you comment on the complexity of the problems and how to \"calculate\" it when choosing the latent space size? How to choose this size for real-world applications and are there any limitations in real-world scenarios?\n2. Why have you decided to remove Invertible X-Net from the experiments in Section 5.1.2.? Have you tried different model sizes for this architecture? What is the relation between the number of parameters in encoders and decoders and the U-Net and how does it influence performance? \n3. It is not clear from the paper what are the benefits of having such a unified framework. Could you comment on the limitations? What would be your recommendation on which architecture to use and when?\n4. Could you comment on the examples in Figure 4. and Figure 8. and the performance of the models achieved in terms of MAE?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is clearly presented and well-organized.\n- Even though the GFI framework is not the first idea that unifies seismic imaging forward and inverse problems (e.g., Auto-Linear [1]), it tries to systematically characterize and unify prior research in this area. The paper answers a few questions that were open in the research field. The paper concludes the following: the size of the latent space should be decided based on the complexity of the problem, the training of the latent spaces should be influenced based on the translation objectives and jointly solving forward and inverse problems is helping the model to achieve better forward solutions. In my opinion, these are answers to important questions that were backed up by the experiments in the paper.\n- The paper introduces U-Net and IU-Net architectures for learning translations between velocity and seismic waveforms in the latent space. This leads to two new architectures for unified seismic imaging, namely Latent U-Net and Invertible X-Net. In comparison, the Auto-Linear framework's architecture comprises two separate linear layers trained for the forward and inverse translations in the latent space.\n- Extensive and systematic experiments. The proposed approach was compared to the existing state-of-the-art methods both for the forward and inverse seismic imaging problems. The proposed method employing Latent U-Net architecture outperforms the existing methods in most of the datasets in both forward and inverse directions.\n\n---\n[1] Y. Feng, Y. Chen, P. Jin, S. Feng, Y. Lin, \"Auto-Linear Phenomenon in Subsurface Imaging\n.\" ICML, PMLR:235, 2024, pp. 13153-13174."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Subsurface imaging is a technique for identifying the geophysical properties of layers underneath the Earth's surface. There are two directions: learning the mapping from velocity maps to seismic waveforms, called the forward problem, and the inverse problem which is the mapping from seismic data to velocity maps. Most of the previous deep learning-based research in the field focuses only on the inverse problem also referred to as full waveform inversion (FWI). This paper proposes a generalized forward-inverse (GFI) framework that unifies both directions, i.e., FWI and the forward problem. The framework builds on the assumption of manifolds and latent space translations, proposing two model architectures for the latter, namely Latent U-Net and Invertible X-Net. Latent U-Net architecture employs two U-Nets for translation between the velocity and waveform latent representations and vice versa, while Invertible X-Net uses a single IU-Net that simultaneously learns forward and inverse translations. The GFI framework encompasses previous works in deep learning for subsurface imaging, at the same time trying to answer questions such as the effect of latent space size, the importance of manifold learning and the value of jointly solving forward and inverse problems. The models were evaluated on the synthetic OpenFWI dataset and their generalization ability was tested on two real-world-like datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Section 5.1.1. \"What is the effect of latent space sizes on translation performance?\" is kind of vague. It states that the ideal size of the latent space should be decided based on the complexity of the problem being solved. It is not clear how to determine the complexity in real-world applications. I think the authors should be more specific in this section.\n- In Section 5.1.2. \"Do we need complex architectures for translations?\", there is a comparison between large and small Latent U-Net. The only difference was the complexity (size) of the U-Nets for the latent space translation and the influence of having skip connections. What is the relation between the number of parameters in encoders and decoders and the U-Net and how does it influence performance? The Invertible X-Net was not included in the experiments and only one size was used throughout the paper. It would be useful to see how its size influences the performance.\n- The Invertible X-Net model achieves worse results than the Latent U-Net model for the inversion problem and often fails to outperform other state-of-the-art methods. In contrast, it outperforms most of the methods for the forward problem. It is not clear from the paper what are the benefits of having such a unified framework. I think the limitations should be stated more clearly in the paper.\n- In the results section, in qualitative comparison, Figure 4. and Figure 8. are missing a measure of error (at least MAE). The picked images should have approximately the same MAE as the reported results in the tables presenting a realistic case and fair comparison. It is not clear whether the presented results were in a sense cherry-picked, e.g., the proposed method achieved better results than the average and other methods below the average for the illustrated example.\n- Instead of having a qualitative comparison of the methods on the two real-world-like datasets, I think it would be much more representative to have a quantitative comparison. Here, the presented examples might also be cherry-picked."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "please see my concerns in the Weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper presents a unified architecture for addressing both forward and inverse subsurface imaging problems, while previous studies typically focus only on inversions. Modeling these problems together within a single framework could offer valuable insights, though it may also introduce potential challenges."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores deep learning methods to address the forward and inverse problems in subsurface imaging by mapping between velocity maps and seismic waveforms. The authors introduce the Generalized Forward-Inverse (GFI) framework, which unifies past approaches and addresses open questions around latent spaces, manifold learning, and model complexity. They propose two novel architectures, Latent U-Net and Invertible X-Net, which achieve state-of-the-art results on synthetic datasets and show promising zero-shot performance on real-world-like datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper’s narrow focus on subsurface imaging may limit its relevance for a broader audience at ICLR. If there are analogous problems in other fields where their framework could be applied? \n2. Comparisons to prior work are outdated, with main competing methods from 2019, like InversionNet and VelocityGAN. The authors are encouraged to consider more recent advances, such as “Physics-Informed Robust and Implicit Full Waveform Inversion Without Prior and Low-Frequency Information (2024)” and other state-of-the-art methods from 2023, for a fairer and more relevant assessment.\n3. While the authors aim to model the forward problem using neural networks, it is uncertain if the learned mapping can capture complex physical dynamics, as seen in the governing equations of seismic data. Extensive out-of-domain experiments are recommended to assess the model’s robustness and generalizability to real-world data.\n\n4. The paper's structure could be streamlined to highlight the proposed methodology. Currently, a substantial background section precedes a brief description of the methods. Specific sections of the introduction and related work and preliminary knowledge could be condensed.\n\n5. The model specifications lack detail; adaptations made to standard UNet for subsurface imaging and the mechanisms enabling X-Net’s invertibility are unclear. Clarifying these aspects and specifying when each model might be preferred would improve understanding.\n6. Experimental comparisons with recent baselines (published within the last 2-3 years) would add rigor to the evaluation, as several recent methods could serve as relevant benchmarks. To name a few, “Physics-Informed Robust and Implicit Full Waveform Inversion Without Prior and Low-Frequency Information 2024”; “Full-waveform inversion using a learned regularization 2023”; “Wasserstein distance-based full-waveform inversion with a regularizer powered by learned gradient 2023”; “Full-waveform inversion using a learned regularization 2023”, and more.\n7. Key numerical results in Section 5 are deferred to the appendix, which reduces clarity. Summarizing key findings directly in the main text would enhance readability.\n8. The Invertible X-Net significantly underperforms other models, raising questions about its ideal use cases. Further context on scenarios where the X-Net's design offers advantages would be valuable."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\t(Quality) High quality of experimental evaluation: Experiments are extensive over a range of datasets including an extensive evaluation of out-of-distribution performance and qualitative results are provided in addition to quantitative results. Hence, the conclusion of generally improved performance seems sound. \n2.\t(Significance) The improvements in performance seem to be significant, however interpretability of how significant could be improved (see weaknesses point 5)\n3.\t(Significance) The finding that jointly solving forward and inverse problems is helpful for the forward problem but not for the inverse problem is interesting. \n4.\t(Originality) The proposed unifying framework helps to quickly understand the different existing approaches and will be helpful for future works to expand the framework or propose improvements within it.\n5.\t(Clarity) The paper is overall well written and easy to follow also for researches not actively involved in the field of subsurface imaging."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new training scheme combined with two new model architectures for the problem of learning the forward and inverse map in deep learning for subsurface imaging. \nThe training scheme consists of jointly learning the encoders and decoders for the two domains (velocity maps and seismic waveforms) with the translation network transforming the latent space representations from one domain to another.\nThe two architectures differ in the characteristics of the translation network with one consisting of two separate unidirectional U-nets (latent U-net) and the other of a single bidirectional invertible network (invertible X-net).\nBoth architectures achieve a what seems to be significant improvement in performance over existing works.\nFinally the paper formulates a unifying framework in which existing approaches can be categorized and distinguished."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major concerns\n\n1. (Clarity) The experiment 5.1.3 addresses the question ‘should we train latent spaces solely for reconstruction?’, which I assume corresponds to the question 3) formulated in line 083 ‘what is the role of manifold learning?’. \n1.1 For increased clarity I’d suggest not changing the phrasing of the questions. \n1.2 The setup is unclear to me. What are training fractions? A fraction of the training set or a fraction of the training time? And why is it instructive to look at 5% and 10% training fraction? \n1.3 So, is the conclusion form the experiment that learning manifolds is not important? It is difficult to grasp the conclusion if the wording is not consistent. Is “influence the training of the latent spaces based on the translation objectives” identical to “not learning manifolds”? \n1.4 To better answer the question I think the following baseline is missing that combines the two alternatives (reconstruct then translate and directly translate) into directly learning the translation networks while having at the same time reconstruction losses (basically what is described in lines 243-245). Seeing how this combination performs would help answering the question if manifold learning helps. \n2. (Originality) The idea of the Latent U-net seems to be very similar to the InversionNet following the comparison in Table 1. \n2.1 The Modelling Mode is the same because the forward and inverse part of the Latent U-net are completely disjoint and hence correspond to an InversionNet and a ForwardNet (as long as no manifold learning is performed). \n2.2 While the latent U-net allows manifold learning, the experimental results pertain to the case without manifold learning same as the InversionNet. \n2.3 Latent space translation is said to be identity for the InversionNet and U-net for the Latent U-net, but couldn’t we also just claim some middle layers of the InversionNet to perform latent space translation? \n2.4 The size of the latent space is low in both cases. \n--> So to my understanding the difference between the two boils solely down to the architectural design and size of the network but not to any conceptual differences. If that is the case, it should be stated more directly in the paper and the exact architectural differences should be explained in more detail. \n3. (Clarity) From Section 5.1.4 it is not entirely clear what the answer to the question raised in the paragraph (Is it useful to jointly solve forward and inverse problem) is. Is the answer that no it is not useful for learning the inverse map but yes it is useful for learning the forward map (both answers based on the findings in Figure 3)?\n\nMinor concerns\n\n4. (Clarity) Please provide links to the exact Section in the Appendix rather than just referencing the entire appendix (e.g. lines 308, 311, 314, 377, 388). \n5. (Significance) Include quantitative scores in the qualitative examples (e.g. in Figures 4 and 8) to give a better understanding of how differences in scores translate to significantly perceptible differences."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Invertible Neural Networks (INNs) often have limited flexibility in adjusting the dimensions of input and output, the number of layers, filters, and convolutional kernel sizes. These constraints can impede performance for complex tasks.\n\nWhen mapping between velocity maps and seismic waveforms—data from two distinct domains (one spatial and the other spatio-temporal), why Invertible X-Net could demonstrate superior performance?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work is the first to investigate key factors influencing mapping performance.\n2. Extensive experiments were conducted to conclude the influence of these factors.\n3. Two novel networks are introduced for mapping between velocity maps and seismic waveforms, achieving SOTA performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work summarizes recent advances in deep learning-based mapping between velocity maps and seismic waveforms, introducing a unified framework (GFI) to systematically characterize prior research. Using GFI, key factors affecting mapping performance are investigated, alongside the proposal of two novel model architectures. Comprehensive experiments reveal the impact of these factors, with the proposed models achieving state-of-the-art (SOTA) performance in both forward and inverse problems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper resembles a survey and might be more suitable for journal submission.\n2. A significant portion of the content is placed in the appendices, making it less reader-friendly.\n3. Aside from extensive experiments on factor influences, the work offers limited novelty and lacks theoretical analysis.\n4. Additional factors should be investigated, such as the robustness of data noise across different network architectures."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Latent space Models for jointly solving forward and inverse problems for subsurface imaging"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Unified Framework for Forward and Inverse Problems in Subsurface Imaging using Latent Space Translations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yIlyHJdYV3},\nnote={under review}\n}"
},
"abstract": {
"value": "In subsurface imaging, learning the mapping from velocity maps to seismic waveforms (forward problem) and waveforms to velocity (inverse problem) is important for several applications. While traditional techniques for solving forward and inverse problems are computationally prohibitive, there is a growing interest to leverage recent advances in deep learning to learn the mapping between velocity maps and seismic waveform images directly from data. Despite the variety of architectures explored in previous works, several open questions still remain unanswered such as the effect of latent space sizes, the importance of manifold learning, the complexity of translation models, and the value of jointly solving forward and inverse problems. We propose a unified framework to systematically characterize prior research in this area termed the Generalized Forward-Inverse (GFI) framework, building on the assumption of manifolds and latent space translations. We show that GFI encompasses previous works in deep learning for subsurface imaging, which can be viewed as specific instantiations of GFI. We also propose two new model architectures within the framework of GFI: Latent U-Net and Invertible X-Net, leveraging the power of U-Nets for domain translation and the ability of IU-Nets to simultaneously learn forward and inverse translations, respectively. We show that our proposed models achieve state-of-the-art (SOTA) performance for forward and inverse problems on a wide range of synthetic datasets, and also investigate their zero-shot effectiveness on two real-world-like datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Machine Learning",
"Inverse Problems",
"Full-Waveform Inversion",
"Seismic Imaging",
"ML4Science"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a256318e40e14357e14ab8d9129675625fbf2bbc.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/7403bce05c40e1fa6d3c4bbacb361819bbd0f1ca.zip"
},
"title": {
"value": "A Unified Framework for Forward and Inverse Problems in Subsurface Imaging using Latent Space Translations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yJ9QNbpMi2 | Brain Mapping with Dense Features: Grounding Cortical Semantic Selectivity in Natural Images With Vision Transformers | main | Active | fMRI;visual cortex;neuroscience;cognitive science;brain;vision transformer;semantic selectivity | applications to neuroscience & cognitive science | 5;6;6;6;8 | 2;4;3;4;4 | 2;3;3;3;4 | 3;3;3;3;4 | 3;3;3;3;4 | 6.2 | 3.4 | 3 | 3.2 | 3.2 | 0.663403 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Was the same adapter used in Figures 1a and 1b? Also, were the adapter parameters frozen in Figure 1b? If they were frozen, why not use clean dense features to train the adapter?\n\nHow does the quality of the dense features impact the adapter's behavior? I suggest adding an ablation study to explore this effect."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This is clearly written paper and makes a clear contribution. The idea of isolating specific visual features to determine selectivity effects in different cortical areas is novel and interesting. The proposed method can be used to explore the selectivity of higher visual cortex with respect to localized scene structure and image properties. This work achieves promising open vocabulary CLIP-based segmentation results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a method, BrainSAIL, which aims to disentangle complex images into their semantically meaningful components and locate them to brain voxels or regions. With the prevalence of pretrained models (trained on very large amounts of data), deep learning offers a promising way to explore how semantics are organized in the brain cortex. Compared to prior methods, BrainSAIL focuses on selectivity in single-object images at the broad category level, thereby enabling a richer decomposition grounded in the full semantic complexity of natural visual experiences."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The current experimental comparisons (open-vocabulary segmentation) are limited, which restricts a comprehensive evaluation of the proposed method. The proposed approach appears to closely resemble BrainSCUBA. Certain technical details are unclear, which makes it challenging to fully understand or reproduce the method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. My understanding is that the voxel-wise adapters predict a scalar brain response from an M-dimensional output of the backbone model. How is the voxel-wise adapter then applied to the dense features? I am thinking the dense features have a shape (H, W, M), and the adapter transforms the last dimension to get a scalar (H, W) activation map (figure 1.b). I found this a bit unclear in the paper.\n\n2. The learning free distillation is applying random spatial transformations to the input image, applies the inverse transformation to the features, and then averages them all together. Is my understanding correct? \n\n3. I do not understand what equations 3-5 are about.\n\n4. Figure 4 states that the UMAP basis is computed image-wise. I found it unclear how this was done, and what is the significance of an image-wise UMAP?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well written and clear. The main contribution is the use of model features to create voxel and image-wise spatial contribution maps. This is quite useful as an interpretation technique. The use of pixel-wise metrics like depth, saturation, and luminance are a powerful extension of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Methods:\n- Embeddings from deep neural networks are used to predict fMRI recorded brain-responses.\n- Features are extracted from the model and input to a denoising process. The smoothed features are then input to the fMRI encoder (I think) to create spatial attribution maps.\n- UMAP is applied to the encoder parameters, which uncovers previously reported category selective regions for faces, places, words, food, and bodies.\n- The spatial attribution maps are correlated with pixel depth, color saturation, and color luminance to create voxel-wise correlation values for these image properties. Place areas were found to be more correlated with depth, food areas with color, and OPA with color/luminance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "All of the investigated category-selective areas (food, places, words, faces, bodies) are previously reported. Would be interesting so see this tested on less common or more fine-grained categories.\n\nI found the explanation of the methods is a bit confusing (see questions). Could use clearer high-level descriptions before diving into the finer details.\n\nI think there should be a visual comparison of attribution maps for different voxel categories with the same images. I was able to compare a few in figure 3 since there are some repeated images in words/food and face/body voxels. If the method is working then the word voxels should highlight words more than voxels for other categories. I would expect to see the face voxels highlighting faces a lot more than body parts, and the opposite for body voxels. However in the picture of the baby shared between the body/face voxels I don't see much of a difference in their attribution maps."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A. Public Dataset used."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- “We validate this dense feature mapping method on a large-scale fMRI dataset consisting of human participants viewing many thousands of diverse natural images that span a wide range of semantic categories and visual statistics (Allen et al.,2022).”\n - The work by Kamitani recently showed that UMAP gave something like 40 clusters of semantic diversity at a broad level, forcing us to re-evaluate our assumed understanding of just how semantically varied NSD actually is. Do you believe the findings in the \"spurious reconstructions\" paper have an implication on the notion of semantic diversity as described in your work?\n\n- The introduction has repeated references to BrainSAIL’s dense embedding framework but I’m not sure why this point is continually brought up as the vision model embeddings on the natural image are dense to begin with and I can’t see that the implementation of a step to make embeddings more dense is applied anywhere, so I was a bit lost on this point. You have the adapters and a linear probe and everything always seems to be in a dense (not non-sparse) setting. Some clarification on this point would be welcome in the paper as you seem to mean something different to how I understand the same phrasing.\n\n- Was there supposed to be a section explaining the data choices for the brain data (surface vs voxels, subject-space vs template-space) etc.? This is kind of left to be assumed from other sections of the data but I think a small section in the supplemental outlining the data usage choices more specifically would render the procedure more complete with regard to necessary experimental details. For example, the fact you’re using the masks in Jain et al. (2023) implies working in standard (normalised) template space, but I sort of expected being able to verify this explicitly via some technical description somewhere in the supplemental materials.\n\n- You described how you selected varying functional regions from the dataset (food masks in Jain et al. (2023) and t > 2 in the Stigliani fLoc dataset) but the selected voxels being plotted, the selection of them, is not described and also a piece of missing information. Was this taken by looking at noise ceiling values calculated in NSD and if so, what value was used as the threshold?\n- Lines 427-429: you identify the areas surrounding the FFA to be selective for colour, and point out this corresponds to the food area previously identified by others. Can you elaborate on the apparent contradiction between your assumption that this is actually a food area (identified earlier in the paper) or is it one driven by high colour-sensitivity and therefore might be confounded with highly colourful food images?\n- Figure 7: I don’t understand how these values are calculated and the y-axis is not labelled. I wanted to ask you to be more precise on what “spatial similarity” means in terms of the method use to derive the results.\n- Not quite sure the logic for why the procedure described with the learning-free distillation module results in denoised features. Why does applying a transform, running an image through an encoder, then projecting the results back again, result in denoised features? It seems to work but I just felt like the method was explained in the \"how\" and not \"why\" it works. The paper could be made stronger by adding in some intuition in the relevant section."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- The authors show alternatives to high-frequency artefact removal that are more computationally efficient than other solutions out there, namely by recognising the existing common method of adding register tokens in ViTs can be replaced with their proposed learning-free distillation module, which avoids the need for (computationally demanding) additional training.\n- The authors show that this method is a good way to target high confounds in naturalistic images by studying things like colour confounding with food images, luminance modulations in inside/outside scenes, depth information in varying images containing varying “reach spaces” etc.\n- The paper highlights potential equivalence between representations derived under varying training algorithms and questions what is being learned and the generalisation of features. I found this to be timely as recent discussions with LLMs and the utility of methods like BrainScore have been brought into question. I think we need to be asking these questions across the board and I appreciate this final section of the paper that raises this issue.\n- Visualisations are very nice and clear, making some more computationally dense text a bit more clear with reference to specific examples that underlie various findings (e.g. the effects of no language supervision in DINO and the downstream affects on the derived image maps)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to explore the semantic topography in human visual cortex using fMRI. The main goal is to disambiguate the typically confounded nature of multiple categories which is a feature/issue in naturalistic images. The trend towards naturalistic datasets in recent years necessitates more advanced methods to be able to deal with such confounds and this work takes a good step forward in the right direction. This paper then goes on to look at what regions of human visual cortex are driven by low-level image features, providing a complementary view into how the authors’ method can be used beyond studying categorical selectivity. This method provides a good solution to explore such potential confounding effects in determining what is driven by high or low level visual information.\n\nIt was a very nice paper to read and shows the authors' clear expertise and attention to detail in terms of the questions we should be asking in the community."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- PDF is nearly 50 pages long. While ICLR allows for 10 main pages and unlimited supplementary information, the density of what is provided is a bit on the extreme side. In light of this, I still found myself searching for implementational information that I felt was missing and could be added in a revised version (see *Questions*)\n- I reformulated many of the comments I first wrote as weaknesses into more directed questions in the section below, hoping that these points might be more easily addressable and will hopefully make it into the camera-ready version such that readers have a better experience of the paper by benefiting from some of the points of clarity that I ran into while reading."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "DINO trained without language guidance shows high sensitivity to low-level visual features, does this hurt its performance on high-level semantic selection?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tBrainSAIL can obtain dense embeddings without artifacts and additional training through an efficient learning distillation module for high-throughput presentation of the visual cortex on large datasets.\n2.\tCompared to other studies that used simplified images, BrainSAIL can isolate specific image regions that activate different cortical areas when viewing natural scenes, providing a more complete description of how real-world visual stimuli are represented and processed.\n3.\tThe experiments are very sufficient, especially in the appendix.\n4.\tThis work demonstrated that an artifact-free dense feature map can be derived to explore high-throughput selectivity in the visual cortex."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors proposed a method called BrainSAIL to deal with the problem of multiple categories co-existing in the natural images, combining the image embeddings and dense visual features and further identifying specific sub-regions of each image, which reflects the selectivity patterns in different areas of the higher visual cortex. BrainSAIL can realize the semantic attributes and positioning of related objects in complex images, which helps decompose the high-level visual representation of the human brain."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThis method has a high model dependency (e.g., CLIP, DINO, SigLIP). While this improves efficiency, it limits flexibility to adapt feature extraction to the specific needs of neural regions, potentially constraining the granularity of semantic selectivity.\n2.\tThe method's fMRI training uses specific datasets (e.g., NSD), which may introduce bias, limiting generalizability across different populations or visual tasks—especially when the dataset’s images or semantic information don't cover the full range of possible visual stimuli."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How is it possible to verify that the method is not dominated by the foundation model representation of image rather than actually representing brain semantic representations?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The method was explained clearly\n- Literature review seems exhaustive\n- Claims SOTA (although I couldn't verify this)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a method for mapping the brain activity obtained using fMRI to semantically meaningful features in image space using vision transformer backbones and a parameter-free distillation process for denoising."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-Abstract strats with features of BrainSAIL (first few sentences) before getting into WHAT BrainSAIL is.\n\n-It is not clear to me after training the backbone and distillation, how much of the output result is actually driven by brain data or completely rely on features of the image learned by the frozen foundation model.\n\n-SOTA was claimed but I wasn't able to find any comparison to other relevant methods in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose an efficient semantic distillation module and leverage ViTs to investigate selectivity in human visual cortex."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024brain,\ntitle={Brain Mapping with Dense Features: Grounding Cortical Semantic Selectivity in Natural Images With Vision Transformers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yJ9QNbpMi2},\nnote={under review}\n}"
},
"abstract": {
"value": "Advances in large-scale artificial neural networks have facilitated novel insights into the functional topology of the brain. Here, we leverage this approach to study how semantic categories are organized in the human visual cortex.\nTo overcome the challenge presented by the co-occurrence of multiple categories in natural images, we introduce BrainSAIL (Semantic Attribution and Image Localization), a method for isolating specific neurally-activating visual concepts in images. BrainSAIL exploits semantically consistent, dense spatial features from pre-trained vision models, building upon their demonstrated ability to robustly predict neural activity. This method derives clean, spatially dense embeddings without requiring any additional training, and employs a novel denoising process that leverages the semantic consistency of images under random augmentations. By unifying the space of whole-image embeddings and dense visual features and then applying voxel-wise encoding models to these features, we enable the identification of specific subregions of each image which drive selectivity patterns in different areas of the higher visual cortex. This provides a powerful tool for dissecting the neural mechanisms that underlie semantic visual processing for natural images. We validate BrainSAIL on cortical regions with known category selectivity, demonstrating its ability to accurately localize and disentangle selectivity to diverse visual concepts. Next, we demonstrate BrainSAIL's ability to characterize high-level visual selectivity to scene properties and low-level visual features such as depth, luminance, and saturation, providing insights into the encoding of complex visual information. Finally, we use BrainSAIL to directly compare the feature selectivity of different brain encoding models across different regions of interest in visual cortex. Our innovative method paves the way for significant advances in mapping and decomposing high-level visual representations in the human brain."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"fMRI",
"visual cortex",
"neuroscience",
"cognitive science",
"brain",
"vision transformer",
"semantic selectivity"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e89b5e23116723da42e1b3d816d6311a05294d54.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to neuroscience & cognitive science"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Brain Mapping with Dense Features: Grounding Cortical Semantic Selectivity in Natural Images With Vision Transformers"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yJAk0n0NyU | BlockDance: Reuse Structurally Similar Spatio-Temporal Features to Accelerate Diffusion Transformers | main | Active | Diffusion Models;Efficient Image and Video Generation | generative models | 5;5;5;5 | 5;4;5;3 | 2;2;3;3 | 2;1;2;2 | 2;3;3;3 | 5 | 4.25 | 2.5 | 1.75 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Key concerns are listed in the weakness section, which mainly involves the novelty, experiment, and demonstration issues. Addressing these concerns, especially regarding the experimental aspect, is most valuable to change my rating. Some demonstration questions are also summarized above, clarifications for these questions will also be appreciated."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea is straightforward and easy to implement, and the paper is well-written. \n\nThe intuitive demonstrations illustrate the generation dynamics of the diffusion transformer, supporting the effectiveness of the proposed strategy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study revisits the feature correlation in diffusion transformers and proposes a straightforward, training-free strategy to accelerate generation by caching and reusing features across time steps. The proposed strategy demonstrates effectiveness through experiments on several generation architectures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the block-wise optimization approach differs from DeepCache, i.e., the authors arguing that DeepCache does not specifically aim at highly similar features for reuse, the core concept of caching intermediate features for reuse is to some extent similar. The implementation and experimental impact of reusing similar features in this work appear somewhat marginal. See comments below for more details.\n\nExperimental Results: While the proposed method shows slight improvements over DeepCache, the advantage remains limited, as seen in Tables 1, 2, and 4.\n\nAs summarized in the Related Works section, many existing approaches aim to reduce the number of sampling steps. Although this study involves dropping specific diffusion blocks within the network, it bears substantial similarity to step-reduction strategies. Comparisons with previous works (e.g., [1]) appear insufficiently comprehensive. Please note [1] is just one of many possible relevant comparisons.\n\nThe research first introduces a manually designed caching strategy, BlockDance, which I would like to consider as a vanilla baseline, and subsequently an instance-wise learnable strategy, BlockDance-Ada, for adaptively reusing features. While this adaptive reuse approach seems reasonable and to be a highlight of the paper, its demonstrated improvement seems limited (Table 4).\n\nMinor Comments:\n\nClaimed Conclusion: “Unlike .., BlockDance prioritizes the identification of the most structurally similar features, referred to as Structurally Similar Spatio-Temporal (STSS) features” Since MSE is used to quantify similarity, it is unclear why these features are referred to as the most ‘structurally’ similar. Could the authors clarify how these features represent 'structural' characteristics?\n\nClaimed Demonstration: “while the deeper blocks shift their focus towards generating more complex high-frequency texture information, such as clouds and crowds within depth of field.” This difference is not immediately apparent to me. Could the authors indicate these distinctions with arrows or highlights?\n\nClaimed Summary: “Several studies (Ma et al., 2024b; Li et al., 2023) have unearthed the existence of redundant features in U-Net-based diffusion models, but their coarse-grained feature reuse strategies include those low-similarity features, leading to structural distortions and text-image misalignment.” Could experimental evidence be provided to support this conclusion?\n\nFigure 5: The text font is too small, which could be enhanced for readability.\n\n[1] Li L, Li H, Zheng X, et al. Autodiffusion: Training-free optimization of time steps and architectures for automated diffusion model acceleration, ICCV, 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The training cost of BlockDance-Ada should be analyzed with learn-to-cache [1]. Learn-to-cache techniques involve a significant training phase to optimize cache utilization, potentially leading to longer overall preparation times before deployment. However, a detailed cost comparison highlighting computation hours, resources needed, and efficiency gains would provide a clearer understanding of the cost benefits of BlockDance-Ada.\n\n2. The extra cost associated with employing a reward model during the inference stage, particularly in the BlockDance-Ada variant, is important to evaluate. While BlockDance-Ada introduces adaptiveness for higher content quality, this incurs a computational overhead. \n\n\n[1] Learning-to-Cache: Accelerating Diffusion Transformer via Layer Caching"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The BlockDance-Ada component presents a novel approach by enabling an instance-dynamic caching strategy, which intelligently adapts to the varying complexity of generated content. This adaptive mechanism sets it apart from traditional static caching methods and demonstrates the use of reinforcement learning techniques for efficient computation management.\n\n2. The approach has undergone extensive validation across multiple datasets, including ImageNet, COCO2017, and MSR-VTT. These experiments showcase the algorithm’s generalizability and effectiveness in diverse settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "BlockDance is introduced as a novel, efficient algorithm designed to accelerate Diffusion Transformers (DiTs) without requiring additional training. By caching and reusing Structurally Similar Spatio-Temporal (STSS) features, BlockDance reduces redundant computations during inference, making it adaptable and compatible with a range of models in a plug-and-play manner. The approach also explores instance-specific optimization through BlockDance-Ada, which uses reinforcement learning to adaptively conserve computation based on the complexity of generated content. Extensive validations on datasets like ImageNet, COCO2017, and MSR-VTT across tasks such as class-conditioned generation, text-to-image, and text-to-video demonstrate BlockDance’s ability to achieve 25%-50% faster inference while maintaining quality, with BlockDance-Ada further enhancing output quality under the same acceleration conditions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper does not adequately address prior research efforts such as FORA [1], Delta DiT [2], and PAB [3]. The motivation highlighted in Figure 2, which describes the reduction of redundant computations, has been extensively covered in these works. Additionally, these relevant studies are not referenced or compared in the experimental section, limiting the contextual understanding of how BlockDance differentiates or builds on these methodologies.\n\n2. The first contribution appears to have a significant overlap with existing work, suggesting that the novelty is relatively incremental. Without a deeper analysis or clearer distinction from prior caching strategies, the innovation might seem limited compared to the foundational ideas already established.\n\n3. The practical impact of BlockDance seems constrained. As seen in Table 4, the adaptive strategy BlockDance-Ada only achieves about a 5% further reduction in computation compared to the standard BlockDance with 𝑁=2, indicating that the improvements, while present, may not justify the additional complexity introduced.\n\n\n[1] FORA: Fast-Forward Caching in Diffusion Transformer Acceleration\n\n[2] $\\Delta $-DiT: A Training-Free Acceleration Method Tailored for Diffusion Transformers\n\n[3] Real-Time Video Generation with Pyramid Attention Broadcast"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- When training BlockDance-Ada, is it possible to learn not only the timestep but also the caching index? Since each Block in DiT performs different tasks during denoising, this could be very beneficial. \n- Can BlockDance be applied to UNet-based Diffusion Models as well, rather than just DiT?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper is well-written and easy to understand. \n- Experiments were conducted on various datasets, including video generation datasets like Open-Sora, not just for image generation.\n- The proposed BlockDance-Ada is novel."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method called BlockDance to accelerate the denoising process of DiT. BlockDance caches specific feature maps from the DiT model, allowing them to be reused at the next timestep and thereby skipping computations from previous layers. Additionally, the authors’ proposed BlockDance-Ada enables the use of an optimal caching strategy tailored to each data sample."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Novelty Issue:** Both the proposed feature similarity and caching methods have already been introduced in previous works [1], [2]. The main contribution of this paper appears to be the proposal of BlockDance-Ada. However, the experimental results provided are limited, and the performance improvement is marginal. For instance, in Table 4, BlockDance-Ada shows a 0.15s latency reduction compared to BlockDance (N=2), but with a 0.02 increase in FID. This minor improvement may simply reflect a trade-off between latency and FID. \n- **Lack of Comparative Experimental Results:** The paper does not provide sufficient evidence that BlockDance is genuinely faster than existing methods. To accurately compare performance, an evaluation on a Pareto curve with latency and FID as the axes would be more informative. \n- **Lack of Persuasiveness in Fig. 5:** Figure 5 is unconvincing. At first glance, it suggests that the diffusion model maintains a high level of similarity across nearly all timesteps.\n- **Missing Comparison with Recent Methods:** The paper lacks a comparison with recent caching-based acceleration methods, such as [2] and [3]. \n\n[1] Xu, Mengwei, et al. \"Deepcache: Principled cache for mobile deep vision.\" Proceedings of the 24th annual international conference on mobile computing and networking. 2018.\n\n[2] So, Junhyuk, Jungwon Lee, and Eunhyeok Park. \"FRDiff: Feature Reuse for Universal Training-free Acceleration of Diffusion Models.\" arXiv preprint arXiv:2312.03517 (2023).\n\n[3] Li, Senmao, et al. \"Faster diffusion: Rethinking the role of unet encoder in diffusion models.\" arXiv e-prints (2023): arXiv-2312."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How was the inference latency evaluated? Are they all performed on A100 GPU? How is it affected by parameters like batch size?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. easy to follow\n\n2. BlockDance-Ada can significantly recover the performance drops of BlockDance.\n\n3. The experiments include DiT, PixArt, and open-sora, making the evaluation comprehensive and convincing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a training-free acceleration method for DiTs based on caching, named BlockDance. It caches and uses similar features during the later stages of denoising to reduce computation. BlockDance-Ada further uses a lightweight network to learn instance-specific acceleration strategies. They achieve 25-50% acceleration."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed method accelerates DiT-based models by only about 30% while incurring obvious performance degradation, making its practical application questionable especially compared to other techniques like quantization and distillation. Even the BlockDance-Ada version in Table 4 does not show impressive results.\n\n2. BlockDance-Ada leverages reinforcement learning. What's the training cost?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024blockdance,\ntitle={BlockDance: Reuse Structurally Similar Spatio-Temporal Features to Accelerate Diffusion Transformers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yJAk0n0NyU},\nnote={under review}\n}"
},
"abstract": {
"value": "Diffusion models have demonstrated impressive generation capabilities, particularly with recent advancements leveraging transformer architectures to improve both visual and artistic quality. However, Diffusion Transformers (DiTs) continue to encounter challenges related to low inference speed, primarily due to the iterative denoising process.\nTo address this issue, we propose BlockDance, a training-free approach that explores feature similarities at adjacent time steps to accelerate DiTs.\nUnlike previous feature-reuse methods that lack tailored reuse strategies for features at different scales, BlockDance prioritizes the identification of the most structurally similar features, referred to as Structurally Similar Spatio-Temporal (STSS) features. These features are primarily located within the structure-focused blocks of the transformer during the later stages of denoising.\nBlockDance caches and reuses these highly similar features to mitigate redundant computation, thereby accelerating DiTs while maximizing consistency with the generated results of the original model.\nFurthermore, considering the diversity of generated content and the varying distributions of redundant features, we introduce BlockDance-Ada, a lightweight decision-making network tailored for instance-specific acceleration.\nBlockDance-Ada dynamically allocates resources and provides superior content quality.\nBoth BlockDance and BlockDance-Ada have demonstrated effectiveness across diverse generation tasks and models, achieving an acceleration ranging from 25\\% to 50\\% while preserving generation quality."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion Models",
"Efficient Image and Video Generation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a9694deb20cbee77f147d61a2991baa8664067ec.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "BlockDance: Reuse Structurally Similar Spatio-Temporal Features to Accelerate Diffusion Transformers"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yJduhi9mDQ | HÖLDER PRUNING: LOCALIZED PRUNING FOR BACKDOOR REMOVAL IN DEEP NEURAL NETWORKS | main | Active | Holder Pruning;Holder iteration defense;backdoor attacks;Deep Neural Networks;backdoor defense | other topics in machine learning (i.e., none of the above) | 3;3;5;5 | 4;4;5;4 | 2;2;4;2 | 2;2;2;2 | 2;2;4;3 | 4 | 4.25 | 2.5 | 2 | 2.75 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "This paper presents a backdoor removal framework based on the Hölder constant, which measures network sensitivity to localized regional changes. Although the approach is intuitive and the evaluation results show promise, there are several places that need improvement to enhance the overall quality of the paper. My questions are as follows.\n\nThe defender threat model described in section 2.5 appears to be weak and susceptible to vulnerabilities. It relies on the assumption of having access to a “clean feature extractor,” which is exclusively trained on untainted samples for the defender's use. In this setup, the authors assume that the defender trains only the classification head on top of the fixed, clean feature extractor for their specific task, with the data used to train this classification head being poisoned. This assumption raises several concerns. First, what data is assumed to train this “clean feature extractor”? Does it need to be trained on the same dataset as the downstream task? According to the authors in Line 24, the considered clean feature extractors are large-scale, pretrained vision encoders such as CLIP. However, such vision encoders already demonstrate robust zero-shot classification capabilities and achieve high accuracy on datasets like CIFAR-10 and CIFAR-100. In these cases, training an auxiliary MLP classifier becomes unnecessary, and mitigating potential backdoor attacks at this stage may lack practical value. Assuming the existence of a clean feature extractor also carries potential risks. Recent studies, such as [1], have demonstrated the feasibility of poisoning large-scale web datasets like LAION[2], which are frequently used for pretraining vision encoders. Moreover, even if trained on clean data, natural backdoors [3] can introduce unintended behaviors that could affect the observations reported in the paper. Thus, guaranteeing the cleanliness of the feature extractor is difficult.\n\nBased on my understanding, the proposed pruning method focuses solely on inspecting neurons within the MLP classifier, under the assumption that the feature extractor is clean. However, the specific configurations used for the baseline methods remain unclear. Most pruning-based backdoor removal methods allow for manual configuration of the layers to inspect. For a fair comparison, all baseline methods should also be limited to operating on the MLP layers only, rather than on the entire network, including the feature extractor. The current text does not clarify the setup chosen for the baselines. If the authors evaluated the baseline methods across the entire network, it could lead to biased results and potentially incorrect conclusions, such as the claim of a 1000x speedup.\n\nI noticed that the ASR (Attack Success Rate) of several well-known attacks reported in Table 1 is significantly lower than what is typically found in the literature. For instance, the ASR for BadNets, SIG, and WaNet are only 82.9%, 43.6%, and 20.4%, respectively, which is much lower than what has been reported in other backdoor removal studies, such as [4,5,6]. Could the authors clarify the underlying reasons for these discrepancies?\n\nThe evaluation results on more advanced backdoor attacks are missing, such as [7, 8, 9]\nThe ablation study for the value of $\\alpha$ used in Hölder constant is missing.\n\n===\nReference \n\n[1] Carlini, Nicholas, et al. \"Poisoning web-scale training datasets is practical.\" 2024 IEEE Symposium on Security and Privacy (SP). 
IEEE, 2024.\n\n[2] Schuhmann, Christoph, et al. \"Laion-400m: Open dataset of clip-filtered 400 million image-text pairs.\" arXiv preprint arXiv:2111.02114 (2021).\n\n[3] Tao, Guanhong, et al. \"Backdoor vulnerabilities in normally trained deep learning models.\" arXiv preprint arXiv:2211.15929 (2022).\n\n[4] Cheng, Siyuan, et al. \"UNIT: Backdoor Mitigation via Automated Neural Distribution Tightening.\" arXiv preprint arXiv:2407.11372 (2024).\n\n[5] Zhu, Mingli, et al. \"Enhancing fine-tuning based backdoor defense with sharpness-aware minimization.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023.\n\n[6] Qi, Xiangyu, et al. \"Towards a proactive {ML} approach for detecting backdoor poison samples.\" 32nd USENIX Security Symposium (USENIX Security 23). 2023.\n\n[7] Qi, Xiangyu, et al. \"Revisiting the assumption of latent separability for backdoor defenses.\" The eleventh international conference on learning representations. 2023.\n\n[8] Cheng, Siyuan, et al. \"Deep feature space trojan attack of neural networks by controlled detoxification.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 35. No. 2. 2021.\n\n[9] Zeng, Yi, et al. \"Narcissus: A practical clean-label backdoor attack with limited information.\" Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security. 2023."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The research topic is of importance\n2. The paper is overall well written, and idea is intuitive in general"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Hölder Pruning, a backdoor removal framework designed to detect and eliminate poisoned neurons influenced by backdoor triggers in neural networks. Hölder Pruning divides the model into two distinct components: feature extraction and feature processing. By assuming a clean feature extractor, it concentrates on the feature processing component, identifying compromised neurons by calculating their per-neuron Hölder constant and pruning those with high values. Evaluation on three image classification datasets highlights the method's effectiveness compared to nine baseline backdoor removal techniques. The authors also demonstrate the framework's capability in scenarios where a clean feature extractor is not guaranteed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The threat model is weak and vulnerable.\n\n2. The experiment setup may be unfair when compared to baseline methods.\n\n3. Some evaluation results (e.g., ASR for WaNet, SIG, and BadNets in Table I) do not match existing literature.\n\n4. Missing comparison with important baseline methods and evaluation on more advanced backdoor attacks.\n\n5. Lacks a key ablation study regarding the effect of $\\alpha$ in the Hölder constant.\n\n6. No discussion on potential adaptive attacks against the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Q-(1): Many training-time defenses against backdooring attacks have been proposed in the resent. For instance, ABL [1], DBD [2], and CBD [3] propose to train a backdoor-free model with the poisoned dataset without any prior knowledge of the dataset's cleanliness. \nAs the first two works are already considered in this paper, the authors need to rephrase line 128-129. If the definition of \"in-training defense\" is different from these prior work, I would ask the authors to explain the difference for a better understanding.\n\nQ-(3): And at line 148, the final poisoned training dataset is defined as $D' = D \\cup D_{poison}$, which infers that the size of the final training set is larger than the original clean dataset, i.e. $|D'| > |D|$. Most previous backdooring attacks poison a part of the clean samples and yield a poisoned training set the same size as the original training set. Therefore, I would ask, what is the implementation of dataset poisoning? Are all samples used for poisoning actually from $D$?\n\nQ-(4): Does the Robust Accuracy (RA) measure the prediction on all poisoned samples in the training dataset? Or does it measure the accuracy on a test dataset with poisoning but correctly labeled?\n\nQ-(5): In the setting of HP, the clean feature extractor is available, which enables a running time faster than the prior defenses. With the description at lines 241-242, the presence of a clean feature extractor actually indicates that some clean images are available upfront, which first does not match the definition in the defender model at lines 200-201 (i.e. the defender cannot reliably distinguish between clean and poisoned samples). \n\nQ-(8): In Table 1 and Table 2, ANP defense leads to a significant natural performance degradation. In particular with CIFAR10, however, ANP shows the ability to preserve the natural performance in the original paper. Similar results have also been reproduced by the BackdoorBench [4]. Can the authors explain why ANP's performance is relatively low in this paper?\n\nQ-(9): Across Table 1 and Table 2, although the baseline model is the same, the model performance of \"Benign\" training is always different within each dataset. In addition, since Table 2 shows a very high ACC on the GTSRB dataset with \"Benign\" training, does this mean HP in Table 1 actually harms the natural performance a lot?\n\n[1] Li et al., \"Anti-backdoor learning: Training clean models on poisoned data,\" in NeurIPS 2021.\n\n[2] Huang et al., \"Backdoor defense via decoupling the training process,\" in ICLR 2022.\n\n[3] Zhang et al., \"Backdoor Defense via Deconfounded Representation Learning, \" in CVPR 2023.\n\n[4] Wu et al., \"BackdoorBench: A Comprehensive Benchmark of Backdoor Learning,\" in NeurIPS 2022."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper has a good presentation with the assistance of all Figures for the illustration.\n\n- The evaluation involves extensive related defenses and variant attacks, thus resulting in sound comprehensiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Under the assumption that some neurons in a DNN model are responsible for the backdooring attack, this paper proposes a new defense named Hölder Pruning, which identifies neurons related to the backdoor with a high Hölder constant. The defensive procedure of this method consists of two stages: (1) feature extraction, which aims to detect suspicious neurons, and (2) feature elimination, which adopts semi-supervised learning to alleviate the backdoor while pursuing high natural performance. Extensive experiments demonstrate the adaptability of using Hölder Pruning for both post-train defense and end-to-end training with dataset splitting."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The authors mention that Hölder Pruning is the first in-train defense (Line 128-129). However, there are already many training-time defenses; some of them are even considered in this paper's evaluation.\n\n(2) In lines 108-109, Hölder Pruning acts without compromising model performance, while the results in Tables 1 and 2 show a degradation of the natural performance.\n\n(3) The dataset poisoning in section 2 is not clear enough.\n\n(4) The evaluation metric of Robust Accuracy (RA) is not clearly introduced.\n\n(5) The Clean Feature Extractor used in HP requires exclusive training on clean images, which seems to violate the threat model.\n\n(6) Some methodological details are missing: \n - The method used for semi-supervised learning\n - The number of neurons to prune, i.e. $p$ in Algorithm 2 and Algorithm 3.\n - The rationale of using $\\alpha = 0.5$\n\n(7) Since the results in Table 5 are different from those in Table 1 or Table 2, the setting of the poisoning rate in Table 1 and Table 2 is missing.\n\n(8) The performance of related work, e.g. ANP, is worse than the original and the reproduced results by other works.\n\n(9) Different benign models are used in Table 1 and Table 2.\n\n(10) The defensive performance on a large-scale dataset, e.g. Tiny-ImageNet, is suggested to provide.\n\n---\nIn the following, further questions related to the above points are detailed with the notion of \"Q-#\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper introduces a pruning technique based on Hölder constants, which effectively removes the neurons most susceptible to backdoor attacks within MLP layers, while assuming the feature extractor layers (such as CNN layers) remain free from such attacks.\n\n2. When an attacker lacks access to the training environment but provides a dataset contaminated with some poisoned samples, the proposed Hölder Iterative Defense (HID) method offers a training approach that mitigates the impact of these poisoned inputs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Hölder Pruning is a new defense mechanism designed to protect deep neural networks from backdoor attacks by detecting and removing neurons that are influenced by malicious triggers embedded in poisoned training data. This method works by dividing the neural network's operation into feature extraction and processing stages, allowing for the targeted elimination of compromised neurons while preserving the model's overall performance. Utilizing the Hölder constant to measure neuron sensitivity and the Fast Gradient Sign Method (FGSM) for identification, this approach has been shown to be more effective and significantly faster compared to existing state-of-the-art defense strategies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The Weaknesses of the \"Holder Pruning\" method are outlined below:\n\n1. **Limited Scope**: The methodology is tailored specifically for Multi-Layer Perceptrons (MLPs). This specialization restricts its application to more advanced neural network structures and other machine learning tasks beyond basic classification.\n\n2. **Incremental Contribution**: The proposed method seems to build incrementally upon existing techniques, such as CLP, with the Holder Pruning method essentially reducing to CLP when alpha equals 1. This suggests that the contribution might be seen as a refinement rather than a novel approach.\n\n3. **Organizational Issues**: The document lacks clarity and coherence. Essential details are deferred to the appendix, making it challenging to grasp the methodology without a thorough examination of supplementary materials.\n\n4. **Hyperparameter Sensitivity**: The selection of hyperparameters, particularly the threshold derived from the analysis of Hölder constants, may lack robustness across different models, datasets, and adversarial conditions. See Line 1175: The threshold is selected by analyzing the density distribution of Hölder constants."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Could you provide any intuition of why the robust accuracy of Holder Pruning is much higher than others?\n\n2. As CIFAR-10, CIFAR-100, and GTSRB are all datasets with small input sizes, it would be interesting to know the performance of the proposed defenses on datasets with larger input sizes, such as ImageNet or Tiny-ImageNet.\n\n3. It seems that the caption of Table 1 is wrong, listing out wrong defense baselines.\n\n4. A typo in Line 211: “an classifier” -> “a classifier”"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. With the help of the clean feature extractor, they train only the classifier on the poisoned dataset, thus condensing the backdoor within neurons in the classifier. That is to say, these neurons overfit to the trigger features and are highly sensitive to input perturbations. — The idea of condensing backdoor within a few neurons is interesting.\n\n2. They propose a metric based on Holder constant to identify the backdoored neurons.\n\n3. In addition to the traditional ACC and ASR results, they also report Robust ACC results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an in-training backdoor defense method, named Holder pruning. They assume the defender has access to a poisoned dataset and a clean feature extractor; the goal of the defender is to obtain a clean classifier. Inspired by the observation that the backdoored models are highly sensitive to small perturbations, they first let the classifier be trained on the poisoned dataset; they then prune the backdoored neurons within the classifier’s neurons according to the Holder constants (which measures the robustness of the neuron output against the perturbed input); by combining the clean feature extractor and the pruned classifier, they finally obtain a clean model. Essentially, they utilize the sensitivity of neurons to perturbations in inputs to identify between clean neurons and backdoored neurons. \n\nIn the case where clean feature extractors are not available, they propose an improved method, named Holder iteration defense, where the key step is using self-supervised learning to get a clean feature extractor first and after Holder pruning, using identified clean data to fine-tune the feature extractor."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper claims itself to be the first in-training defense against backdoor attacks. But there are some existing in-training defenses, e.g., DBD and D-ST&D-BR. According to the description of their threat models, they aim to obtain a clean model given the poisoned training set by designing appropriate model training approaches—which is the goal of in-training defenses.\n\n2. The threat model of this paper looks a bit strange to me. The general threat model for pruning methods is: given a backdoored model, defenders need to figure out how to prune the neurons so that the pruned model is clean. However, the threat model of this paper is: given a poisoned dataset, defenders need to figure out how to train the model so that the final model is clean, which is the threat model of in-training defenses. Using this threat model is totally fine, but they also assume the existence of an additional clean feature extractor, which offers more “tools” for the defenders. It deviates from the threat model of general in-training defenses, i.e., altering from how to train a clean model to how to train a clean MLP, which serves as an unfair setup for other in-training defenses.\n\n3. The idea of utilizing neuron sensitivity to purify the backdoored model is not new, e.g., ANP prunes most sensitive neurons under the adversarial neuron perturbations, which shares similarity with this paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Holder pruning is a computationally efficient defense against backdoor attacks that uses the Holder constant to detect and remove neurons affected by backdoor triggers."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hlder,\ntitle={H\\\"O{LDER} {PRUNING}: {LOCALIZED} {PRUNING} {FOR} {BACKDOOR} {REMOVAL} {IN} {DEEP} {NEURAL} {NETWORKS}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yJduhi9mDQ},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep Neural Networks (DNNs) have become the cornerstone of modern machine\nlearning applications, achieving impressive results in domains ranging from com-\nputer vision to autonomous systems. However, their dependence on extensive data\nand computational resources exposes them to vulnerabilities such as backdoor\nattacks, where poisoned samples can lead to erroneous model outputs. To counter\nthese threats, we introduce a defense strategy called Hölder Pruning to detect\nand eliminate neurons affected by triggers embedded in poisoned samples. Our\nmethod partitions the neural network into two stages: feature extraction and feature\nprocessing, aiming to detect and remove backdoored neurons—the highly sensitive\nneurons affected by the embedded triggers—while maintaining model performance\nThis improves model sensitivity to perturbations and enhances pruning precision\nby exploiting the unique clustering properties of poisoned samples. We use the\nHölder constant to quantify sensitivity of neurons to input perturbations and prove\nthat using the Fast Gradient Sign Method (FGSM) can effectively identify highly\nsensitive backdoored neurons. Our extensive experiments demonstrate efficacy of\nHölder Pruning across six clean feature extractors (SimCLR, Pretrained ResNet-18,\nViT, ALIGN, CLIP, and BLIP-2) and confirm robustness against nine backdoor\nattacks (BadNets, LC, SIG, LF, WaNet, Input-Aware, SSBA, Trojan, BppAttack)\nusing three datasets (CIFAR-10, CIFAR-100, GTSRB). We compare Hölder Pruning to eight SOTA backdoor defenses (FP, ANP, CLP, FMP, ABL, DBD, D-ST)\nand show that Hölder Pruning outperforms all eight SOTA methods. Moreover,\nHölder Pruning achieves a runtime up to 1000x faster than SOTA defenses when\na clean feature extractor is available. Even when clean feature extractors are not\navailable, our method is up to 10x faster."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Holder Pruning",
"Holder iteration defense",
"backdoor attacks",
"Deep Neural Networks",
"backdoor defense"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d42e913d66e2a45f96660d225c19211d4b650d9e.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "HÖLDER PRUNING: LOCALIZED PRUNING FOR BACKDOOR REMOVAL IN DEEP NEURAL NETWORKS"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yLYMFRZkdU | SimpleStrat: Diversifying Language Model Generation with Stratification | main | Active | Diverse Generation; Large Language Models Sampling; Stratified Sampling | probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.) | 3;3;5 | 4;4;3 | 2;3;2 | 2;2;2 | 3;2;2 | 3.666667 | 3.666667 | 2.333333 | 2 | 2.333333 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. SimpleStrat includes three stages but there is no analysis about how each stage affects the generation performance. What’s the quality of auto-stratification? What if the generated stratification is wrong? Besides, heuristic estimation seems unnecessary: we can always randomly sample a strat and use it for prompting. No ablations provided throughout the paper.\n2. In the first stage, we could produce multiple dimensions for the stratification. Does the number of dimensions matter? How would it affect the performance?\n3. Apart from the KL divergence and recall, what’s the precision of model’s generation? There is no direct evaluation about the model's quality although the authors claim SimpleStrat doesn’t hurt quality.\n4. What’s the unique rate for a model's generation over the 100 samples? i.e. how many unique samples?\n5. CoverageQA seems not very practical. What if applying SimpleStrat to more practical tasks, such as reasoning? It’s common to sample multiple responses in resonsing tasks and report pass@K? would SimpleStrat be better in such problems?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The proposal of SimpleStrat, an interesting idea for increasing generation diversity.\n* CoverageQA, a simple evaluation benchmark for LLM generation diversity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents SimpleStrat to increase the diversity of LLM generation without hurting the quality. SimpleStrat is a prompting method, consisting of three stages: auto-stratification, heuristic estimation, and probabilistic prompting. The rough idea is to first prompt LLM itself to produce conditions that split the generation space into different partitions, and then combine user query with the condition for the final generation such that the output follows a given partition. For evaluation, the authors propose CoverageQA. Experiments show SimpleStrat achieves good performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Lack of ablation\n* Evaluation is weak"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1) One of the contribution of this paper is the creation of a dataset CoverageQA for diversity measurement, which can benefits this area as there're not many existing datasets.\n2) The proposed method is interesting and leverages LLM itself for diversity improvements. One of the advantages of this idea is that with the improvement of the LLM per se, the generation diversity can also be improved.\n3) The proposed approach is evalauted on both open-sourced and proprietary models, and the experimental results show its efficacy in diversity improvements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Generation diversity of LLMs is an important research areas for several reasons such as better downstream accuracy, generate diverse datasets for post-training, RLHF, etc. Traditional approaches for diversifying include temperature scaling, beam search, in-context learning, etc. However, those methods have their own drawbacks such as lower generation quality, limited diversity improvement, etc. This work proposes a new approach for improving diversity called SimpleStrat. SimpleStrat uses the language model itself for diversity improvement. It includes three stages: 1) auto-stratification, 2) heuristic estimation, 3) probabilistic prompting. Auto-stratification prompts the LM to identify promising dimensions of diversity. Heuristic estimation estimates the joint distribution for each stratum. At probabilistic prompting stage, a set of stratum is sampled and a probabilistic prompt is formed. After a prompt is sampled from the probabilistic prompt, it's used as input into LLM for generation.\nTo evaluate the proposed approach, CoverageQA dataset is adopted which includes two splits: 1) CoverageQA and CoverageQA-Wikipedia. To measure diversity of the generation, it computes the KL divergence between MAP distribution of answers to a uniform distribution over all valid answers. For models that MAP distribution can't be calculated, it measures the model's coverage via recall of ground-truth solutiosn over 100 samples.\nThe main contributions of this work include: 1) a dataset of under-specified questions, 2) the SimpleStrat method, 3) the experimental results show that the proposed method improves diversity, specifically 0.36 KL divergence reduction on average and 0.05 increase in recall."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) For evaluation, this work mostly focuses on underspecified question, however, it's not clear how this can impacts the model performance on questions with one answer.\n2) The experiments consists of one baseline and this baseline is not LLM based. There're several LLM based approach. It would be better to compare the proposed approach with them.\n3) How does this approach impact accuracy? This question is not discussed and answered in the work.\n4) One question I have for the proposed approach is how applicable it is to other areas besides question answering such as code generation, agents, etc. One relevant question is that [the Figma AI tool keeps generating the same weather app without diversity](https://siliconangle.com/2024/07/02/figma-disables-new-ai-tool-repeatedly-cloned-apples-weather-app/). Is it possible to use the proposed approach for solving this problem?"
},
"withdrawal_confirmation": null
},
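The diversity metrics discussed in the review above (KL divergence between the model's answer distribution and a uniform distribution over the valid answers, and recall of ground-truth solutions) can be illustrated with a short sketch. This is an editorial illustration of the general metrics only, not code from the reviewed paper; the function names and the use of a simple empirical counting estimate (rather than a MAP distribution) are assumptions.

```python
import math
from collections import Counter

def kl_to_uniform(samples, valid_answers):
    """KL(P || U): divergence of the empirical answer distribution P from
    the uniform distribution U over all valid answers.  Samples that are
    not valid answers are ignored here (a simplifying assumption)."""
    valid = set(valid_answers)
    counts = Counter(s for s in samples if s in valid)
    total = sum(counts.values())
    u = 1.0 / len(valid)
    kl = 0.0
    for ans in valid:
        p = counts.get(ans, 0) / max(total, 1)
        if p > 0:  # terms with p = 0 contribute nothing to KL(P || U)
            kl += p * math.log(p / u)
    return kl

def coverage_recall(samples, valid_answers):
    """Fraction of ground-truth answers that appear at least once in the
    samples (the 'recall over 100 samples' style of metric)."""
    valid = set(valid_answers)
    return len(valid & set(samples)) / len(valid)

# Toy example: 100 samples that only ever hit 2 of 5 valid answers.
samples = ["Paris"] * 70 + ["Lyon"] * 30
valid = ["Paris", "Lyon", "Marseille", "Nice", "Lille"]
print(kl_to_uniform(samples, valid))    # ~1.0, far from uniform
print(coverage_recall(samples, valid))  # 0.4
```

A perfectly diverse sampler over the five valid answers would drive the KL term toward 0 and the recall toward 1.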
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could you please provide more evidence of lower temperature for SimpleStrat? Please refer to the 3rd point in the weaknesses.\n2. Based on my understanding, \"coverage diversity\" measures the recall of valid solutions, reflecting the stability of LLM generation instead of diversity. Could you please interpret more of its rational?\n3. Based on Figure 3, the answer distribution of SimpleStrat is far from uniform distribution, diveraging from the assumption. This seems to indicate the ineffectiveness of stratified sampling. Could you please provide some clarification?\n4. l424: I'm confused about the relationship between \"the product of the individual next-token probabilities\" and diversity. At least, length-normalization should be used to eliminate the effects of answer lengths.\n5. KL divergence only indicates uniformality among all the possible solutions provided by the model, rather than within the whole solution space. This makes it unsuitable to measure the diversity in solution space.\n6. This method needs to generate strata dimensions for each prompt.\n-\n 1. For generations with few tokens, it's not clear if this method is more effective than temperature sampling at the same token cost. \n 2. For generations with many tokens, it's not clear whether an easy dimension partition still exists."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The motivation, splitting the answer space into subspace (i.e., strata) along the automatically identified dimensions, is intuitively effective for higher diversity.\n2. The diagram and examples clearly illustrate the main concept of the core idea."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces SimpleStrat, a training-free sampling method for increased diversity of LLM generation, and validates its effectiveness on a self-constructed dataset named CoverageQA, containing 105 under-specified questions. Both the method and dataset contribute to the research community."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Despite the intuitiveness, the motivation needs to be verified, at least empirically, for broader impact. Otherwise, this paper is limited to a method and a small dataset without inspiring insights, leading to limited contribution.\n2. Despite the verified effectiveness, the proposed method is only verified on a small dataset. The generalization and robustness is not clear. This makes the paper more like a prototype without substantial validation.\n3. Despite the drawbacks of temperature sampling appointed by the paper, temperature sampling is still used in SimpleStrat. At least, a lower temperature in SimpleStrat should be demonstrated.\n4. The writing still needs to be further polished, like:\n-\n 1. l046, l124: delete the final period.\n 2. l102: grammar error for \"verified to be a valid without\"\n 3. l135: citep for \"Lowerre & Reddy (1976).\"\n 4. l181: grammar error for \"the given an answer it\"\n 5. l196: grammar error for \"As illustrated in 2, SimpleStrat consist of three stages\"\n 6. l218: grammar error for \"LLMs can used in\"\n 7. l337: wrong statement for \"On the right\"\n 8. l351: grammar error for \"increasing the temperature past 1 there is\"\n 9. l236: Now that \"For simplicity, we focus in this work on settings where all solution in the solution space is equally like.\", Heuristic Estimation is not used in the main experiments, and better to be moved from a separate subsection to a discussion section."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose SimpleStrat for diversifying LLM generations and introduce CoverageQA a benchmark of underspecified questions for evaluating diversity."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024simplestrat,\ntitle={SimpleStrat: Diversifying Language Model Generation with Stratification},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yLYMFRZkdU},\nnote={under review}\n}"
},
"abstract": {
"value": "Generating diverse responses from large language models (LLMs) is crucial for applications such as planning/search and synthetic data generation, where diversity provides distinct answers across generations.\nPrior approaches rely on increasing temperature to increase diversity. However, contrary to popular belief, we show not only does this approach produce lower quality individual generations as temperature increases, but it depends on model's next-token probabilities being similar to the true distribution of answers. We propose SimpleStrat, an alternative approach that uses the language model itself to partition the space into strata. At inference, a random stratum is selected and a sample drawn from within the strata.\nTo measure diversity, we introduce CoverageQA, a dataset of underspecified questions with multiple equally plausible answers, and assess diversity by measuring KL Divergence between the sampling distribution and uniform distribution over valid ground truth answers. As computing a posterior probability for proprietary models is infeasible, we measure recall on ground truth solutions.\nOur evaluation show using SimpleStrat achieves higher recall by 0.05 compared to GPT-4o and 0.36 average reduction in KL Divergence compared to Llama 3."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diverse Generation; Large Language Models Sampling; Stratified Sampling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b148500d14dd7dc575c78b63f6b4e477c87f7ba8.pdf"
},
"presentation": null,
"primary_area": {
"value": "probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "SimpleStrat: Diversifying Language Model Generation with Stratification"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yLhJYvkKA0 | On the Price of Differential Privacy for Hierarchical Clustering | main | Active | Hierarchical clustering;differential privacy;sparsest cut | alignment, fairness, safety, privacy, and societal considerations | 5;6;6 | 4;3;3 | 3;3;2 | 3;2;4 | 3;3;4 | 5.666667 | 3.333333 | 2.666667 | 3 | 3.333333 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Can you develop a lower bound family of graphs on a fixed topology which all have edge weights above 1, and incur at least the stated multiplicative error?\n\nIs it possible to obtain a stand-alone utility theorem for the proposed algorithm, which interpolates between \\log n / \\epsilon multiplicative error and n^2 / \\epsilon additive error for graphs with edge weights at least 1 vs. arbitrary graphs? You might have to \"unpackage\" Proposition A.6 so that it can handle additive error.\n\nFor the experiments, what is the smallest value of epsilon for which the utility of the proposed algorithm becomes \"bad\"? What is the value of epsilon such that the utility approaches the optimal cost? Consider increasing the range of epsilons used in the plots."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed algorithm is simpler and much more efficient than that in prior work. The multiplicative error is not too high for the graphs with edge weight at least 1, which is also a nice result and improvement over prior work. The reduction to balanced cut is an interesting new result."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes several results furthering hierarchical clustering with differential privacy. The notion of privacy is weighted differential privacy, where the graph topology is public, and the weights of the edges are private. First, an efficient algorithm obtaining a \\frac{n}{\\epsilon} \\log n approximation ratio for graphs in which the minimum edge weight is at least 1 is proposed. The authors show that in the worst case, the \\omega(n^2 / \\epsilon) additive error lower bound from previous work still holds for the weaker notion of weighted DP. They make the interesting observation that the lower bound carries over to DP balanced cut via a reduction. Finally, they run experiments showing that the proposed mechanism is indeed feasible."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The lower bound in a way misses the spirit of the proposed algorithm: It shows that there are adversarial graphs (with edge weights possibly zero) which require additive \\Omega(n^2 / \\epsilon) noise, but the upper bound applies to graphs whose edge weights are all at least 1. As these types of graphs are central to obtaining the upper bound, it would be more interesting to see what the lower bound is for graphs which also satisfy this property.\n\nThe utility of the proposed algorithm for graphs whose edge weights are close to zero is currently undefined, meaning the algorithm is not robust for graphs which fall even a little outside the constraints. This can be reconciled somewhat by combining this algorithm with previous work, but then the efficiency improvements do not hold.\n\nThe experiments look interesting, though there is an unexplained phenomenon that the proposed algorithm barely shows any sensitivity to epsilon: it achieves nearly the same error on both the minimum \\epsilon = 0.1 and maximum \\epsilon = 2.0 tested. This is surprising, since typically epsilon dramatically affects the performance of any private algorithm, and typically as \\epsilon becomes large, the cost should approach the cost of the best tree. Neither of these things is happening in the current plots; I think they should be double-checked."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please see above."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Despite the complicated problem setting and technical components, this paper has a clear presentation.\n2. In addition to establishing an upper bound, the authors also derive a matching lower bound that aligns with previous work, which presents an additive error of $\\Omega(n^2/ \\epsilon)$ and a new lower bound of $\\Omega(1/ \\log ^{2}n \\epsilon)$ for balanced sparsest cuts in the weight-level DP model. These findings offer valuable insights into the comparison between edge-level and weight-level differential privacy models. The lower bound proofing technique is novel."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the challenge of achieving differential privacy in hierarchical clustering, under a model that assumes edge weights as sensitive information. The authors successfully show there exists an algorithm in this setting that achieves $O(\\log^{1.5} n/\\epsilon)$\nmultiplicative approximation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Currently, I do not see the reason why in the input graph G adding an extra additive weight of $ 10 \\log \\frac{n}{\\epsilon} $ would be necessary, can the authors give some comments on this?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please comment an the above weaknesses (especially important ones)."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper addresses a significant and complex topic that holds potential benefits for the community. The work is well written, well organized and, based on my level of reading, presents a sound approach to the problem. The core concept is clearly presented, and the technical contributions do not look trivial to me. Overall, the presentation and flow of ideas make the paper engaging and accessible (with a small criticism that I provide below), providing a very nice reading experience."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the challenge of performing hierarchical clustering (HC) on a graph while ensuring differential privacy (DP). Traditional approaches to differentially private clustering on graphs, particularly under edge-level DP, have recently been shown to incur high errors, which limits their practicality. To improve upon this, the paper proposes an alternative approach using weight-level DP, where only the edge weights (not the topology) represent sensitive information.\n\nWhile weight-level DP is less commonly studied than edge-level DP, it is not entirely novel. Previous works have defined neighboring graphs under weight-level DP by assuming identical topologies but allowing edge weight differences up to an $\\ell_1$ norm of 1. In this study, the standard weight-DP definition is adapted by adding a unit-weight assumption, requiring all edges to have weights of at least 1. For clarity, this adapted model will be referred to as “weight-DP-bis”. This adjustment enables significantly more practical error bounds, which are leveraged in the main contributions.\n\nFirst, the paper introduces a polynomial-time algorithm that achieves an approximation of $O(\\log^{1.5} n / \\epsilon) $ for hierarchical clustering under the $\\epsilon$-weight-DP-bis model. The paper further justifies the unit-weight assumption in weight-DP-bis by demonstrating that without it, an HC algorithm that satisfies $\\epsilon$-weight-level DP must incur an additive error of at least $\\Omega(n^2 / \\epsilon)$.\n\nAs an additional contribution, new lower bounds are established for balanced sparsest cuts under weight-level DP, which may have implications for broader applications beyond HC. \n\nFinally, empirical results support the algorithm’s effectiveness on both synthetic and real datasets, showing performance close to non-private benchmarks and scalability across a range of graph sizes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have a few points that could be regarded as potential areas for improvement or clarification in the paper. Please note that these comments are based on my understanding of the material, and they reflect my personal perspective. I do not claim to be a specialist specifically in hierarchical clustering (HC). It is possible that I have misunderstood some aspects, and I am certainly open to revisiting my viewpoint during the discussion phase if necessary.\n\n- **(Minor)**: First, while the paper is quite well written, it may benefit from being more accessible to a general audience. Specifically, it would be helpful to provide brief reminders of key graph-theoretical concepts earlier in the paper. Some concepts, such as sparsest cuts, are not introduced in the main body, while others, such as HC and Dasgupta’s cost, are defined only on page 5. Given that the introduction spans four pages, it may help readers unfamiliar with these concepts to have them presented earlier or concisely defined within the introduction itself to avoid missing key discussions in the first half of the paper. Additionally, the introduction could be shortened to accommodate these clarifications.\n\n- **(Important)**: Second, while I understand that the unit-weight assumption offers better error bounds from a technical standpoint, it may fundamentally modify the problem. Specifically, assuming the $\\ell_1$ norm of differences is less than 1 and simultaneously that each edge has a minimum weight of 1, means that two graphs are neighboring only if their weight functions differ on a single edge. As I see it, this implicitly combines aspects of both the $\\ell_1$ and $\\ell_0$ norms in the privacy definition. The paper may benefit from contextualizing this adapted definition with practical scenarios beyond the initial one presented in [1] (and paraphrased on line 77), as these may no longer directly apply. Additionally, there are existing variations in weight-level DP definitions that might be interesting to discuss in the paper, where the $\\ell_1$ norm is that replaced with the $\\ell_\\infty$ norm. Relevant references include [2,3,4,5].\n\n- **(Important)**: Third, it seems that the paper does not compare to a closely related problem: computing minimum spanning trees under weight-level DP. This problem has been studied under both $\\ell_1$ and $\\ell_\\infty$ norms in [1,2,3]. I believe that this line of work is relevant to this paper because, as far as I understand, computing a weighted minimum spanning tree can be closely related to the single linkage HC algorithm. Existing studies on this problem provide upper and lower bounds that may be highly relevant here, even if the metric used (e.g., not explicitly Dasgupta's cost) differs. For instance, [1,3] provide matching upper and lower bounds of $\\Theta(n^2 / \\epsilon)$ for the additive error in minimum spanning tree computation under weight-level DP with $\\ell_1$ norm, which seems related to Theorem 3 in this paper. It think it would be nice if the paper could discuss this connection and, if relevant, compare these results. Similarly [2] adapts the initial definition to use an $\\ell_\\infty$ norm and shows that, similar results hold in the $\\ell_\\infty$ model, but can be circumvented by forcing the $\\ell_\\infty$ norm to be smaller than $\\frac{1}{\\vert E \\vert}$ in the privacy definition. 
This scheme seems similar to the idea of modifying the neighborhood definition to obtain better errors, but as pointed out in [3], this can be considered as being an alchemical fix.\n\n- **(Medium)**: Finally, the experimental section could be strengthened by including more comparisons to existing methods or baselines. Currently, it appears that the only baseline comparison is with iterative sparsest cut after input perturbation. It might be beneficial to consider additional comparisons, such as those in Imola (2023), which compares against single and average linkage.\n\n- **(Minor)**: A brief stylistic note: while this is a personal preference, I would suggest avoiding exclamation points in scientific writing, as seen in lines 160 and 200. A more neutral tone may be more fitting for a formal context."
},
"withdrawal_confirmation": null
},
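The "iterative sparsest cut after input perturbation" baseline mentioned in the last review can be sketched with the standard Laplace mechanism: under the $\ell_1$-style weight-level DP notion described above (neighboring graphs share a topology and their edge-weight vectors differ by at most 1 in $\ell_1$ norm), adding independent Lap($1/\epsilon$) noise to every edge weight satisfies $\epsilon$-DP, and anything computed from the noisy weights afterwards stays $\epsilon$-DP by post-processing. This is a generic sketch, not the authors' algorithm; the use of numpy and the optional clipping back to the unit-weight regime are assumptions.

```python
import numpy as np

def perturb_weights(weights, epsilon, rng=None, min_weight=None):
    """Laplace-mechanism input perturbation for weight-level DP.

    `weights` is an array of edge weights; neighboring inputs are assumed
    to differ by at most 1 in l1 norm, so adding Lap(1/epsilon) noise to
    every coordinate satisfies epsilon-DP.  Anything computed from the
    noisy weights afterwards (e.g., iterative sparsest cut) remains
    epsilon-DP by post-processing.
    """
    rng = np.random.default_rng() if rng is None else rng
    noisy = np.asarray(weights, dtype=float) + rng.laplace(scale=1.0 / epsilon, size=len(weights))
    if min_weight is not None:
        # Optional post-processing: project back onto the unit-weight regime.
        noisy = np.maximum(noisy, min_weight)
    return noisy

# Example usage (values are illustrative)
w = np.array([1.0, 2.5, 7.0, 1.2])
print(perturb_weights(w, epsilon=1.0, min_weight=1.0))
```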
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024on,\ntitle={On the Price of Differential Privacy for Hierarchical Clustering},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yLhJYvkKA0},\nnote={under review}\n}"
},
"abstract": {
"value": "Hierarchical clustering is a fundamental unsupervised machine learning task with the aim of organizing data into a hierarchy of clusters. Many applications of hierarchical clustering involve sensitive user information, therefore motivating recent studies on differentially private hierarchical clustering under the rigorous framework of Dasgupta's objective. However, it has been shown that any privacy-preserving algorithm under edge-level differential privacy necessarily suffers a large error. To capture practical applications of this problem, we focus on the weight privacy model, where each edge of the input graph is at least unit weight. We present a novel algorithm in the weight privacy model that shows significantly better approximation than known impossibility results in the edge-level DP setting. In particular, our algorithm achieves $O(\\log^{1.5}n/\\varepsilon)$ multiplicative error for $\\varepsilon$-DP and runs in polynomial time, where $n$ is the size of the input graph, and the cost is never worse than the optimal additive error in existing work. We complement our algorithm by showing if the unit-weight constraint does not apply, the lower bound for weight-level DP hierarchical clustering is essentially the same as the edge-level DP, i.e. $\\Omega(n^2/\\varepsilon)$ additive error. As a result, we also obtain a new lower bound of $\\tilde{\\Omega}(1/\\varepsilon)$ additive error for balanced sparsest cuts in the weight-level DP model, which may be of independent interest. Finally, we evaluate our algorithm on synthetic and real-world datasets. Our experimental results show that our algorithm performs well in terms of extra cost and has good scalability to large graphs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Hierarchical clustering",
"differential privacy",
"sparsest cut"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/282b8c2ce32332e762ffd82a960891d3a44230a1.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "On the Price of Differential Privacy for Hierarchical Clustering"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yLmcYLP3Yd | Discrete Neural Algorithmic Reasoning | main | Active | neural algorithmic reasoning;graph neural networks | learning on graphs and other geometries & topologies | 3;5;5;6;6 | 3;4;2;3;4 | 2;2;2;2;3 | 2;2;2;3;3 | 2;3;2;2;3 | 5 | 3.2 | 2.2 | 2.4 | 2.4 | 0.243975 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does the proposed method handle algorithms that require more complex continuous manipulations or aggregate functions beyond simple increments and selections? Can the model be extended to support such algorithms without losing the benefits of discretization and interpretability?\n2. Are there potential strategies to improve training without hint supervision? How does the model perform on tasks without hints when compared to continuous models?\n3. The constraints improve generalization but reduce expressiveness. Is there a way to balance this trade-off by selectively relaxing some constraints?\n4. Can the proposed separation between discrete and continuous data flows be applied to other neural network architectures beyond attention-based models? What challenges might arise in such adaptations?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Originality: The paper presents a novel method that enforces discrete state transitions in neural networks, which is a significant departure from traditional continuous representations. By integrating hard attention and separating discrete and continuous data flows, the authors address key challenges in neural algorithmic reasoning, particularly out-of-distribution generalization, and interpretability.\n- Quality: The experimental results are strong, with DNAR achieving perfect test scores across multiple algorithmic tasks and graph sizes. The comparison with baseline models and state-of-the-art methods demonstrates the effectiveness of the proposed approach.\n- Clarity: The paper is generally well-written and structured. The authors explain their methodology, including architectural choices and training procedures. The inclusion of diagrams and tables aids in understanding the proposed model and its performance.\n- Significance: The work contributes to the field by showing that neural networks can be designed to mimic classical algorithms with perfect generalization and interpretability. This has implications for developing reliable and trustworthy AI systems that can be formally verified."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel approach to neural algorithmic reasoning by enforcing neural networks to operate with discrete states and separating discrete and continuous data flows. The authors propose a model that integrates hard attention mechanisms, feature discretization, and a separation between discrete computations and continuous inputs (scalars). This design aims to align neural network computations closely with classical algorithms and thus improves out-of-distribution generalization and interpretability. The method is evaluated on several algorithmic tasks from the SALSA-CLRS benchmark, including BFS, DFS, Prim's algorithm, Dijkstra's algorithm, Maximum Independent Set (MIS), and Eccentricity calculations. The proposed Discrete Neural Algorithmic Reasoner (DNAR) achieves perfect test scores on these tasks, even on graphs significantly larger than those seen during training. The authors also discuss the limitations of their approach and potential directions for future work."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Expressiveness Limitations: The enforced constraints, such as hard attention and discrete state transitions, limit the model's expressiveness. For instance, in a single message-passing step, the model cannot compute certain aggregate functions, like averaging over neighbors. This restricts the method's applicability to algorithms that fit within these constraints.\n- Scope of Evaluation: The experimental evaluation focuses on specific algorithmic tasks where the proposed method aligns well. It remains unclear how the model would perform on more complex algorithms that require different computational primitives or continuous manipulations beyond simple increments.\n- Training Without Hints: While the method achieves excellent results with hint supervision, training without hints is identified as challenging. This limitation reduces the method's applicability in scenarios where intermediate algorithmic steps (hints) are unavailable.\n- Sensitivity to Hyperparameters: The paper mentions that certain hyperparameters, like the number of discrete states, significantly impact performance, especially when training without hints. However, there is limited discussion on how sensitive the model is to these hyperparameters and the implications for generalization.\n- Presentation Details: While the paper is generally clear, some sections could benefit from additional explanations. For example, the scalar updater's mechanism could be elaborated further to better understand if someone is not in the NAR field."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. I'm very much not an expert in this area, so I did a bit of a literature search on related papers in the field. I found one such paper [1], used a text-based version of the CLRS-3o, called TextCLRS-text. Does it make sense to use that dataset here? Or to compare to the proposed TransNAR architecture?\n\n2. As mentioned earlier, it would be useful to ablate the different algorithmic decisions made in this paper. In its current form, I can't ascertain which of the architectural decisions is responsible for which gains on these datasets."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The approach seems to work much better than the baselines considered, and achieves perfect performance on the two datasets studied in this article."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors make some modifications to Transformer training to make it more effective for algorithmic reasoning tasks. In particular, they constrain it to learn hard attention, and separate discrete and continuous \"flows\" to prevent information loss. Overall these modicfications result in a model than the GIN and PGN baselines they consider."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I found this paper to be difficult to read, but I'm not an expert in the area. It would be useful to see each of the design decisions in this paper ablated, with their corresponding effect on the datasets covered in this research."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Results show perfect performance on SALSA-CLRS and CLRS-30 benchmarks. Could you elaborate on reported results and 0 variance between your seeds given that this is not the case in majority of comparable baselines or ablation experiments? Could you please report variance on results in table 1? \n2. Could you please explain the differences in computed discrete states used for SALSA-CLRS and CLRS-30 tasks as well as how the model performs on out-of-distribution data?\n3. Could you provide a pseudocode behind Discretize_nodes and Discretize_edges functions shown on page 4?\n4. Work mentions modifying hints from the benchmark, could you clarify if this was included for tested baselines as well as how the hints are modified?\n5. Could you further elaborate on the addition of a virtual node in your model (section 4.3)?\n6. Could you provide results of the hyperparameter search mentioned in section 6? \n7. Could you provide further detail on reverse-engineering (perhaps a toy example) mentioned in section 6?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Impressive performance achieved across selected tasks in SALSA-CLRS and CLRS-30 benchmarks\n- Interpretability and simplicity of the proposed model which utilizes set of discrete states to capture continuous inputs using edge priorities\n- Further perspectives on paving way for discrete neural algorithmic reasoners"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a use of a finite set of predefined discrete states and manipulation of continuous inputs to improve limitations of current paradigms in neural algorithmic reasoning such as redundant dependencies in algorithmic data. The method achieves perfect performance on chosen tasks in SALSA-CLRS and CLRS-30 benchmarks and introduces a starting perspective on use of discrete states to pave the way for future methods in discrete neural algorithmic reasoning and their interpretability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There is limited explanation of how the discrete states are computed, the paper extensively discusses related work in sections 1 and 2, perhaps it would be better if this space was used for providing further detail on proposed architecture\n- Work is limited in providing mathematical / theoretical definitions or explanation of discrete state space and manipulation with continuous inputs which is the main novelty"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see my comments in the weakness part."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The problem of learning neural algorithmic reasoning is important. I like the overall idea of learning discrete neural reasoners since the previous efforts of learning neural networks with continuous states and or continuous operators failed miserably.\n\n \n\n- The architecture design has some interesting components. For example, the separation of the data and the computational flow that manipulates the data is interesting. In particular, the scalars (continuous input) only affect the computation of attention weights and do not affect the node or edge states. Although designs with the same spirit have appeared before, e.g., in Neural Execution Engines, this part has its own merits.\n\n \n\n\n- The paper is easy to follow and well-written, except that a few technical details are sparse."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies neural algorithmic reasoning and proposes a discrete transformer-based processor architecture. The authors show that the proposed model empirically achieves perfect size generalization on several algorithms picked from the CLRS benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I am concerned about the significance of the contributions. If the claims of this paper are all correct, then we just obtain a recipe for learning a neural reasoner to perfectly mimic a known algorithm. However, to make the learning successful, we need to first run the algorithm to collect the full trace of the execution, i.e., the sequence of intermediate states generated by the algorithm (the so-called hints). In other words, we just perfectly fit the “correct algorithm” using a neural network. \n\n Moreover, I do not see a theoretical guarantee of when this perfect fitting would happen. For example, would it happen on a specific class of algorithms or any algorithm? See my next comment on the claim about “the guarantee”.\n\n This is unsatisfying since the goal of neural algorithmic reasoning is to learn correct algorithms from data without knowing the algorithms. From this perspective, the really interesting and valuable part is the exploration under the no-hints setting. However, in Section 6, the authors did not provide any experimental results and just stated that their model never achieved perfect validation scores. In short, the authors should provide a thorough empirical study of their proposed model under the no-hints setting and compare it with other approaches on benchmarks like CLRS. \n\n \n\n- In Section 5, the claim “we can guarantee that for any graph size, the model will mirror the desired algorithm, which is correct for any test size” is quite strong. In my opinion, such a strong claim needs rigorous theoretical proof. However, the authors only provide some vague arguments to support this claim. \n In particular, can you elaborate on the following two questions?\n 1) How do you confirm that the attention block indeed operates as “select_best” selector?\n I neither see any empirical investigation nor theoretical proof on this point.\n 2) Even if the attention block operates as “select_best” selector, why does this condition lead to the above claim?\n You should at least demonstrate your detailed logic using an example algorithm like DFS.\n\n \n\n- I think the size of the discretized states would matter a lot to the expressivity of the proposed neural reasoner, i.e., what algorithms can be represented by the designed class of neural networks. However, I did not see the experimental study and discussion on its effect.\n\n \n\n- An interesting experiment is to check if there is some sort of phase transition in terms of problem size. Specifically, I would imagine the phenomenon of perfect fitting to the correct algorithm would disappear if we decreased the problem size in training. Then, what is the minimum problem size to ensure it fits perfectly for a particular algorithm?\n\n \n\n- The details of how to train task-dependent encoders and decoders are not provided, and how they affect the processor's training is not discussed at all. I would imagine the quality of the encoded embedding is quite important to the success of learning the processor, even with teacher forcing. \n\n \n\n- In section 3.3, I get the high-level idea that the scalars (continuous input) only affect the computation of attention weights and do not affect the node or edge states. However, the description of the idea in the 2nd paragraph of Section 3.3 is not so clear that I could not figure out how exactly the computation is designed. It would be great to either write the equations and or illustrate the computational graph."
},
"withdrawal_confirmation": null
},
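The reviewer's question about the attention block acting as a "select_best" selector can be made concrete with a small sketch of hard (argmax) attention: instead of a softmax-weighted average, each query copies the message of the single key with the highest score, which is what makes the selection interpretable. This is a generic illustration, not the authors' architecture; the use of PyTorch, the dense score matrix, and the returned index are assumptions.

```python
import torch

def hard_attention(scores, values, mask=None):
    """Hard ('select_best') attention: each query picks exactly one value.

    scores: (n_queries, n_keys) attention logits
    values: (n_keys, d) messages
    mask:   optional boolean (n_queries, n_keys); False entries are excluded
    Returns the copied message per query plus the chosen index.
    """
    if mask is not None:
        scores = scores.masked_fill(~mask, float("-inf"))
    best = scores.argmax(dim=-1)          # index of the selected neighbor
    return values[best], best             # copied message + interpretable choice

# Toy example: 2 queries attend over 3 keys
scores = torch.tensor([[0.1, 2.0, -1.0],
                       [3.0, 0.0,  0.5]])
values = torch.eye(3)
out, chosen = hard_attention(scores, values)
print(chosen)  # tensor([1, 0])
```

At training time a differentiable surrogate (e.g., Gumbel-softmax with a straight-through estimator) is typically needed to backpropagate through the argmax; whether the reviewed paper uses that particular trick is not stated in these reviews.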
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. For the multi-task experiments, do the results in Table 2 correspond to these settings? It is unclear which algorithms were trained simultaneously and how this compares to the baselines’ configurations.\n\n2. Without hints, DNAR struggles to generalize. Could you clarify the reverse-engineering issue described? Was any intermediate approach, such as noisy teacher forcing or teacher forcing decay, tested to examine the impact of hints on DNAR?\n\n3. How were GIN and PGN used as baselines? Were they employed to treat this as an end-to-end node-level prediction task?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The writing is generally clear, though some areas could be further elaborated.\n2. The paper is well-motivated, as both OOD generalization and interpretability in NAR are both important questions. The proposed architecture of separating the discrete and continuous data flows are novel and effective.\n3. The perfect scores across algorithms are impressive, especially given the model’s capacity for size generalization on graphs 100 times larger, outperforming strong baselines.\n4. The proposed architecture consists of three design components, each of which is validated through ablation studies to demonstrate its effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the challenges of generalization and interpretability in neural algorithmic reasoning (NAR) by introducing a novel architecture that guides the model to follow algorithmic execution as a series of finite, predefined states. The proposed architecture has three main components: feature discretization, hard attention, and separation of discrete and continuous data flows. The empirical results show that the model achieves perfect scores in both single-task and multi-task experiments across various algorithms. Additionally, the architecture enhances interpretability, allowing validation of the correct execution of desired algorithms."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the perfect scores achieved in the experiments are impressive, the paper could be strengthened by testing on a wider range of algorithms. Although both parallel (e.g., BFS) and sequential (e.g., Prim) algorithms are covered, most algorithms studied are graph-based, where previous NAR methods have already proven effective. The CLRS-30 dataset includes a broader variety of algorithms (e.g., sorting and search), where many NAR methods can struggle. Although SALSA-CLRS was chosen for its thorough OOD evaluation, testing the model on the CLRS-30 dataset with larger graph sizes would add valuable insights due to the dataset’s extensive algorithm coverage.\n\n2. Another limitation is the lack of application to real-world datasets, as the authors note in the Future Work section. A significant advantage of NAR methods is their ability to operate on high-dimensional data by utilizing a pretrained model that mimic algorithms. This presents an additional OOD challenge with potential distribution shifts in real-world data. Evaluating the proposed architecture on real-world datasets would demonstrate its practical value; even a single real-world experiment, as seen in related works (e.g., Numeroso et al., 2023), could significantly strengthen the method's implications.\n\n3. Although interpretability is a valuable strength of the proposed method, it is unclear how this model’s interpretability, achieved through analyzing state transitions and attention blocks, differs from other NAR approaches, which also allow interpretation of intermediate executions (e.g., using decoder outputs to indicate a node’s current predecessor in a sorting algorithm)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024discrete,\ntitle={Discrete Neural Algorithmic Reasoning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yLmcYLP3Yd},\nnote={under review}\n}"
},
"abstract": {
"value": "Neural algorithmic reasoning aims to capture computations with neural networks via learning the models to imitate the execution of classic algorithms. While common architectures are expressive enough to contain the correct model in the weights space, current neural reasoners are struggling to generalize well on out-of-distribution data. On the other hand, classic computations are not affected by distributional shifts as they can be described as transitions between discrete computational states. In this work, we propose to force neural reasoners to maintain the execution trajectory as a combination of finite predefined states. To achieve that, we separate discrete and continuous data flows and describe the interaction between them. Trained with supervision on the algorithm's state transitions, such models are able to perfectly align with the original algorithm. To show this, we evaluate our approach on multiple algorithmic problems and get perfect test scores both in single-task and multitask setups. Moreover, the proposed architectural choice allows us to prove the correctness of the learned algorithms for any test data."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"neural algorithmic reasoning",
"graph neural networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a0d84bdff959ce5632f191ff9a3b53fc88012973.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Discrete Neural Algorithmic Reasoning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yM7rw8Bo1f | FE-GNN: Feature Enhanced Graph Neural Networks for Account Classification in Ethereum | main | Active | Blockchain;Identity identification;GNN | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;5;6 | 4;5;5;4 | 1;2;3;3 | 1;2;2;3 | 2;2;3;3 | 4.25 | 4.5 | 2.25 | 2 | 2.5 | -0.19245 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the above week points."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+ The paper addresses an important problem\n\n+ The paper is easily understandable\n\n+ Experiments done on a real ethereum dataset"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a modified graph neural network architecture tailored for identifying node types in Ethereum graphs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The proposed approach seems like incremental improvement over existing GNN approaches\n\n- The results do not provide error bars. It is not clear to me whether the improvement shown is statistically significant.\n\n- Some important baseline GNN models such as Graphsage has not been used. Given the performance of GAT, it is not clear to me whether the other existing GNN approaches perform even better.\n\n- Also some of the node types are easy to classify, e.g., token contracts, by looking at the node feature so it was not clear to me where the improved performance came."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors explain if and how the model's performance might vary with other blockchains?\n2. Is there any potential for applying this model in real-time transaction monitoring or identifying emerging account types?\n3. Would varying the types of features collected (e.g., smart contract interactions) impact the results?\n2. Could the authors provide further justification or a sensitivity analysis for the specific configurations of the convolutional and self-attention layers?\n3. Could the authors explore which extracted features most significantly contributed to the FE-GNN’s classification performance?\n4. Have the authors considered training with imbalanced classes (e.g., phishing, exchanges) to reflect real-world scenarios?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The use of a hybrid GCN-GAT model and focus on feature enhancement is a creative solution that improves classification accuracy by addressing node heterogeneity. The experiments are rigorously conducted, with robust comparisons to existing models. The division of the dataset and metrics used are appropriate, though more detail on feature selection could clarify which elements were most impactful. The methodology is generally clear, but certain technical terms and detailed GNN layer operations may require additional clarification for broad accessibility. More visual aids might benefit understanding. Given the increasing importance of transaction monitoring in Ethereum, this work could have a positive impact on blockchain research, especially in enhancing identity tagging."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel blockchain address identity identification method called Feature-Enhanced Graph Neural Network (FE-GNN) for improving Ethereum account classification. It constructs a transaction graph with over 1 million nodes and 3.7 million edges from Ethereum transactions and applies a GCN (Graph Convolutional Network) and GAT (Graph Attention Network) hybrid model to enhance node representation. The model is evaluated using an extensive dataset containing over a million nodes and achieves notable performance gains over traditional methods for identifying specific Ethereum account types."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper lacks detailed justification for specific hyperparameter values (e.g., convolutional layers, and self-attention heads). A sensitivity analysis could enhance reproducibility and model tuning.\n2. Although the dataset is thoroughly described, more information on how validation and test splits influence model training could strengthen reproducibility.\n3. Adding insights into why certain models were selected would clarify the robustness of the results.\n4. While feature extraction is thoroughly described, including a breakdown of features’ contributions to classification accuracy would add depth to the evaluation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- What is the time complexity?\n- What are the studied time periods? \n- What are the results of a temporal prediction task? You could train a model with data until t and predict crime nodes at t+1. Such an experiment would prove the value of the model."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Blockchain data analytics is an interesting and novel research area. \n- Experiments use related models and compare the results. This is the strongest part of the article."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents Feature Enhanced Graph Neural Networks, a method for identifying cryptocurrency addresses in Ethereum to improve transaction analysis. By building a transaction graph and using graph learning techniques, FE-GNN embeds blockchain addresses, and the embeddings demonstrate superior performance compared to traditional methods in its experimental evaluations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I see two main weaknesses:\n\n- The evaluation of FE-GNN mainly depends on a single Ethereum dataset. For more comprehensive validation, results should be tested on additional datasets, such as the phishing dataset https://www.kaggle.com/datasets/xblock/ethereum-phishing-transaction-network. The dataset is not ours.\n- Even if the dataset limitation is addressed, the approach is iterative and lacks novelty. Using GNNs and attention mechanisms is standard practice. The authors should probably open a new novelty path to publish the article at a premier conference.\n\nAdditional concerns include:\n\n- While the paper mentions 1,124,130 nodes and 3,752,659 edges, it doesn’t specify the time period over which this data was collected.\n- The evaluation overlooks the temporal aspect of transactions, which is crucial for capturing changes in user behavior over time, especially given how quickly trends can shift in the cryptocurrency space.\n- The paper doesn’t discuss the computational costs associated with the FE-GNN method, but the layered use of convolutions and self-attention mechanisms is likely to demand significant computational resources.\n- There is no ablation study to assess the contribution of each component or step in the FE-GNN algorithm. Without this, it’s unclear which parts of the method are driving improvements, making it hard to pinpoint optimal configurations or areas for improvement."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could the author explain why the collected dataset was divided into $D_1,D_2, D_3$ as shown in Table 3? Why not conduct experiments on the entire dataset?\n2. The effort in collecting the Ethereum dataset, which includes 1,124,130 nodes and 3,752,659 edges, is commendable. However, necessary clarifications about the dataset are needed. For example, what is the timeframe of the transactions included in the dataset? Does it cover all transactions during that period? Was any preprocessing done? According to the descriptions in the introduction and methods section, the constructed graph dataset is a heterogeneous graph. What are the proportions of each node category in the 1,124,130 nodes?\n3. Could the author further analyze the fairness of the experiments in Table 4? According to the descriptions, the author considers an eight-class problem (as shown in Table 2). However, as per the experimental section, $T^2A2vec$ and HNRL are phishing node detection methods, i.e., binary classification. How were the experimental settings aligned in this context? If $T^2A2vec$ and HNRL are primarily focused on detecting phishing nodes, is it fair to compare their effectiveness in identifying Exchange, ICO Wallets, and the other seven categories?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The author provides detailed explanations and analyses of label behavior in the appendix.\n2. The author constructed a large-scale dataset. Although it is not publicly available, the effort is commendable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper explores Ethereum network analysis by combining GCN and GAT models to achieve node representation. It introduces a newly constructed large-scale heterogeneous Ethereum dataset, including 1,124,130 nodes and 3,752,659 edges. The paper discusses the methodology for node classification across multiple categories and provides analyses of label behavior in appendix."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of Methodological Innovation: The method mainly combines existing GCN and GAT models to achieve node representation, without proposing any novel methods.\n2. Insufficient Experimental Comparisons: The author dedicates significant space in Table 4 to comparisons with fundamental or classical methods such as LR, DeepWalk, and GCN. Only two Ethereum analysis methods are included for comparison, but there is a lack of comparisons with other SOTA (state-of-the-art) Ethereum analysis methods, such as REF 1-4.\n3. Logical Gaps in Writing: Many parts lack analysis, leaving the reader confused. For example, in lines 269-272, the author states that there are no existing meta-path related experiments on the Ethereum dataset, and hence proposes a specific process in Fig. 1(a). However, there is no discussion on the differences between the specific process proposed and existing processes for other heterogeneous graphs, why existing methods cannot be directly used, what issues they have, why those issues pose challenges, and what specific solutions the author proposes. The internal mechanism of the proposed method is also unexplained. Similar issues are prevalent throughout the paper.\n4. Unclear Explanations in Key Areas: The dataset constructed is a contribution, but essential details are missing. What is the timeframe of the transactions in the dataset? Are they from recent transactions in 2024 or from earlier years? The author does not analyze or consider potential evolutions and changes in transaction behaviors across different years. Since the dataset is highlighted as a key contribution, it is recommended to make it publicly available.\n5. Numerous Minor Writing Issues: For example, commas are needed after formulas 10, 11, and 12. Similar minor issues are present throughout the text.\n\n\n\nREF1. Yang, J., Yu, W., Wu, J., Lin, D., Wu, Z., & Zheng, Z. (2024). 2DynEthNet: A Two-Dimensional Streaming Framework for Ethereum Phishing Scam Detection. IEEE Transactions on Information Forensics and Security.\n\nREF2. Lin, D., Wu, J., Fu, Q., Zheng, Z., & Chen, T. (2024). RiskProp: Account risk rating on Ethereum via de-anonymous score and network propagation. IEEE Transactions on Dependable and Secure Computing.\n\nREF3. Liu, J., Chen, J., Wu, J., Wu, Z., Fang, J., & Zheng, Z. (2024). Fishing for Fraudsters: Uncovering Ethereum Phishing Gangs With Blockchain Data. IEEE Transactions on Information Forensics and Security.\n\nREF4. Wu, J., Lin, D., Fu, Q., Yang, S., Chen, T., Zheng, Z., & Song, B. (2023). Towards Understanding Asset Flows in Crypto Money Laundering Through the Lenses of Ethereum Heists. IEEE Transactions on Information Forensics and Security."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "FE-GNN is a graph neural network based on feature enhancement. It uses the collected transaction data to build a transaction graph and uses graph convolutional networks and graph attention networks to infer the identity of blockchain addresses."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024fegnn,\ntitle={{FE}-{GNN}: Feature Enhanced Graph Neural Networks for Account Classification in Ethereum},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yM7rw8Bo1f},\nnote={under review}\n}"
},
"abstract": {
"value": "Since the birth of the blockchain cryptocurrency trading platform represented by Bitcoin, cryptocurrencies based on blockchain technology have gained widespread attention and accumulated a large amount of transaction data. The analysis of cryptocurrency transactions has become an important research direction with social and economic value, and an important area of blockchain scientific research. Identifying the identity of different cryptocurrency addresses and understanding their behavior is the core challenge to achieve cryptocurrency transaction analysis, otherwise it is difficult to understand blockchain datasets and analyze them with meaningful results. To this end, this paper proposes a blockchain address identity identification method called \\textbf{F}eature \\textbf{E}nhanced \\textbf{G}raph \\textbf{N}eural \\textbf{N}etworks (FE-GNN). Specifically, a transaction graph is constructed based on the collected transaction data, and graph learning techniques based on graph convolutional networks and graph attention networks are used to infer the blockchain address identity. Experimental results show that the FE-GNN algorithm outperforms previous algorithms."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Blockchain",
"Identity identification",
"GNN"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d241dee42e603fea26b906cca9091f897cfa1116.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "FE-GNN: Feature Enhanced Graph Neural Networks for Account Classification in Ethereum"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yMHe9SRvxk | Human-Feedback Efficient Reinforcement Learning for Online Diffusion Model Finetuning | main | Active | Online RLHF;Diffusion Model Finetuning | generative models | 5;5;6;6 | 3;3;4;2 | 2;2;4;4 | 2;3;3;3 | 3;4;4;3 | 5.5 | 3 | 3 | 2.75 | 3.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weakneses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "* The article is well-structured and easy to follow, and the motivation is clear. The approach is simple but effective. \n\n* The proposed method seems to be novel. And the empirical results on several tasks demonstrate the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces HERO, a framework for fine-tuning Stable Diffusion (SD) models using online human feedback to improve alignment with human intent. HERO addresses the limitations of traditional methods, which rely on costly predefined rewards or pre-trained models, by leveraging real-time human feedback through two main components: Feedback-Aligned Representation Learning and Feedback-Guided Image Generation. Experiments show that HERO is more efficient than prior methods in tasks such as anomaly correction, reasoning, counting, personalization, and reducing NSFW content, achieving significant improvements with minimal feedback."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Compared to D3PO that does not require specific reward model, the proposed method in this paper clearly make the training process more complex and introduce computational overhead.\n\n* The proposed method uses online human preferences. Does that mean the human annotator need to provide preference to the generated image at each run of the stable diffusion model? If so, it is might be difficult to collect enough data for training the encoder as contrastive learning requires a large amount of data to converge. Additionally, how to measure the performance of the trained encoder $E_\\theta$?\n\n* D3PO seems to be the closest baseline and achieves second-best results in Figure 3. Why the authors not provide results of D3PO in Table 2?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Some questions and suggestions for the authors to consider:\n1. Expand evaluation to include more diverse and complex tasks, particularly multi-object scenarios\n2. Will the authors consider performing a thorough analysis of diversity with established metrics to better understand the trade-offs in quality?\n3. Is there a plan to establish structured protocols for feedback collection that involve multiple evaluators to enhance reliability and reduce biases?\n6. Would the authors be able to incorporate an analysis of failure cases along with strategies for mitigation in future work?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- **Originality**: HERO presents a unique extension of binary signal methodologies to continuous reward signals, effectively merging representational learning with reinforcement learning to enhance the alignment of generated images with human feedback.\n- **Clarity**: The paper is well-written, with clearly labeled diagrams and detailed qualitative examples that illustrate the feedback process. The structured presentation of the HERO framework, including its iterative feedback mechanism, allows readers to easily grasp the method's operation. The paper also includes many qualitative examples, showcasing the benefit of the HERO pipeline\n- **Performance**: Results demonstrate that HERO significantly enhances sample efficiency and alignment compared to previous methods, highlighting its practical impact in T2I generation tasks.\n- **Flexibility across tasks:** The results suggest that the pipeline can be widely applied to a wide range of tasks - such as content safety improvement to reasoning-based generation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces HERO, a novel framework designed for fine-tuning diffusion models using human feedback, aimed at improving text-to-image (T2I) generation tasks. HERO uses feedback-aligned representation learning to create a latent representation space guided by human annotations. Human evaluators categorize generated images into “best,” “good,” or “bad,” to guide a contrastive learning process that constructs an embedding space. Triplet loss is applied to align embeddings of “best” and “good” images while distancing “bad” images, resulting in a reward signal that guides the model toward human preferences. The framework employs DDPO (Diffusion-based Policy Optimization) for updates and uses LoRA for parameter efficient fine-tuning. At inference, images are sampled from a Gaussian mixture model based on the noise latents of “good” and “best” images from previous iterations, balancing quality and diversity. Experimental results indicate that HERO achieves high success rates across various T2I tasks, demonstrating both sample efficiency and superior performance compared to other feedback-guided methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Task Diversity and Complexity**:\n - Evaluation is conducted across only five T2I tasks, which is significantly less than comparable works like D3PO, which evaluated across 300 prompts.\n - Tasks are primarily simple single-object scenarios and do not encompass multi-object compositions or complex interactions. Expanding to more challenging tasks would improve the robustness of the findings.\n\n2. **Insufficient Diversity and Convergence Analysis**:\n - The paper lacks a quantitative analysis of the diversity-quality trade-off, particularly missing comparisons between non-fine-tuned and feedback-guided generators.\n - There are no established metrics for evaluating mode collapse or potential overfitting to ideal seeds, which could limit the practical application of the generator.\n\n3. **Concerns Regarding Human Feedback Methodology**:\n - Results are reported based on a limited number of human evaluators, with each evaluator responsible for different seeds. This implies a lack of inter-annotator agreements and introduces potential biases from relying on individual evaluators, which could skew the model's alignment and generalization capabilities.\n - There is also limited information on evaluation reliability measures, such as the criteria used for selection."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. How can HERO be adapted to remain robust against noisy or contradictory feedback, which is common in real-world scenarios?\n2. Could there be a hybrid approach that integrates automated feedback mechanisms with human judgments to reduce dependency on constant human input while retaining the benefits of nuanced understanding?\n3. How transferable is the HERO framework across different domains or types of generative models? Can the principles applied here be adapted for use in non-visual tasks, such as text generation or music synthesis?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. Efficiency in Feedback Use: HERO significantly reduces the need for human feedback instances by using them more effectively compared to previous methods, such as D3PO.\n2. Direct Use of Human Judgments: By converting direct human feedback into learning signals without the need for pre-trained models, HERO simplifies the training process and potentially increases the model's responsiveness to nuanced human evaluations.\n3. Improved Learning from Sparse Data: The methodology allows for effective learning even when limited data is available, which is a critical advantage in scenarios where generating or collecting extensive labeled datasets is impractical or impossible."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces HERO, a new framework for fine-tuning Stable Diffusion models using online human feedback efficiently. HERO integrates two novel components: Feedback-Aligned Representation Learning and Feedback-Guided Image Generation. These components are designed to maximize learning efficiency by converting human judgments into informative training signals, thereby reducing the reliance on large pre-trained models or extensive heuristic datasets. The model demonstrates significant improvements in online learning efficiency, requiring considerably fewer instances of human feedback compared to previous methods, while effectively enhancing image generation aligned with human preferences."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Algorithmic Complexity: The incorporation of sophisticated mechanisms like contrastive learning and feedback-based sampling may introduce complexity that complicates the model's implementation and optimization, potentially requiring specialized knowledge or resources to manage effectively.\n2. Sensitivity to Feedback Quality: The performance of HERO heavily depends on the relevance and accuracy of the feedback provided. Inconsistent or poor-quality feedback could mislead the learning process, leading to suboptimal or biased model behavior."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Listed in the weakness section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper is a clear and structured presentation, which makes it easy to understand the proposed methodology and its underlying concepts. \nThe experiments cover a diverse set of four T2I tasks and transferability and validate the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors introduce HERO (Human-feedback Efficient Reinforcement learning for Online diffusion), a novel method for enhancing text-to-image (T2I) models using limited human feedback. The approach leverages human evaluation to refine image quality: in the data collection phase, a human annotator labels a batch of generated images with positive and negative feedback, selecting the single best image among them. HERO employs a triplet loss function to train a visual encoder by mapping embeddings based on these annotations. A reward signal, derived from the cosine similarity between the learned representation of an input image and the selected best image, guides the model optimization and image generation process. The model utilizes Proximal Policy Optimization (PPO) to apply Low-Rank Adaptation (LoRA) to a stable diffusion model. Experimental results demonstrate that HERO outperforms baseline approaches, achieving higher success rates in generating preferred images."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. A primary concern with the paper is that the T2I model's performance is only assessed through task success rates. Important factors such as image diversity and aesthetic quality are not quantitatively evaluated, which are crucial metrics and should be included, as seen in the baseline D3PO [1].\n\n2. Additionally, the proposed method requires extra human labeling to identify the best image in each batch, which introduces additional information. This requirement makes direct comparison with other baselines less equitable, as they may not rely on such intensive human input.\n\nGiven these concerns, I would currently not recommend acceptance of this paper.\n\n**Reference**\n\n[1] Yang, Kai, et al. \"Using human feedback to fine-tune diffusion models without any reward model.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Fine-tuning Stable Diffusion with minimal online human feedback, achieving 4x more efficiency in controllable generation compared to previous methods."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024humanfeedback,\ntitle={Human-Feedback Efficient Reinforcement Learning for Online Diffusion Model Finetuning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yMHe9SRvxk},\nnote={under review}\n}"
},
"abstract": {
"value": "Controllable generation through Stable Diffusion (SD) fine-tuning aims to improve fidelity, safety, and alignment with human guidance. Existing reinforcement learning from human feedback methods usually rely on predefined heuristic reward functions or pretrained reward models built on large-scale datasets, limiting their applicability to scenarios where collecting such data is costly or difficult. To effectively and efficiently utilize human feedback, we develop a framework, HERO, which leverages online human feedback collected on the fly during model learning. Specifically, HERO features two key mechanisms: (1) Feedback-Aligned Representation Learning, an online training method that captures human feedback and provides informative learning signals for fine-tuning, and (2) Feedback-Guided Image Generation, which involves generating images from SD's refined initialization samples, enabling faster convergence towards the evaluator's intent. We demonstrate that HERO is 4x more efficient in online feedback for body part anomaly correction compared to the best existing method. Additionally, experiments show that HERO can effectively handle tasks like reasoning, counting, personalization, and reducing NSFW content with only 0.5K online feedback."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Online RLHF",
"Diffusion Model Finetuning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/97d64724089bc74d89d3b82ab4456f6a53f7bd5a.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Human-Feedback Efficient Reinforcement Learning for Online Diffusion Model Finetuning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yNZi38u52U | Model Cautiousness: Towards Safer Deployment in Critical Domains | main | Withdraw | cautiousness;calibration;out-of-distribution;safety | probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.) | Gianluca Detommaso | ~Gianluca_Detommaso1 | 3;3;3;6 | 2;4;4;4 | 3;1;2;3 | 1;1;2;2 | 3;2;2;3 | 3.75 | 3.5 | 2.25 | 1.5 | 2.5 | 0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Given the ratings, I withdraw this paper."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Can the formulated problem easily extend to handle a broader range of OOD situations?\n- Are there any baselines that also account for model cautiousness? If so, does the proposed method still achieve superior performance compared to those baselines?\n- What are the limitations of the proposed method or the formulated problem?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Paper Writing: The paper has a coherent narrative and logical flow. The motivation for introducing the concept of model cautiousness is well-explained, with examples that illustrate the concept effectively. The schematic illustrations are clear and well-designed, allowing readers to quickly grasp the main ideas.\n- Innovative Topic: The study of model cautiousness is both intriguing and innovative, offering a new perspective on evaluating a model’s confidence in its inferred outcomes.\n- Reasonable Proposal: The proposed cautious confidence model and its component design align well with the formulated problem and are supported by theoretical foundations.\n- Evaluation: The proposed method is evaluated on an extensive range of datasets, consistently demonstrating superior results compared to standard calibration procedures."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the concept of model cautiousness, encouraging the model to maintain high uncertainty when inferring out-of-distribution (OOD) samples, even if its inference results are correct. To achieve this, the proposed cautious confidence model decouples the confidence generation model from the discrimination model, and formulates the detection of an OOD sample as a binary event. Additionally, a new metric called Expected Cautiousness Error (ECauE) is proposed to evaluate the model's performance in terms of both caution and calibration. The proposed method is evaluated on 12 question-answering and 37 vision datasets. Based on the results, the paper claims that the method demonstrates superior model cautiousness compared to standard calibration procedures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Problem Setting: While I acknowledge the innovation of this paper's topic, I feel that formulating OOD detection as a binary event is overly simplistic. Intuitively, it seems necessary to consider different OOD scenarios, such as samples that are not part of the training distribution but belong to the same category (as in the evaluations in this paper) versus samples from entirely different categories. Additionally, OOD literature distinguishes between near-OOD and far-OOD settings. If the problem formulation considered multiple OOD types rather than treating it as a binary event, it could potentially be more impactful and offer deeper insights.\n- Lacking Baselines: Although the proposed method is compared to standard calibration procedures across extensive datasets and shows superior performance, it lacks a comparison with baselines that also emphasize model cautiousness. Such a comparison would better demonstrate the proposed method's effectiveness in advancing model cautiousness.\n- Missing Definitions: Some notations or abbreviations are undefined, such as PRAUC. I assume this refers to AUPRC (area under the precision-recall curve), which is more commonly used.\n- No Discussion on Limitations: The paper lacks a paragraph that comprehensively discusses potential limitations, either in the problem formulation or in the proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses section, I have listed a detailed write up of my issues with the paper there."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The concept that uncertainty over OOD data should be treated differently than uncertainty over IID data is a neat, systems level way of thinking about model safety.\n- The experimental evaluation has broad coverage of datasets for 2 tasks and 1 model for each task."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "- Motivating Claim: It is desirable that models be calibrated on in-distribution data and exhibit higher (perhaps total) uncertainty on OOD data.\n- A notion called \"cautiousness\" is introduced, that is a combination of ECE on IID data and requires high uncertainty on OOD data.\n- The authors define the ECauE (expected cautiousness error) metric.\n- The authors introduce a method for fitting an cautiousness estimator atop a pretrained model.\n- They show that the estimator they define (X2-95) achieves a better score on the metric (ECauE) they define."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### Metric is not justified strongly and makes the paper's story circular\nThe paper uses circular reasoning in the sense that the authors define a metric, and then propose a method which improves over a baseline w.r.t to the proposed metric. The only attempt made to justify their formulation of the metric is briefly, informally, in the introduction. The argument given is roughly that \"we think this is how models should behave, and here is a metric that captures whether they behave that way\". **What is the motivation for the metric other than the opinion of the authors? Is there some concrete benefit to models behaving this way?**\n\n\n### No use of contemporary OOD / Calibration work\nThe metric basically comes down to being calibrated for IID inputs and being uncertain for OOD inputs. There is a well-established line of research on OOD detection. There is also a well established line of research on model calibration. This suggests a natural baseline, which is to stick well-known OOD detection baseline in front of a model that some well known calibration method has been applied to and simply hardcode low confidence for OOD inputs and calibrated confidence for IID inputs. Why not do this? I don't get the point of introducing some new method when tons of OOD detection methods already exist. There are also no experiments done to show how the new OOD detection method does compared to existing OOD detection works. \n\n### Operational use of OOD seems unjustified\nTake Table 1. What makes the datasets you've chosen to be OOD actually OOD for the model? Shouldn't OOD be w.r.t to the pretraining or at least instruction tuning distribution of the model? I don't get how you can say that xyz dataset is OOD for a model. **I don't think calibrating a LLM on one dataset makes a bunch of other datasets OOD for that LLM — what if the LLM was trained on similar data?**\n\n### Selective prediction already answers the motivation, but no mention in paper\nSelective prediction is a well-established and active line of work (https://arxiv.org/abs/1901.09192, https://arxiv.org/abs/2306.08751, https://research.google/blog/introducing-aspire-for-selective-prediction-in-llms/). Selective prediction requires a model to abstain when it does not know the answer to a query. A model with perfect risk vs coverage for selective prediction would never make mistakes — if it answered, whether on OOD or IID settings, it would be right. It's a strictly stronger notion than ECE, IMO. What does the introduced metric offer that selective prediction does not? Ultimately I don't think we care about metrics for the sake of metrics. We care about what they can be used for. If this metric could be used for selective prediction, then I could see a nice use case for it. Otherwise, it just seems to be a roundabout way of doing OOD detection. **If a large, pretrained model has really good accuracy and calibration on OOD data, is the data really OOD?**\n\n### Summary\nThe problem with the work is that it takes a well-known notion (ECE) and says that a model having good ECE on OOD data is bad (why...?) and then develop a _new_ method for OOD detection without comparing it to others (why?). IMO having good ECE on OOD data suggests that the data is not really OOD. 
In any case, you can't define OOD for a large pretrained model declaratively by saying \"this data is OOD\", there _must be a reason that model is OOD other than you saying it is_ — especially since these models have probably been trained on similar data to standard benchmarks (e.g. https://arxiv.org/abs/2406.04244v1). Finally, **I think selective prediction already subsumes the use case for this metric without needing to reference the notion of OOD data** (which I think is the big weakness in the argument of the paper): we simply want the model to know when it knows the answer and when it does not know."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "I don't have any ethics concerns"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Why is Bernoulli formulation in line 145 needed?\n- In Figure 3 EcauE is low on OOD and high on ID, should not it be vice versa, i.e. models are well calibrated on ID and not calibrated on OOD."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "I could not find any strengths in this paper, unfortunately."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new concept of “cautiousness” which measures two things: how calibrated a model is on ID and how close to random guess it is on OOD. A measure of cautiousness - ECauE is proposed as well as an algorithm to compute it. In addition to the base classifier f this algorithm requires training a separate OOD detection model g and ECauE is computed jointly for f and g."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "## Questionable motivation\nThis paper is trying to solve a task of making a model calibrated on ID while making random predictions on OOD at the same time. \nI do not understand why this task needs to be solved at the first place. It is way too complex because it entangles two unsolved problems: model calibration and OOD detection. I believe it is better to solve these problems separately.\n\n## Unrealistic assumptions\nTo solve the proposed problem authors make very unrealistic assumptions:\n- For simplicity, they assume that the classification task is binary (line 128), which is not the case for the most of the real world scenarios (e.g. ImageNet for computer vision). Could the authors please explain whether it is possible to remove this assumption and keep the theory behind the method work (I am asking because results are computed for ImageNet and other non-binary datasets in Table 1 by redefining Y as an indicator of correct prediction)?\n- They assume that embeddings of ID data follow normal distribution (lines 255-256) while embeddings of OOD data don't. Could the authors please provide any evidence that justifies this for realistic cases (e.g. ImageNet)?\n\n## Flaws in crucial definitions\n- The definition for target labels in classification is not properly given. Namely, it is not clear whether \"y\" is class label or probability that model is correct (as said in line 303).\n- It is not well defined what kind of data is considered ID and OOD in this paper.\n- It is not clear how models are calibrated on ID. It is a crucial part of the ECauE computation, but I only found the phrase: “use a standard calibration approach” in line 236. Same goes for details of fitting g(x) model.\n- H(p, q) is divergently defined as probability in Eq. 5 line 192 and as an expectation in line 202.\n- Why is expectation needed in Eq. 7, isn't the following always true: h(x) = p(1-q) + 1/2q when f(x) = p, g(x) = q? It is confusing, because it seems that this equation always holds, and therefore, all models can be called \"cautious\" according to the given definition.\n- I don't understand how eq. 5 is made, let's define A := (Y=1), B := x is OOD, C := (f(x) = p), D := (g(x) = q), then after conditioning on B we have: P(A | C, D) = P(A | C, D, not B)*(1 - P(B)) + P(A | C, D, B)*P(B), while you say that P(A | C, D) = P(A | C, not B) * (1 - P(B | D)) + P(A | C, B) * P(B | D).\n\n## Weak empirical results\n- The main results (Table 1 for QA and Figure 2 for vision) don't provide the downstream task performance (QA results also lack ECE). Performance should be provided because it is important to remember that we care about calibration and random predictions on OOD only when models capable of solving the downstream task, ECE should be provided to verify that lower ECauE does not break the model calibration. \n- The main results for vision provide ECE and show that adding OOD detection model can increase as well as sometimes decrease ECE (see Figure 2 left - one point might have higher ECE for (f,g) than for (f,0) as well as lower). Such an increase of ECE violates the goal of the proposed method: keep models calibrated on ID.\n\n## Computational limitations\nI think that the proposed method is not usable in practice due to its computational burden. The suggested method for computing ECauE - a measure of \"cautiousness\" - requires training an OOD detection model g in addition to the base model f (see Algorithm 1). 
To compute ECE, for example, no additional models need to be trained.\n\n## Lack of explanation\n- Why do you need Cholesky decomposition in Algorithm 2?\n\n## Unclear writing\nPRAUC in line 338 is not defined.\n\n## Typos\n\n- 142: we are confidence\n- 223: in turn\n- 266: describes\n- 276: Choleskly → Cholesky"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please clarify each of the 3 weaknesses above. Thanks!\n\nThe PDF does not have hyperlinks on sections/equation references nor on citations. This makes the reading **extremely** tedious."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Clear writing.\n\n- Extensive evaluation, very large number of datasets and models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents several (somewhat independent) technical contributions:\n(C1) A new concept, \"cautiousness\", that refers to a model being calibrated on ID data while being uncertain on OOD data.\n(C2) A method to recognize ID vs OOD inputs.\n(C3) A method to achieve their concept of \"cautiousness\" by combining the uncertainty of a given model with the method (2)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My concerns/questions are at a high level, regarding the motivation for this work and the differences with existing concepts. I may be missing something, so I'm keen to update my score if the authors can clarify the points below. In any case, the paper will need clarifications about these points.\n\n- (W1) The related work says that this work differs fundamentally from existing work that aims to ensure OOD calibration, because this one is about being always uncertain when OOD (if I understand correctly). But the former seems to be much more useful, and make more sense for the following reason. \"OOD\" is not a binary property as correctly noted in Section 3). For example, in the case of covariate shift, one cannot strictly classify one example as ID or OOD, and with images, since they're high dimensional, any unseen test example is outside the convex hull of training examples. This is why it seems to make more sense to aim for the same thing (calibration) whether ID and OOD, rather than aiming for complete uncertainty whenever OOD (as proposed, if I understood correctly?).\n\n- (W2) The contribution C2 seem to have a lot in common with existing work on OOD detection, which is a large, well-established area of research. The related work mentions several existing methods, but the field is much larger than that. It discusses some technical similarities and differences with a few existing methods, but it does **not** discuss the need for a new method (C2) . The proposed method (C2) is presented as \"a model that discriminates between ID and OoD inputs\". But to me this is exactly what OOD detection is. So why don't the authors just call \"a new OOD method\"? I may be missing something. But in any case, the need for this method (given the plethora of existing ones) should be made clearer.\n\n- (W3) The need for the contribution C1 is not clear to me. It feels like putting a new name (and defining a combined metric) for two existing areas (calibration and OOD detection). Is the concept of \"cautiousness\" an academic curiosity, or does it answer a need of ML practitioners in the evaluation and/or deployment of ML models? Since this paper seems to address a very practical problem, I think the authors should first describe how the challenges of calibration/OOD inputs are handled in current industrial ML deployments, and then discuss which issues in these practices need new solutions. The authors do provide a \"motivating example\" in the introduction, but the two-moon dataset did not convince me at all that the paper was adressing a concrete necessity."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce the concept of model cautiousness, which requires a model to be simultaneously calibrated in-distribution and uncertain out-of-distribution."
},
"_bibtex": {
"value": "@misc{\ndetommaso2024model,\ntitle={Model Cautiousness: Towards Safer Deployment in Critical Domains},\nauthor={Gianluca Detommaso},\nyear={2024},\nurl={https://openreview.net/forum?id=yNZi38u52U}\n}"
},
"abstract": {
"value": "In this paper, we introduce the concept of model cautiousness, which stresses the importance of aligning a model's confidence with its accuracy in in-distribution (ID) scenarios while adopting a more uncertain approach in out-of-distribution (OoD) contexts. Model cautiousness is framed as a spectrum between justified confidence and complete ignorance, induced by the inability to clearly define a model's domain of expertise. We propose a rigorous post-hoc approach to obtain a cautious model that merges the confidence scores of the primary confidence model and a model discriminating between ID and OoD inputs. A metric to measure the cautiousness error of a confidence model is introduced. We further present a simple method for discriminating ID from OoD inputs and providing a meaningful confidence estimate that an input is OoD. Finally, we benchmark our approach across 12 question-answering and 37 vision datasets, demonstrating its effectiveness in enhancing model cautiousness compared to standard calibration procedures."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Gianluca_Detommaso1"
]
},
"authors": {
"value": [
"Gianluca Detommaso"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"cautiousness",
"calibration",
"out-of-distribution",
"safety"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "detommaso|model_cautiousness_towards_safer_deployment_in_critical_domains"
},
"pdf": {
"value": "/pdf/3904663d8717dd7fd24105406052fa7447f7d8c3.pdf"
},
"presentation": null,
"primary_area": {
"value": "probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Model Cautiousness: Towards Safer Deployment in Critical Domains"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
yOOJwR15xg | MeteoRA: Multiple-tasks Embedded LoRA for Large Language Models | main | Active | LLM;LoRA | foundation or frontier models, including LLMs | 3;5;6;6;8 | 4;4;2;3;4 | 2;3;2;3;4 | 2;2;2;3;3 | 3;3;3;3;2 | 5.6 | 3.4 | 2.8 | 2.4 | 2.8 | -0.184637 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "**Questions**\n1. How does MeteoRA perform on out-of-distribution tasks (e.g., compared to baselines such as the base LLM, LoRA-F, and LoRA-B)?\n2. In section 3.3, what is $p_i$?\n3. For measuring forward-pass speed, what is the batch size?\n\n**Suggestions**\n1. Introduction should quantify benefit of MeteoRA beyond speedup (e.g., average accuracy increase).\n2. Fig 1: unclear where the MoE is located, and how experts are selected.\n3. Background: should cite other MoE-based LLMs, such as GLaM (preceded Mixtral), DBRX, and Grok.\n4. Section 3.3 needs more revisions for clarity. While I appreciate the explanation to motivate the kernel design, it took several reads to fully understand the problem with the `loop-original` method, why it is 10x slower, and how `bmm-torch` works.\n5. Figure 8 is hard to interpret. The font size is small and the colors/lines are difficult to distinguish due to small line width and shading. Such a key figure should be better-presented (i.e., bigger, clearer lines, easier to read).\n6. Figure 5: root-of-runtime is a strange (and potentially misnamed) evaluation metric. It would be better to report runtime directly."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. MeteoRA is a general approach to incorporate domain-specific knowledge from multiple LoRAs in a single model.\n2. Extensive evaluation which demonstrates that MeteoRA performs similarly to the PEFT reference implementation, which provides a reasonable upper-bound reference.\n3. The authors explain concerns about runtime and memory-efficiency. Based on this, the authors design, implement, and evaluate a CUDA kernel which addresses the concerns."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces MeteoRA, which automatically applies the appropriate LoRA adapters to a pre-trained LLM based on the current task.\nMeteoRA is an MoE-inspired approach in which a gating function selects the top-k LoRA adapters for each token in each input sequence.\nThe authors demonstrate that MeteoRA performs similarly to an LLM with a handpicked, in-domain LoRA adapter, and provide an efficient\nkernel implementation that addresses runtime concerns and memory overhead."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. All LoRAs are stored in GPU memory, which limits the scalability of the approach. In contrast, S-LoRA (a LoRA serving system) scales to thousands of LoRA adapters by swapping LoRA weights to host memory. Proposing a target range for the # of LoRA adapters or a method to swap adapters to host memory could help address this concern.\n2. MeteoRA model is fine-tuned on a set of LoRAs and their target domains. Consequently, the approach does not efficiently integrate new LoRA adapters.\n3. Capability regression with $k=2$ indicates that LoRA adapters likely interfere with one another. Some discussion of how to mitigate interference or exploit $k=1$ for further speedups could address this weakness.\n4. On a few tasks, MeteoRA performs worse than the baselines (e.g., NewsIT, CNNDM, and TrackObj). An explanation of why this might be the case could help contextualize these results.\n5. No evaluation on how MeteoRA scales to larger batch sizes. It would be interesting to see the relationship between batch size and runtime/memory because larger batch sizes would access more adapters which could impact these metrics."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The b$\\times$s tokens are treated as independent. However, there is concern about potential correlations among tokens. Knowledge across domains can be interrelated, and sentence meaning may depend on context. How do the authors address this issue? Could the assumption of independence negatively affect performance by ignoring relevant interdependencies?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "MeteoRA effectively implements a scalable integration of LoRA while adopting forward acceleration techniques during the inference phase, thereby enhancing the efficiency of the inference process."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents MeteoRA, a framework combining MoE and LoRA to enhance inference efficiency via forward acceleration. It analyzes PEFT methods related to user tasks, integrating multiple LoRA adapters for new tokens and identifying the top-k experts for processing. The authors introduce a batched matrix multiplication (bmm-torch) strategy to enable parallel processing of LoRAs, improving speed and efficiency over sequential methods. In summary, by merging MoE and bmm-torch, MeteoRA significantly accelerates token processing and enhances operational efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper is not very novel, given that using MoE for LoRA is an idea that has already been extensively explored [1,2,3]. It would be beneficial to clearly delineate how MeteoRA compares to and differs from the referenced LoRAMoE works. \n\n- The term \"reuse existing LoRA\" is misleading and unclear; it implies the need for offline training and does not introduce any innovation compared to other MoE methods.\n\n- While the bmm-torch method for parallel processing of LoRA adapters improves forward training, it may increase memory consumption. This approach requires larger memory allocations for concurrent processing, potentially offsetting time savings from reduced sequential processing. Please provide quantitative comparisons of memory usage and speed gains across different batch sizes or sequence lengths to clarify this trade-off.\n\n\n- MeteoRA is presented as an advancement over existing LoRA techniques, a direct comparison with LoRA MoE methods is missing. Such a comparison could underscore the performance superiority of the proposed method. Could the authors conduct and present a detailed comparative analysis with LoRA MoE [1,2,3] methods?\n\nMinior: \n\n-The font size in Figure 3 is too small to read.\n\nReferences:\n\n[1] When MOE Meets LLMs: Parameter Efficient Fine-tuning for Multi-task Medical Applications, SIGIR 2024.\n\n[2] Mixture of LoRA Experts, ICLR 2024.\n\n[3] Pushing mixture of experts to the limit: Extremely parameter efficient moe for instruction tuning. ICLR 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The use of a full-mode MoE architecture to integrate multiple LoRA adapters is a novel contribution, potentially addressing limitations in existing methods like Huggingface PEFT and S-LoRA.\n- The proposed forward acceleration strategies address efficiency challenges in traditional MoE implementations, achieving significant speedups."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces MeteoRA, a framework designed to enhance the deployment of multiple LoRA adapters in LLM through a Mixture-of-Experts (MoE) architecture. This approach aims to facilitate autonomous task sensing and dynamic adapter switching, improving efficiency in handling composite tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- It will be better to also compare with a model trained with MoE upcycling and discuss the benefit of the proposed method.\n- It should be a more detailed analysis of the triton operator, how it differ from methods like S-LoRA.\n- The legend in Figure 3 is too small"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How to reduce the re-training costs when some LoRA adapters are updated? How many LoRA adapters can be supported by MeteoRA?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "+ Using gated MoE for automatic selection of LoRA adapters\n+ Employing efficient GPU kernel operators for forward acceleration"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes MeteoRA to enable scalable multi-task LoRA embedding within LLMs. The key of MeteoRA is using gated MoE to automatically select the most pertinent LoRA adapters to generate appropriate responses. It also employs efficient GPU kernel operators for forward acceleration. Evaluation results show the effectiveness of MeteoRA."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Re-training is required when some LoRA adapters are updated, making it hard to use in practice"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the appendix, the authors mention, \"However, given the limited capability of the instruction following in the zero-shot setting, neither the MeteoRA models nor the models fine-tuned by LoRA achieve satisfactory results.\" Is there any supporting evidence for this statement? Also, why did the authors ultimately choose a 2-shot setting?\n2. According to Figure 3, MeteoRA's performance on the ParaSeg task is noticeably poor when using LlaMA2, but it shows significant improvement with LlaMA3. Could the authors provide an explanation for this?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "### Originality\nSince the gate logits approach is primarily inspired by methods from Mixtral of Experts, the main novelty lies in the design of the loss function and the implementation of forward acceleration. This method can solve up to ten sequential problems in a single inference pass automatically, which demonstrates both the scalability and utility of the proposed solution.\n\n### Quality\nThe technical details appear correct, as the paper does not involve extensive mathematical derivations. The authors also introduce efficient acceleration strategies for MoE, which contribute meaningfully to improving the computational feasibility of such models.\n\n### Clarity\nThe paper is generally clear in presenting the proposed methodology and results.\n\n### Significance\nThe work addresses a significant challenge in the field of parameter-efficient fine-tuning (PEFT) by proposing a novel approach to autonomously manage and switch between multiple LoRA adapters embedded within a single LLM. The proposed framework is of considerable practical value for large-scale language models and downstream applications. The emphasis on practical deployment of the framework and the specific use of LoRA in a multi-task setting distinguishes this work from previous studies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose an automatic selection model for multi-task LoRA using a MoE arch, supporting both top-1 and top-k selection methods. In addition to constructing an automatic selection process using Gate Logits, they also utilize PyTorch's bmm operator for forward acceleration. The authors demonstrate the effectiveness of their proposed algorithm across multiple tasks by comparing it with existing multi-task LoRA methods as well as their own baselines, LoRA-F and LoRA-B."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### Originality\nThe gate logits approach is primarily inspired by methods from Mixtral of Experts, and the paper could more clearly emphasize its origin and what it is (just a linear layer with softmax).\n\n### Clarity\nThe description of the gating network is insufficient. More details on the construction of composite-3/5/10 tasks should also be provided in the main text. More motivations should be included.\n\n### Writing Quality\nThe LaTeX formatting should be unified, such as using consistent notation for $o_{base}$ and $W_\\mathrm{base}$ in Equation (2). Additionally, consistent notation should be used in the formulas (e.g., whether the vectors are row or column vectors, and whether matrices operate on vectors from the left or the right). For instance, in Equation (2), the expression $o = xW_\\mathrm{base} + x\\Delta W_{I(x)}$ is used, while in Appendix A, $h = W_\\mathrm{base}x + B_iA_ix$ is used."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A scalable and efficient LoRA embedding framework for LLMs, enhanced with novel MoE forward acceleration strategies that significantly boost inference speed."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024meteora,\ntitle={Meteo{RA}: Multiple-tasks Embedded Lo{RA} for Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yOOJwR15xg},\nnote={under review}\n}"
},
"abstract": {
"value": "The pretrain+fine-tune paradigm is foundational for deploying large language models (LLMs) across various downstream applications. Within this framework, Low-Rank Adaptation (LoRA) stands out for its parameter-efficient fine-tuning (PEFT), producing numerous reusable task-specific LoRA adapters. However, this approach requires explicit task intention selection, posing challenges for autonomous task sensing and switching during inference with multiple existing LoRA adapters embedded in a single LLM. In this work, we introduce MeteoRA (Multiple-Tasks embedded LoRA), a scalable and efficient framework that reuses multiple task-specific LoRA adapters into the base LLM via a full-mode Mixture-of-Experts (MoE) architecture. This framework also includes novel MoE forward acceleration strategies to address the efficiency challenges of traditional MoE implementations. Our evaluation, using the LlaMA2-13B and LlaMA3-8B base models equipped with 28 existing LoRA adapters through MeteoRA, demonstrates equivalent performance with the traditional PEFT method. Moreover, the LLM equipped with MeteoRA achieves superior performance in handling composite tasks, effectively solving ten sequential problems in a single inference pass, thereby demonstrating the framework's enhanced capability for timely adapter switching."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM",
"LoRA"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/863f6792e5c7dde03fc9de37da985f421af04502.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MeteoRA: Multiple-tasks Embedded LoRA for Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yORSk4Ycsa | ReCogLab: a framework testing relational reasoning, cognitive hypotheses on LLMs | main | Active | Congitive Science;Large Language Models;Datasets;Evaluation;Relational Reasoning | datasets and benchmarks | 3;3;5;6 | 3;4;4;4 | 1;3;2;3 | 1;1;2;3 | 1;3;2;2 | 4.25 | 3.75 | 2.25 | 1.75 | 2 | 0.555556 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "As seen above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**Flexibility and universality**\n\nReCogLab has significant advantages over the previous dataset in terms of flexibility and universality. Such a dataset would bring many conveniences to research in cognitive sicence and holds significant practical value.\n\n**Good breadth**\n\nThis article demonstrates a good breadth, reprising experiments from multiple aspects of cognitive science, and measuring the ability of multiple LLMs across various dimensions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This article introduces ReCogLab, a flexibly generated dataset for quantitatively measuring relational memory and reasoning performance in LLMs, and conducts several experiments using the framework. ReCogLab offers a fully customizable and automatically generated dataset, enabling researchers to delve into the nuances of cognitive processes.\n\nThe paper begins by introducing ReCogLab. Examples generated by ReCogLab come from one of four tasks: Comparison, Social Networks, Syllogisms and JSON Families. Each example includes context C, question Q and answer(answers) A. Every aspect of the data generated can be controlled via a configuration file specifying parameters, which allows for the creation of datasets with varying length and complexity. And for each specific cognitive probe, ReCogLab generates 50 validation examples to select the best prompt and 1000 test examples for comprehensive evalution. Notably, the authors decoupled the prompt and parsing hyperparameters from the LM to minimize the impact of prompting on LMs’ performance.\n\nThen, the authors conduct several experiments to benchmark relational reasoning performance across different models and problem complexities and explore how reasoning performance depends on certain features. The article looks at transitive inference, congruency, the symbolic distance effect, identifying logical inconsistencies and indeterminate reasoning. These experiments were all made with little additional effort using the ReCogLab framework by altering the configuration files. The findings reveal that many human cognitive phenomena are mirrored in LLMs.\n\n**Experiment 1: Transitive Inference**\n\nPurpose:\n\nTo evaluate models’ ability to reason accurately about associations presented in context and measure whether there was a dependence on presentation order, and further measure how the ability depends on complexity measures of the reasoning problem. \n\nResults:\n\n1.(Comparison and Syllogisms) Randomizing the order causes noticeable degradation while reversing the presentation order affects performance in many but not all cases.\n\n2.(Syllogisms)The authors speculate that consistent presentation order may bias the model toward approximate solutions.\n\n3.Performance generally degrades as the complexity of the problem increases across all models.\n\n**Experiment 2: Congruent and Incongruent**\n\nBackground:\n\nLogical reasoning is more accurate when logical premises are congruent with real-world facts for LLMs(as it is for humans).\n\nPurpose:\n\nTo investigate the impact of congruence on language model performance.\n\nResults:\n\nUsing congruent statements outperforms similar comparison problems constructed with incongruent statements.\n\n**Experiment 3: Symbolic Distance Effect**\n\nPurpose:\n\nTo validate that LLMs will also show a symbolic distance effect.\n\nResults:\n\nFor models that perform above chance, there is a clear positive symbolic distance effect that starts when the symbolic distance is greater than 15.\n\n**Experiment 4: Identifying Inconsistent Premises**\n\nPurpose:\n\nTo explore whether LLMs have the ability of identifying when a premises is logically inconsistent.\n\nResults:\n\nLarger LMs perform better on detecting inconsistent statements.\n\n**Experiment 5: Indeterminate Reasoning**\n\nPurpose:\n\nTo evaluate models’ ability to understand when there’s insuffient information to draw a conclusion.\n\nProcedure:\n\nThe authors start with comparison problems which contain a fixed label set of Yes or No. 
Then they modify the comparison problems from a linear chain to a random tree generation while still asking questions about two random nodes, which provides insufficient context.\n\nResults:\n\nThere exists a bias toward incorrectly answering \\textquotedblleft yes\" on logical prompts when it is uncertain and reporting uncertainty when in fact it is unknown."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Lack of evaluation of dataset quality**\n\nWhen evaluating the quality of a dataset, people commonly employ methods such as statistical analysis, visualization, cross-validation, and expert review. However, in this article, we only see qualitative expressions about the performance of ReCogLab, such as its comprehensiveness and flexibility. And I’m wondering would it be possible to add some quantitative indicators to more strongly demonstrate the quality of ReCogLab or compare ReCogLab with those originally used in the same or similar experiments to illustrate its advantages?\n\n**Lack of detailed experimental procedures**\n\nThe description of the experimental procedures could be more detailed. It did not provide a detailed explanation of how ReCogLab was used in these experiments and how exactly the parameters were set.\n\n**Unclear motivation**\n\nThe authors haven’t provided evidence for their classification (comparison, social networks, syllogism, json families). What’s more, the decision to test all tasks as a whole is questionable, especially considering that previous research has addressed these aspects individually. The necessity of compiling these tasks for collective testing is not immediately clear, and this approach may need further justification.\n\n**Lack of insights**\nThe authors simply test LLM without delving into a more detailed analysis of each aspect, which results in superficial analyses. The authors should perform deeper analysis, such as for the transitive inference, the authors should offer an explanation of why the change of order matters for LLM, for congruent why is it important (whether this phenomenon comes from data or from training algorithm or transformers architecture), ...\n\n**Lack of novelty**\n1. In Experiment 1, the authors' assertion that \"premise order matters\" was previously identified in the article \"Premise Order Matters in Reasoning with Large Language Models\" published on May 28th. In Experiment 2, the earlier paper \"Language Models, like humans, show content effects on reasoning tasks\" demonstrated the importance of congruence in reasoning. Furthermore, there appears to be no significant distinction between relational reasoning and general reasoning. Could the authors compare with those above-mentioned papers to address the novelty of this paper? Furthermore, could the authors talks about the difference between relational reasoning and general reasoning?\n\n 2. Additionally, in Experiment 4, the detection of inconsistency is an old topic, and it comes as no surprise that Large Language Models (LLMs) are capable of performing this task, could the authors demonstrate some new discoveries in their experiments? Moreover, this topic bears a striking resemblance to Knowledge Base Question Answering (KBQA), can the authors provide the difference between the topic of relational reasoning and KBQA? \n\n**Some editorial errors and missing citations**\n\nIntroduction paragraph 1: While recent work...\n\nCitations are missing.\n\n\n\nIntroduction paragraph 2: From this literature...\n\nThe authors didn’t point out what “this literature” refers to.\n\nIntroduction paragraph 3: In this work, we aim to provide an evaluation framework that allows for the systematic evaluation or LLMs on relational memory and investigation of different possible effects(many inspired from the cognitive science literature).\n\nMaybe it means “of”?\n\nBackground paragraph 1: $A > B$, $B > C$, $B > C$, $C > D$, $D > E$\n\nPerhaps “$B > C$” is repeated?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* It would be good if the authors could provide more details on their framework and how it works. \n\n* It would be good if the authors tested their frameworks on open models in a way that allowed them to draw scientific conclusions about why some models fail and some don't or show the effects they do. \n\n* It would be good if the authors justified their tasks (I'm not saying they aren't good tasks, I'm saying the authors should motivate them further). The introduction and background could use major tightening and also moving up the fact that you tested indeterminate reasoning and inconsistent premises, and also to explain more how this differs from other major works recently looking at relationship reasoning. \n\n* It would be good if the authors connected this better to the specific psychological literature\n\n* It would be good if the authors provided statistical tests to back up their claims."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper has several welcome aspects: \n\n- Generally speaking, relational understanding is a basic part of cognition, so the topic itself is important. \n- Testing LLMs on their basic understanding of relations is useful for our understanding both of these models and of human cognition.\n- The paper includes more than a single example or a single model, allowing us to (in principle) draw more general conclusions\n- The 'stress testing' done by increasing the number of variables/nodes/edges is useful for assessment."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper does several things: \n\n* It introduces ReCogLab, a way of generating data-sets for testing relational understanding in LLMs\n* It evaluates several LLMs on several relational understanding tasks, noting successes and limitations, and speculating on the connection to human reasoning about relations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While there are many positive aspects to the work, and I think it is generally in the right direction, there are some specifics to the paper that make it difficult to recommend acceptance:\n\nSome general comments: \n\n* While I understand the authors' decision not to provide full details of their framework, the fact that they present it as a major contribution of the paper while at the same time not providing full details of how the framework works and promising to do so only upon publication makes it difficult to assess it. \n\n* While I appreciate the use of several tasks and models, it isn't clear or justified why *these* tasks are specific to relational understanding (and not others), and the use of closed models makes it difficult to assess what is actually going on beyond saying \"these models do it well, those models don't\". I get that many of us are currently assessing models in this way and I do think it is useful to assess the state of the art, but at the moment the assessment of closed-models by mega-corporations falls more into doing QA for them than being able to say something scientific about why and how LLMs fail or succeed on these tasks. \n\n* The connections to human reasoning and psychology are welcome and overall ok, but there are many places where they could be improved (see more details below). Also, I found several of the cases of arguing along the lines of \"the models are doing poorly and people also do poorly so maybe they're the same\" to be speculative at best and not digging into the phenomena, again see more details below. \n\n* There are many cases where statistical claims are made without statistical tests to back them up. While I agree the tests would likely back up the claims I don't think severe of the claims are resting on solid ground at the moment (again, see more details below). \n\nComments in more detail (these are not in order of importance but more in order of reading the paper, so some of these are truly minor and had no real affect on the final assessment, they are provided in case they help the authors tighten the work):\n\n-- Not terribly important but the first paragraph is making a bunch of claims that would ideally be backed up by references. For example, \"While recent work on memory in large language models (LLMs) assumes that memory refers to the recall of a specific piece of information from the past\" -- which recent work?, or, when you say \"When humans remember events, facts, places, etc., they don’t just recall disconnected pieces, they recall the associations\" -- what is backing this? Similar comments apply to much of the second paragraph\n\n-- Given the importance of relational reasoning that the authors emphasize and I agree with, I found it a bit odd that when we do get to references to back it up in humans it is to a 1976 paper on a specific phenomena of symbolic distancing, a 2010 general textbook, and a 2018 neuroscience paper on episodic memory in the hippocampus. This feels like a \"grab bag\" of disconnected things.\n\n---- Side note, related to the above: With all respect to Domjan (2010) I found the frequent reliance on an undergrad psych text-book throughout the paper odd. It would be good to back up your claims with regards to more specific papers in the literature. 
\n\n-- As mentioned in the general comments: I'm not knocking the specific things you decided to test too hard (syllogisms, comparisons, etc) but it would be good to provide some stronger justification for why *these* specific things and not others. Do they specifically cover a majority of relational reasoning? Are they uniquely interesting in some way? I'm not saying they don't or aren't, but the reader shouldn't have to do that work. Also, while we're here: Given the weight and importance you attach to identifying inconsistent premises later in the paper, they seem to come out of nowhere because they aren't really justified in the introduction and given weight or airtime in the figures and background and text that discusses the other cases you're looking at. \n\n-- I said I wouldn't knock the specific tests too hard, but I think the 'social networks' one deserves even more justification, given that it doesn't seem to actually be about 'relations' or 'social reasoning' but, as the authors themselves point out later, navigation of graphs independently of the 'relations' between people. It doesn't really seem to be about relational understanding in the way that, say, \"X is on Y\". The results would presumably replicate with just \"X is to the right of Y\" or various other room descriptions. So, why go through stuff like \"X is the child of Y, Z never shares T's secrets...\"; it all seems kind of unnecessary, and would probably not really work in humans. The interesting stuff about enemies and non-relations is mentioned by the authors but not explored further. \n\n-- super-duper-minor: For figure 1 it would be nice to have the colors match up with the text. For example, the setup for comparison is \"orange > bottle > tv > dog\", yet in the text dog is red and orange is blue, while in the graph the red node is 'bigger' than the blue node and is separated by only 1 node from it. \n\n-- I get the focus on LLMs and it is good to keep it, but it would be nice to at least acknowledge briefly in the background that other models have tried to model this stuff that don't rely on LLMs. The entire field of Inductive Logic Programming seems relevant, for example. \n\n-- The first paragraph in the background is very disjointed, it makes 3 separate points that don't connect.\n\n-- \"Substantial work has compared reasoning in LLMs to humans\" -- it is odd to follow this sentence up with a reference to Stupple & Ball (2008) or Evans et al. 1983, which took place before 'LLMs' were a term. I'm not sure if the authors were tripped up by the use in the paper of \"parallel processing\" (which doesn't mean what it means in PDP connectionism).\n\n-- I appreciate mentioning relevant LLM papers in the background section but it would be good if the authors further delineate what their work contributes beyond the already substantial contributions of those works, right now it isn't particularly clear what the contrast is. \n\n-- ״because friendship is a symmetric relationship, social networks are undirected graphs״ -- while it is true that many social networks are modeled to a first approximation using a simple undirected graph, I'm sure people in social psychology would balk at the statement that 'friendship is a symmetric relationship'. Also the consequent doesn't follow, social networks describe friendships as well as rivalries, hierarchies, family relations, and many other things besides not captured by a simple representation along these lines. 
As mentioned above, it is not clear to what degree this task is about 'relational understanding' more than it is about navigating an undirected graph, where the graph could describe spatial relations, concepts, or nonsense. \n\n-- it wasn't entirely clear to me how you check for grounding in a generalizable way. That is, I understand that in this case specifically, you used a different LLM to generate the sizes and weights of things, but if I wanted to ensure that my data generally matches reality for some arbitrary property, this seems like a currently unsolved problems in LLMs so we can't simply say 'oh, we'll use LLMs to make sure it is grounded'. \n\n-- I appreciate the decoupling of prompting, but what are the actual details of how this decoupling happened? Which prompts did you use and why? \n\n-- \"publicly available language models\" doesn't mean \"open\"\n\n-- There are several cases in the paper where the authors make claims that are meant to be backed up with a statistical test. \n\nFor example:\n\" In Fig. 2 we see this experiment for Comparison and Syllogisms. For Comparison, we can clearly see across nearly all models randomizing the order causes noticeable degradation (Gemma 2B and 9B perform near or below chance of 0.5, guessing or giving invalid answers). Reversing the presentation order also often negatively affects performance in many, but not all cases.\"\n\n'We can clearly see' is not a statistical test. Presumably you want to use an ANOVA here or one of a myriad of other ways of assessing this claim. Similarly 'negatively affects performance in many, but not all cases' is a non-exact, non-specific way of putting a result. \n\n* \"For most models we see a curve in which accuracy decreases with symbolic distance for very small symbolic distances, but then improves for symbolic distances >15.\" -- Again, this is highly inexact. What do you mean 'most models', what do you mean 'improves above 15'? What statistical test are you running to say these statements? Are you comparing a linear fit to a quadratic fit, for example? Are you simply fitting a linear line and looking at the rise? Are you doing a piece-wise and finding a transition point at 15, and what is associated value of the parameters for any statistical test you ran?\n\n* \"\"Gemini Pro actually levels out rather than decreasing and GPT-4o only decreasing slightly, suggesting that they are particularly good at needle-in-the haystack style questions\" -- here and elsewhere, while I appreciate the speculation, we see the problem with evaluating these closed models, we simply have no way of knowing what is going on. \n\n-- The authors should probably face head-on the fact that spatial distancing in these models isn't working like it does in people. In people it drops from a distance of 1 to 2, but then steadily rises. For these models it seems (to the degree that one can say anything) more like a u-shaped function. \n\n-- I didn't understand why you tested the effect of capacity in the ranges you did, sometimes it seems to go 0-100, sometimes 0-50, sometimes 1-20, sometimes 5-30. In cases where we see degradation it is reasonable to argue it exists, but in cases where we don't (JSON family, values 1-20) it isn't clear if this is a result of not going beyond the current range. \n\n-- Super minor hair splitting: 'nonsense relations' like 'Benny blexed Kelly' is not 'inconsistent' with the real world in the same way that 'iron is lighter than air'."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Why are previous benchmarks inadequate?\n2. Why are these methods preferred?\n3. Why do LLMs succeed or fail on these different benchmarks?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "Examining the capacities of LLMs is important, and it is important to understand the more difficult aspects of potential cognition such as relational reasoning. Establishing benchmarks is an important part of building this research program."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "ReCogLab provides a series of benchmark tasks for relational memory. Although the tasks themselves do not necessarily seem problematic, the coherence of this series of benchmarks was unclear. Why were these tasks used as opposed to others? The paper would be greatly improved explicitly stating the criteria for selecting these tasks and articulating how these tasks relate to different theoretically distinct aspects of relational reasoning. Building specific hypotheses with respect to different cognitive operations may allow the reader to better understand how to position or understand differences between tasks, especially with the potential for different results. As part of this. the authors could more explicitly articulate specific reasons LLMs (either in general, or specific LLMs) would perform on some tasks compared to others.\n\nIn the background, the authors note that some of these tasks have been used in isolation, but there was limited information about how LLMs did on those tasks, and other than having all together in one set, how these benchmarks were preferred to the ones in the literature. Please provide summaries of previous LLM performances on related tasks. What do the authors see concretely about using this set as opposed to the previous tasks?\n\nIf this is a benchmark, is it necessary to have human data? Do humans perform differently on these tasks, and might that be a reason that the different benchmarks are needed? The authors should consider whether human data would help understand LLM performances on these tasks, or whether human data would provide insights into how to understand differences between the tasks. What results would the authors expect?\n\nIt is unclear why this is relational memory as opposed to relational thinking. What aspects of this make the processes specifically memory related? Is memory necessary for solving these tasks in a way that is different than other LLM tasks (one could make the argument that all LLM tasks require some degree of memory).\n\nIn sum, the weaknesses of this paper come from inadequate framing and positioning of its importance in the larger literature. It is possible with more information that the strengths of the paper would be more clearly understood."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although benchmarks are important, it wasn't clear from the writing why these benchmarks are the ones to be used. A disucssion about previous benchmarks is needed with attention to their limitations. The authors main claim is that there isn't a set of all these benchmarks together, but one should simply stitch together other benchmarks as a set. A discussion of why this is not a good strategy is needed. If these are benchmarks, human data may be needed to understand what good performance is."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What is the goal of studying this type of reasoning in LLMs? \n2. How might we use the outputs of ReCogLab to better direct LLM development and deployment?\n3. Did you run any experiments with Chain-of-Thought prompting? It would be interesting to determine whether this had any impact on performance."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper has several strengths. First, the production of a sandbox testbed, rather than a static, fixed benchmark, is exactly where evaluation should be going in AI and machine learning. ReCogLab is a toolbox for creating nuanced cognitive experiments, rather than a dataset that someone can take off the shelf and apply without much thought. Moreover, the toolbox significantly reduces the likelihood of future LLMs being trained directly on testing data - contamination is something that has significantly hindered the validity of many new and old benchmarks in the literature. Second, the authors identify a series of psychological effects that few have studied directly in LLMs, but which are key markers of behavioural similarity between us and them, allowing us to diagnose the human-likeness of these systems. Finally, the analyses are comprehensive and balanced. The authors are cautious not to prematurely ascribe human like cognitive capabilities to these systems, noting that there is more work to be done to show this. This is a refreshing contrast to much of the overzealous AI hype from other (non-cognitive-science) researchers in the field."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces ReCogLab, a sandbox evaluation paradigm for studying memory and reasoning in Large Language Models, drawing inspiration from the cognitive sciences. ReCogLab currently contains a toolkit for building memory and reasoning tasks based on four domains: property comparisons, social networks, hierarchical reasoning (which are called JSON families), and syllogistic reasoning. Each of these domains are formalised as tasks that probe graph-based reasoning. Problems are constructed as (directed) graphs, where each vertex is a person or an object and each edge is some directed relation between those objects. Graphs are built implicitly in ReCogLab and then described only in natural language vignettes. The LLM is then tasked with answering questions about the graphs, also in natural language. For instance, in the Social Networks domain, a graph is implicitly constructed that relates certain members of a social group. The graph is described with sentences such as 'X is friends with Y' and 'Y is friends with Z'. The LLM is then tasked with, for example, describing how a message would be most quickly communicated between two members of the social network, essentially by traversing the edges of the graph. The authors create a testbed of tasks from their four domains drawing inspiration from real experiments done with humans in the cognitive sciences, and study the reasoning and memory capabilities of six large language models. This enables them to study, in LLMs, a number of psychological tendencies that have been observed in humans. Ultimately, they observe many human-like effects emerging in LLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are a number of weaknesses in the paper. First, the authors only evaluate 6 LLMs. The addition of at least some members of the Llama series would lead to the results being more comprehensive of the state-of-the-art. Second, the authors could better review other calls to action for doing cognitive science experiments on artificial intelligence/large language models, as well as other frameworks that offer sandbox evaluation toolkits. I have included a number of relevant citations below. Third, the authors could have played more with chain-of-thought prompting to determine whether this leads to better performance on their tests. Finally, there are some minor linguistic disfluencies. The title is difficult to parse. \"A framework testing relational reasoning, cognitive hypotheses on LLMs\" contains two disjoint clauses that aren't related. This is off-putting to the reader. Furthermore, the abstract needs to be checked for sense. 'Phenomenon' should be plural 'phenomena' and the sentence started 'while some of these have been studied individually...' needs to be reworded as it doesn't make grammatical sense. Lastly, the term 'indeterminancy' should be replaced with 'indeterminacy' in the latter part of the paper.\n\nReferences\n\nHernández-Orallo, J. (2017). The measure of all minds: evaluating natural and artificial intelligence. Cambridge University Press.\nKiela, D., Bartolo, M., Nie, Y., Kaushik, D., Geiger, A., Wu, Z., ... & Williams, A. (2021). Dynabench: Rethinking benchmarking in NLP. arXiv preprint arXiv:2104.14337.\nThrush, T., Tirumala, K., Gupta, A., Bartolo, M., Rodriguez, P., Kane, T., ... & Kiela, D. (2022). Dynatask: A framework for creating dynamic AI benchmark tasks. arXiv preprint arXiv:2204.01906.\nVoudouris, K., ..., Cheke, L. G. (2024) The Animal-AI Environment: A Virtual Laboratory For Comparative Cognition and Artificial Intelligence Research. arXiv preprint arXiv:2312.11414."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We build a flexible dataset generator for relation reasoning in LLMs to probe different cognitive effects and biases."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024recoglab,\ntitle={ReCogLab: a framework testing relational reasoning, cognitive hypotheses on {LLM}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yORSk4Ycsa},\nnote={under review}\n}"
},
"abstract": {
"value": "A fundamental part of human cognition is the ability to not only recall memories, but to reason and manipulate information from them. In cognitive science and psychology, this is termed relational reasoning or relational memory and a number of cognitive effects and biases have been observed and proposed. Some of these effects include \\textit{congruence}, \\textit{the symbolic distance effect} and \\textit{transitive inference}. In addition, many other phenomenon of large language model performance have been observed or proposed. While some of these have been studied individually in prior work and datasets for general long-context inputs have been proposed, none of these has the flexibility to study all or even most of these hypotheses or phenomenon. In this work, we create a fully customizable, automatically generated dataset which allows us to study these effects in detail. We introduce four settings with multiple cognitive-reasoning-inspired tasks targeting different skills and difficulties with parameters of each of these being configurable to run probes on different abilities. With our framework, we test and find many of these human cognitive effects are repeated in LLMs and provide a number of interesting analyses."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Congitive Science",
"Large Language Models",
"Datasets",
"Evaluation",
"Relational Reasoning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c1aebe44687959062325ff95966fde436ef7b981.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "ReCogLab: a framework testing relational reasoning, cognitive hypotheses on LLMs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yOhNLIqTEF | Generalization of Transformers with In-Context Learning: An Empirical Study | main | Active | generalization;in-context learning;transformer | alignment, fairness, safety, privacy, and societal considerations | 5;6;8 | 4;3;3 | 2;3;4 | 2;3;3 | 3;2;4 | 6.333333 | 3.333333 | 3 | 2.666667 | 3 | -0.755929 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "On line 213, you state that you add a third class in order to balance the data for the Baseline model. Why did you not just sample more data from the 4 base function classes?\n\nDo you expect your results to hold for a different class of functions, s.a. polynomials?\n\nDo you expect similar results for non-transformer architectures?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "Pros: Addresses an important problem in a systematic way.; Clearly defined research questions and synthetic data. ;Extensive experiments.; Provides actionable insight for training transformers;"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents an empirical study which seeks to advance our understanding of the in-context learning abilities of transformers. It identifies three different types of generalization, depending on the training and test data: inter-problem (ICL of completely unseen problems), intra-problem (ICL of similar tasks) and intra-task generalization (ICL of already seen tasks).\nExperimental results are presented on synthetic data, involving trigonometric functions, as well as more realistic settings, namely tool-calling and translation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Cons: \n\nIt is not clear to me that the chosen synthetic functions are representative of ICL tasks that we care about, and that the conclusions drawn from them would be applicable to other synthetic functions.\n\nHaving a non-transformer baseline would have been useful in order to understand if the observed behaviours can differ, based on the architectures."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* The performance improvement on simple tasks when incorporating combination data is intriguing. \n - To ensure a fair comparison, did you control the training data size to have an equal number of samples related to the target simple tasks? From my understanding, combination data should also be factored in as they provide supplementary knowledge. \n - Additionally, have you tried a reverse inter- and intra-problem generalization setup to explore how effectively the model can learn directly from combination data? This could help better understand the generalization mechanism across different conditions. \n\n* Regarding the ComFuncLearner setting, did you observe any impact of data point order, such as when using a curriculum learning schedule? It seems feasible that learning samples from easy to hard could affect the model's performance, particularly for complex generalization tasks.\n\n* Why choose $\\mathcal{F}_5^{(0)}$ to be included in Baseline? It seems unrelated to the functions of interest and may hinder the performance."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* **Well-scoped problem formulation**.\nThis paper studies three different dimensions of the generalization with ICL in well-scoped scenarios with a clearly defined experimental protocol. The task formulation corresponding to each research question is clear and adaptable, making it straightforward to generalize findings across the chosen real-world domains.\n\n* **Comprehensive analysis and extended experiments**.\nThis paper conducts comprehensive experiments that strongly support their conclusions. There are also smart experiment designs in different scenarios to interpret the performance trend across different approaches. I also find the experimental setups sharp and non-trivial in real-world tasks such as tool calls.\n\n* **Convincing and intriguing results**.\nThe analysis regarding intra-problem generalization and how it works in different tasks provides insights into how data diversity and combinations can augment training for better generalization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work investigates the generalization boundary of transformers' in-context learning. \n\nThe authors propose systematically exploring this problem through three dimensions: (i) inter-problem: generalization on problem combination; (ii) intra-problem: generalization across problem combination with guidance; and (iii) intra-task: in-domain generalization. \n\nThey conduct experiments on function fitting, API calling, and translation and shed light on strategically designing training data by leveraging the intra-problem and intra-task generalization of ICL."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **Lack of analysis for a mechanistic understanding of intra-problem generalization**.\nWhile intra-problem generalization appears crucial to ICL, the paper does not sufficiently explore the mechanisms behind this phenomenon. They show that transformers excel in intra-problem generalization across function-combination operators, yet it's unclear how to disentangle the effect of training data in terms of the knowledge derived from functions versus operators. For example, the operator is like a second-order function that can be generalized to higher-order ones, which is particularly pertinent in real-world applications. More in-depth analysis or discussion on this would enhance the work's generalizability and provide greater insights into the workings of intra-problem generalization.\n\n* **Simple task formulation that may not apply to complex natural language tasks**.\nWhile the experiments cover domains like tool usage and provide consistent results, how these findings apply to complex natural language tasks, such as reasoning, is still unclear. For example, results on Llama3 suggest that pre-trained language models may struggle with function fitting despite extensive training corpora. This highlights a domain shift challenge that could be especially relevant for NLP tasks due to the inherent diversity in language. Addressing this limitation or providing additional insights into generalization on natural language tasks could broaden the study’s applicability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* **Q1**: What is the meaning of the superscripts in Eq. 2 and other equations? I.e., what does $(0)$ mean? It appears that it is always the same throughout the paper and seems unnecessary (e.g., superscripts are absent in Eq. 3).\n\n* **Q2**: What is the range of $\\phi$ in the experiments?\n\n* **Q3**: Are the GPT-2 models trained with permutation invariance? If either yes or no, why? I think it’d be meaningful to train it with permutation invariance, as it better fits the considered sinusoidal ICL problems (Eqs. 2 & 3).\n\n* **Q4**: How are test functions generated, e.g., in Fig. 2? For example, Fig. 2 left (similar Fig. 3 left) shows $f(x)=sin(x)-0.5$. However, this is different from the functions defined in Eqs. 2 & 3 or Fig. 1, e.g., $\\mathcal{F}^{(0)}_1=\\lbrace\\phi sin(x): \\phi\\in\\mathbb{R}\\rbrace$. Importantly, that may influence the results. This could impact the results, as seen in Fig. 2 (second plot from the left), where both methods closely follow the sinusoidal shape but appear shifted.\n\n* **Q5**: l. 255-258: should it be Baseline instead of ComFuncLearner? If not, could you please clarify what is meant in that sentence?\n\n* **Q6**: Do the findings from Sec. 3.1 also hold when we train the ComFuncLearner on the convex combination $\\mathcal{F}_1^{(0)}+\\mathcal{F}_2^{(0)}$ instead of $\\mathcal{F}_1^{(0)}+\\mathcal{F}_3^{(0)}$?\n\n* **Q7**: Why was Llama-3 chosen in Sec. 3.4? The experiments would be more consistent if they also used Llama 2, as in Sec. 4 (as they must for fair comparison with ToolLlama).\n\n* **Q8**: Can you provide results for the mixed translation task (Sec. 4.2)?\n\n* **Q9**: How do the authors account for synonyms/paraphrases in the translation experiments (Sec. 4.2)? I tried to input the English output sentence (“Policies and measures…”, Fig. 10) into different translators and translated them to German and back to English. They often yielded very different sentences but the core meaning remained the same. Other metrics than BLUE, e.g., BertScore, may be better fit. \n\n## Suggestions\n\n* **S1**: Most experimental Figs. & Tabs: I suggest writing out the functions (e.g., f(x)=sin(x)) instead of the current format of the subplot titles or table entries. For example, in Tab. 3 & Fig. 4 for compositional combinations, it’s challenging for readers’ to figure out what’s the inner and outer function at a glance.\n\n* **S2**: The paragraph in l. 263-265 appears to address two separate things that could be separated for clarity.\n\n* **S3**: It'd be good to add the description of the ComFuncLearner model variants in l. 364-366 also in Sec. 3.2.\n\n* **S4**: The meaning of the red numbers in Table 4 should be clarified, especially since the number for Baseline+ICL_s under Average_s is not directly computable from the table (likely due to rounding)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* **S1**: The work’s systematic setup across the three axes is very well-designed (Secs. 2.2 & 2.3). The real-word experiments (Sec. 4) are also set up sensibly.\n\n* **S2**: The results for compositional combinations are validated on real data and commonly used LLMs. Findings and conclusions from the synthetic experiments from Sec. 3.3 also hold on real data for compositional tasks.\n\n* **S3**: The paper is generally well-written and clear (except the minor points from the questions/suggestions below).\n\n* **S4**: The paper studies a problem that I believe is relevant to the research community. Understanding when ICL will generalize and when it will not is an important research question."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The present work studies in-context learning (ICL) generalization. The authors’ ICL problems considered sinusoidal functions with varying frequencies and amplitudes (Eq. 2) as basis functions, and additive, multiplicative, or compositional combinations of two of such sinusoidal basis functions (Eq. 3). They considered 3 axes for ICL generalization:\n* Inter-problem generalization: Can models trained on the basis functions alone generalize to additive, multiplicative, or compositional combinations of functions?\n* Intra-problem generalization: Can a model trained on some functions generalize to other, functionally very distinct functions (e.g., combinations of functions with different frequencies)?\n* Intra-task generalization: Can a model trained on some functions generalize to functionally similar functions (e.g., only changes in amplitude that don’t change the x-positions of characteristic points like minima)?\n\nThroughout the experiments, the authors found that transformers show intra-task and intra-problem but lacked inter-problem generalization (Secs. 3 & 4). They partially verified that these findings on synthetic data also hold on real data (Sec. 4). They conclude that task diversity is essential for improved ICL generalization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **W1** (also see **Q4**): The paper claims that “transformers lack inter-problem generalization with ICL” (l. 19), yet Fig. 2 left or Fig. 3 left show that transformers generalize to y-axis shifted sinusoids (to a certain extent). This seems contradictory. A more nuanced analysis of when transformers can and cannot generalize across ICL problems would strengthen the paper.\n\n* **W2**: The finding that task diversity aids inter-problem generalization is neither novel nor surprising. This is a known reason, e.g., for training LLMs on large, diverse text corpora, as diversity is known to be a key ingredient for generalization. Other popular research areas (e.g., model robustness) also found that data is a key factor. That this is also the case for ICL generalization is expected.\n\n* **W3**: The paper does not offer new insights when transformers do or do not generalize in ICL, particularly in inter-problem generalization. For instance, I think it is clear that the baseline transformer won’t generalize to convex combinations with basis functions with different frequencies (e.g., $sin(x)+sin(2x)$) when trained only on sinusoidals with the same frequency, as these combinations are not well-supported by the training data.\n\n* **W4**: It seems that the combined ICL problems in Eq. 3 will have function values outside the range of those in models trained only on the basis function from Eq. 2. However, this may also be a misunderstanding, as the values for $\\phi$ in the experiments are not provided (see **Q2**).\n\n* **W5**: The experiments on convex combinations (Sec. 3.1) and product combinations (Sec. 3.2) are unrelated to the real-word experiments in Sec. 4. It is unclear whether the findings on the synthetic data also hold in the real-world setting for these combinations.\n\n* **W6**: Code is not provided and certain details are missing, e.g., range of $\\phi$. Providing code or ensuring all needed experimental details are provided would enhance the paper’s reproducibility."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024generalization,\ntitle={Generalization of Transformers with In-Context Learning: An Empirical Study},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yOhNLIqTEF},\nnote={under review}\n}"
},
"abstract": {
"value": "Large language models (LLMs) like GPT-4 and LLaMA-3 utilize the powerful in-context learning (ICL) capability of Transformer architecture to learn on the fly from limited examples. While ICL underpins many LLM applications, its full potential remains hindered by a limited understanding of its generalization boundaries and vulnerabilities. We present a systematic investigation of transformers' generalization capability with ICL relative to training data coverage by defining a task-centric framework along three dimensions: inter-problem, intra-problem, and intra-task generalization. Through extensive simulation and real-world experiments, encompassing tasks such as function fitting, API calling, and translation, we find that transformers lack inter-problem generalization with ICL, but excel in intra-task and intra-problem generalization. Furthermore, when the training data includes a greater variety of mixed tasks, it significantly enhances the generalization ability of ICL on unseen tasks and even on known simple tasks. This guides us in designing training data to maximize the diversity of tasks covered and to combine different tasks whenever possible, rather than solely focusing on the target task for testing."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"generalization",
"in-context learning",
"transformer"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b9aa093504723a37f1fdb4b26fc3a7feefa6f370.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Generalization of Transformers with In-Context Learning: An Empirical Study"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yOrtDi6IXs | Provably Efficient Linear Bandits with Instantaneous Constraints in Non-Convex Feature Spaces | main | Active | Linear Bandits;Non-convex feature spaces;Instantaneous hard constraints;Safety;UCB | learning theory | 3;3;5;6 | 3;4;3;2 | 3;3;2;3 | 2;1;3;2 | 3;3;2;3 | 4.25 | 3 | 2.75 | 2 | 2.75 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How is the lower bound (Theorem 2) different from Pacchiano et al. (2021)?\n- I would suggest that the authors are more precise in how they discuss their contributions with respect to the claim of \"first result for non-convex action sets.\" Previous works have considered non-convex sets, including those that are star-convex (Moradipari et al, 2021), as well as discrete sets (Pacchiano et al, 2021). I understand what the authors meant (i.e. first for non-star-convex sets and round-wise constraint satisfaction), but I suggest more precision to avoid confusion."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The problem that this paper considers is a nice one (non-star-convexity in safe linear bandits). The requirement of star-convexity is a significant limitation in present works on safe linear bandits. The presentation is also good. Their use of visuals is quite effective."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of stochastic linear bandits with instantaneous linear constraints, where the action set is not star-convex. Under an assumption on the action set that is weaker than star-convexity, they given an algorithm with $\\tilde{O}(d\\sqrt{T})$ regret."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although I think the problem is important, this paper makes very little contribution to the literature. The core result is that they show $\\tilde{O}(d\\sqrt{T})$ regret under Assumption 3, which assumes that either (1) the constraint is not tight on the optimal point, or (2) a line segment in the direction of the optimal point and of sufficient length is within the action set. The results of Amani et al (2019) immediately show $\\tilde{O}(d \\sqrt{T})$ regret under condition (1). As for condition (2), the specific requirement is quite contrived and appears to be just a quirk of the analysis of Pacchiano et al (2021). I don't think this contribution alone is sufficient to justify another paper.\n\nAdditional points:\n- The lower bound (Theorem 2) looks like it is essentially identical to that in Pacchiano et al (2021), and I would suggest adding discussion if/how it is different.\n- I think that the related work section should probably be in the body of the paper (at least in part) and I would suggest adding [1].\n- line 040: the paper writes that \"the reward for an action in stochastic linear bandits is modeled as the inner product between a feature vector and unknown parameter.\" Really, it's that the *expected* reward is modeled as such.\n\n[1] Gangrade, Aditya, Tianrui Chen, and Venkatesh Saligrama. \"Safe Linear Bandits over Unknown Polytopes.\" The Thirty Seventh Annual Conference on Learning Theory. PMLR, 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- **Necessity and Correctness of Assumption 2**: To me the assumption 2 seems unnecessary, or could there be an error in its statement? Here are some observations:\n - This action yields initial estimates of θ and γ as zeros.\n - In the toy example on non-convexity bias (line 403), the action set does not include $a_0 = [0,0]^{\\top}$.\n - In the experiment described on line 491, the action set also does not include $a_0 = [0,0]^{\\top}$.\n - In Lemma 7 (line 877), it is stated that $||\\phi(a_t)||\\geq \\epsilon$ but $a_0$ does not satisfy this condition.\n\n- **Question about Assumption 3**\n - The first condition in Assumption 3 seems intuitively too strong. If we have the circle $||a_1||_2= 1$ in our action space, it results in a smaller safe circle within the action space. However, aren't two basis safe actions sufficient to estimate $\\theta$ and $\\gamma $ accurately? Are all the actions within this smaller safe circle necessary for the proofs and experiments? If so, is there a toy example that the algorithm won't work with only two basis safe actions?\n - Constraints on $L$ and $\\tau$: When $L$ is very large (e.g., 100) and $\\tau$ is very small (e.g., 0.01), how does the condition $L - \\epsilon \\leq 1$ hold in the second condition? Does this imply there are constraints on the relationship between $L$ and $\\tau$? Could the authors provide more details or adjust the assumption accordingly?\n\n- Could the authors elaborate on \"What is the correct strategy?\" Specifically, how is $b_t(a_1) = r^* - \\tfrac{1}{3} = \\tfrac{2}{3}$ derived? This seems different from equation (3).\n\n- **Computational Issues:** I feel the proposed algorithm cannot be implemented to more general settings than discrete action space and have the following questions.\n - **Non-Discrete and Non-Convex Cases:** How does the proposed method computes the line 6 of Algorithm $a_t$ the non-convex non-discrete cases, such as when in action sets contains non-convex continuous regions? \n - **Convex Cases:** Even in convex regions, as $b_t(a)$ in (3) takes much complicated form, is the optimization problem in line 6 of Algorithm $a_t$ still a convex program as in linear bandit problem that can be approximate efficiently?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The algorithm employs double confidence intervals in conjunction with the exploration bonus $b_t(a)$, which seems interesting. Assumption 3 is also interesting as it captures the locally information. Additionally, the paper provides both upper and corresponding lower bounds, which strengthen the theoretical contributions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers safety-constrained linear bandit problems under non-convex or discrete feature spaces. It introduces the Local Point Assumption, which assumes that the feature space is locally continuous or that the optimal action does not lie on the safety boundary. The paper develops the NCS-LUCB algorithm with a newly designed exploration bonus $b_t(a)$."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In my view, the statements of the assumptions and their explanations are not clear and needs more elaboration. Please see the questions below for details. I am happy to discuss more in the rebuttal periods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The lower bound is of order $\\Omega(1/\\iota^2)$, while the upper bound is of order $O(1/\\iota)$. This appears to be contradictory, as $1/\\iota^2 > 1/\\iota$. Could you please clarify this discrepancy?\n\n2. Lines 324-328: Please provide a more detailed explanation of why $\\alpha\\phi(a_*) \\in \\phi(A_t)$ implies that the distance between $\\alpha\\phi(a_*)$ and $\\phi(A_t)$ is less than $g_t^1(a)$.\n\n3. How did you find Assumption 3? Were there any related works that inspired this assumption?\n\nAlthough I am familiar with bandits, I have not yet encountered safe bandits. Therefore, I will adjust my score based on discussions and feedback from other reviewers."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written, particularly in its comprehensive discussion of key points.\n\n2. Providing both upper and lower bounds simultaneously enhances the completeness of this work and renders the newly proposed assumptions more convincing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates safe linear bandits in non-convex feature spaces, expanding the arm space from star-convex to local-point. The primary innovative technique is the newly proposed bonus term in Equation (4)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Expanding the arm space from star-convex to local point assumption offers limited contributions.\n\n2. The technological innovation primarily lies in the newly proposed bonus term (4)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In Assumption 1, it is assumed that $\\max(\\|\\theta^*\\|, \\|\\gamma^*\\|) \\leq \\sqrt{d}$. Is it possible to bound it by other notations, e.g., $B$? In this case, the dependence on this quantity can be better reflected in the final regret bound.\n- Is it possible to provide problem-dependent upper and lower bounds, consisting of the reward and cost gaps?\n- According to Pacchiano et al. (2024), the minimax lower bound for the convex case is $\\max \\left( \\frac{d \\sqrt{T}}{8 e^2}, \\frac{1 - r_0}{21 (\\tau - c_0)^2} \\right)$ which is of order $\\Omega(d\\sqrt{T})$. And Theorem 2 suggests the minimax lower bound in the nonconvex case is $\\max \\left\\\\{ \\frac{1}{27} \\sqrt{(d - 1) T}, \\frac{1 - 2\\epsilon}{\\epsilon} \\left( \\frac{1 - \\iota}{\\iota} \\right)^2 \\right\\\\}$ which is of order $\\Omega(\\sqrt{dT})$. It is expected that the nonconvex case is a harder problem, thus, the lower bound should be larger. Can the author comment on this? In addition, how does $\\tau$, the threshold on the cost, influence the lower bound?\n- Is it possible to show the price (additional regret) for the nonconvexity in the arm set, compared to the convex arm set case?\n- Can the authors kindly compare this work with Amani et al. (2019), Pacchiano et al. (2024), and Moradipar et al. (2021)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is clear and is easy to follow.\n- The problem of the bonus term in the previous literature in the context of non-convex arm set is well explained via examples and the intuition for the new bonus term is also clear.\n- Both problem-independent upper bound and lower bound are provided, showing the importance of the parameters $\\epsilon,\\iota$ in the problem."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper extends the stage-wise safe linear bandits to the non-convex spaces. Compared with the prior work Pacchiano et al. (2024), which studies the stage-wise safe linear bandits under the assumption that the arm set is convex, this paper finds the limitation of the algorithm LC-LUCB when the arm set is a non-convex spaces. By redesigning the bonus term in the confidence radius, the improved algorithm NCS-LUCB is capable of dealing with non-convex arm set."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Majors**:\n- In Line 376, the BOB method is only **adopted** by Zhao et al. (2020), and it is firstly **proposed** by [1]. \n- The technical contribution is limited. While the introduction of the new bonus term can be important, the rest of the analysis is quite standard.\n- The proposed bonus term consists of $\\iota$, which appears in Assumption 3 and is not known in practice. While it mentions the BOB technique [1] may be adopted to solve, it does not further investigate on it. As the design of the bonus term is the main technical contribution of this paper and the parameters $\\iota$ plays an important role in the bounds, it is expected that the authors can provide a complete solution towards the bonus design.\n\n**Minors**:\n- It would be great to show some more complex examples (at least in the experiment). As this paper deals with linear bandits, the proposed examples, including the toy examples and the experimental examples, are composed by arms along the axis, making them quite similar to K-armed bandits (with minor additional setups). \n\n[1] Wang Chi Cheung, David Simchi-Levi, Ruihao Zhu. _Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics_, PMLR 89:1079-1087, 2019."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024provably,\ntitle={Provably Efficient Linear Bandits with Instantaneous Constraints in Non-Convex Feature Spaces},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yOrtDi6IXs},\nnote={under review}\n}"
},
"abstract": {
"value": "In linear stochastic bandits, tasks with instantaneous hard constraints present significant challenges, particularly when the feature space is non-convex or discrete. This is especially relevant in applications such as financial management, recommendation systems, and medical treatment selection, where safety constraints appear in non-convex forms or where decisions must often be made within non-convex and discrete sets. In these systems, bandit methods rely on the ability of feature functions to extract critical features. However, in contrast to the star-convexity assumption commonly discussed in the literature, these feature functions often lead to non-convex and more complex feature spaces. In this paper, we investigate linear bandits and introduce a method that operates effectively in a non-convex feature space while satisfying instantaneous hard constraints at each time step. We demonstrate that our method, with high probability, achieves a regret of $\\tilde{\\mathcal{O}}\\big( d (1+\\frac{\\tau}{\\epsilon \\iota}) \\sqrt{T}\\big)$ and meets the instantaneous hard constraints, where $d$ represents the feature space dimension, $T$ the total number of rounds, and $\\tau$ a safety related parameter. The constant parameters $\\epsilon$ and $\\iota$ are related to our localized assumptions around the origin and the optimal point. In contrast, standard safe linear bandit algorithms that rely on the star-convexity assumption often result in linear regret. Furthermore, our approach handles discrete action spaces while maintaining a comparable regret bound. Moreover, we establish an information-theoretic lower bound on the regret of $\\Omega \\left( \\max\\{ \\sqrt{(d-1)T}, \\frac{1}{\\epsilon \\iota^2} \\} \\right)$ for $T \\geq \\max (d-1, \\frac{32 e}{\\epsilon \\iota^2})$, emphasizing the critical role of $\\epsilon$ and $\\iota$ in the regret upper bound. Lastly, we provide numerical results to validate our theoretical findings."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Linear Bandits",
"Non-convex feature spaces",
"Instantaneous hard constraints",
"Safety",
"UCB"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f9f9cc88f14a91a65d6ad2c78c3e0117b2577e12.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/295707c7029b8913667fc9f04e2e9c0710f40d36.zip"
},
"title": {
"value": "Provably Efficient Linear Bandits with Instantaneous Constraints in Non-Convex Feature Spaces"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yP0iKsinmk | AdaFlow: Efficient Long Video Editing via Adaptive Attention Slimming And Keyframe Selection | main | Active | video editing;diffusion model;keyframe selection;token slimming | generative models | 3;5;6;6 | 5;4;5;4 | 3;3;3;3 | 2;2;3;3 | 2;3;3;3 | 5 | 4.5 | 3 | 2.5 | 2.75 | -0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- What is the maximum number of frames each baseline can handle?\n- For the longer videos in the supplementary materials, are the baseline videos created by simply concatenating shorter clips?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Keyframe selection is an important topic in video editing and remains underexplored in prior work. The proposed adaptive keyframe selection based on dynamic content is a valuable contribution.\n- The proposed Long-V-Eval benchmark is valuable for advancing future video editing research.\n- The evaluation results in Table 1 and Table 2 outperform prior methods. Additionally, the supplementary videos demonstrate the proposed method's ability to achieve high visual quality."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the text-based video editing problem from a new angle compared to prior work, emphasizing keyframe selection and efficiency for generating longer videos. Building on keyframe translation and interpolation approaches from previous studies, the authors propose an Adaptive Keyframe Selection and Adaptive Attention Slimming scheme to enhance quality and handle longer videos with more transitions. The proposed method achieves superior results and efficiency compared to TokenFlow and other baselines. Additionally, the authors introduce a benchmark, LongV-EVAL, for evaluating long video editing tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Additional examples of keyframe selection would clarify the approach, such as cases where changes in object angle or the appearance of new objects lead to their selection as keyframes.\n- The results appear quite similar to TokenFlow; further clarification on this similarity would be beneficial. Also, the videos in the supplementary materials look slightly oversmoothed, with some detail loss and minor color mixing from the background compared to TokenFlow.\n- The ablation study is somewhat limited. There is no analysis of adaptive attention slimming, and it would be beneficial to include quantitative results from the ablation study."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper presents AdaFlow, a training-free approach that effectively addresses memory constraints, offering a feasible alternative for handling longer videos compared to traditional methods.\n\n- AdaFlow combines Adaptive Attention Slimming and Adaptive Keyframe Selection to optimize memory usage and frame selection, respectively. This combined approach not only reduces computational load by focusing on essential tokens in self-attention but also enhances editing quality by selecting keyframes that capture critical scene changes.\n\n- The introduction of LongV-EVAL provides the field with a dedicated benchmark for long video editing, complete with detailed annotations and varied scenarios, which can serve as a valuable tool for assessing future developments in this area.\n\n- Initial results on LongV-EVAL indicate that AdaFlow may outperform existing methods in both efficiency and quality, positioning it as a promising approach for text-driven long video editing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces AdaFlow, a training-free method designed to address memory constraints in long video editing. AdaFlow incorporates two key strategies: Adaptive Attention Slimming, which selectively reduces token use in self-attention to decrease memory requirements, and Adaptive Keyframe Selection, which optimizes frame selection for enhanced editing quality. According to the authors, these techniques enable AdaFlow to handle videos exceeding 1,000 frames on a single A800 GPU, reportedly achieving lengths ten times greater than prior methods. The paper also presents LongV-EVAL, a benchmark for assessing long video edits, where AdaFlow demonstrates potential advantages in both efficiency and quality over existing approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Insufficient Evidence for Claimed Contributions:** Although the paper lists three main contributions, some are not substantiated in sufficient detail. For instance, the claim of “effective” memory optimization mainly references spatial memory savings, with no discussion of runtime performance or the extra computation that Adaptive Keyframe Selection and Adaptive Attention Slimming might add. Additionally, the benchmark’s description in the paper is limited, with definitions for key evaluation metrics lacking clarity.\n\n2. **Limited and Incomplete Experiment Comparisons:** The experiments primarily compare AdaFlow to methods focused on consistency in short video editing, lacking comparisons to dedicated long video editing techniques. Furthermore, the ablation study is minimal, with few quantitative measures and no in-depth analysis of other components within the approach. Relying on visual clarity in a few cases does not provide sufficient evidence of AdaFlow’s overall performance.\n\n3. **Unclear Visualization and Algorithm Details:** The pipeline visualization, particularly for the AKS module, is difficult to interpret, and specific steps like the \"window_check\" are not well-explained, leaving some ambiguity regarding the AKS process and its impact on overall results.\n\n4. **Limitations of Feature-Matched Latent Propagation:** If the intended edits involve adding new content or altering major background elements, rather than just subtle changes (e.g., color or style shifts), the proposed feature-matched propagation approach may fail to preserve coherence, potentially limiting its applicability for more complex or structural video modifications."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What are the respective advantages and disadvantages of this paper and previous neural representation-based long video editing methods in terms of performance? Some methods should be discussed [1-3].\n\n2. Why not show the effect of shape editing on video content? This kind of editing is much stronger than the recoloring shown in the paper, and it is more difficult to achieve, but more practical.\n\n[1] Huang J, Sigal L, Yi K M, et al. Inve: Interactive neural video editing[J]. arXiv preprint arXiv:2307.07663, 2023.\n\n[2] Kasten Y, Ofri D, Wang O, et al. Layered neural atlases for consistent video editing[J]. ACM Transactions on Graphics (TOG), 2021, 40(6): 1-12.\n\n[3] Yang S, Mou C, Yu J, et al. Neural video fields editing[J]. arXiv preprint arXiv:2312.08882, 2023."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed AAS is interesting, which only focuses on the the area to be edited in KV sequences, greatly increasing GPU memory.\n2. The task is meaningful and the performance is practical. By combining keyframe editing/propagation and attention slimming, this method enables 1k frames editing on a single GPU, which is impressive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The proposed AdaFlow enables 1k frames editing in one inference. This is realized with adaptive keyframe selection and attention slimming. In addition, this paper developed a new benchmark, LongV-EVAL, for long video editing evaluation. Experiments proves its efficient editing performance and diversity of editing types. However, this paper avoids shape editing of the video content and only shows the effect of recoloring, which is less intensive and difficult than shape editing. Considering the importance and practical value of shape editing, it is necessary to explain whether this paper can achieve this function. If this method cannot edit shape of the objects well, then the statement in the paper about \"supports foreground changes\" is wrong and needs to be corrected."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lacking some necessary discussion. As far as I know, some neural representation based video editing can also achieves consistent long video editing (I listed some of them in Questions). These methods attempts to resolve long video editing via neural representation rather than adjusting attention in diffusion model, which also achieves impressive performance. It is necessary to discuss the advantages and disadvantages of AdaFlow and these methods.\n\n2. Lack of necessary editing results, mainly editing the shape of objects. Although authors claimed that AdaFlow supports various editing tasks. In the paper, I only found some results of the color changing, including style transfer, background or foreground recoloring. However, various SOTA video editing methods enables deformation, that is, changing the shape of the specified objects. I believe this is a very significant function and it is much more difficult to edit than recoloring. The author should provide adequate results of editing the shape of the object (e.g. removing the foreground of a video, or turning the bird in Fig.2 into a squirrel, etc.). If this is not possible, it should be acknowledged that good deformation results cannot be achieved due to the limitations of the paper's technology."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The paper's novelty appears limited when compared to Token-Flow, with results showing minimal differentiation except for the bike case (Fig.3, the last case). I recommend:\n\n1. Providing 2-3 comparative examples using Token-Flow's official cases\n2. Including a discussion on how different attention-slimming strategies impact the results"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The chosen problem setting is novel, as currently there are no video-editing solutions that can handle thousands of frames effectively.\n\n2. The efficiency optimizations are noteworthy, particularly the single correspondence computation requirement compared to other methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes an efficient long video editing method capable of performing video appearance editing on 1k frames within minutes. To accomplish this, the authors developed an attention slimming + key-frame selection approach and established a long-video editing benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The results appear to differ from traditional video editing, resembling more appearance editing focused on texture-level modifications. Even Token-Flow, which addresses similar tasks, demonstrates more ambitious edits (e.g., woman > Pixar animation, car > ice sculpture).\n\n2. The feature similarity-based keyframe selection doesn't appear particularly innovative.\n\n3. Several concerns regarding the attention slimming operation:\n - The KV token selection strategy appears static regardless of the editing prompt (e.g., not specifically selecting tokens from editing-relevant regions)\n - The paper lacks comparison with other extended self-attention slimming methods, such as random sampling in StoryDiffusion\n - The correspondence-based filtering may not necessarily capture representative compressed features for the entire video (compared with random sampling)\n\nReference: StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "AdaFlow enables efficient, high-quality editing of minute-long videos by adaptively selecting keyframes and pruning redundant tokens, achieving state-of-the-art results on a single GPU."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024adaflow,\ntitle={AdaFlow: Efficient Long Video Editing via Adaptive Attention Slimming And Keyframe Selection},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yP0iKsinmk},\nnote={under review}\n}"
},
"abstract": {
"value": "Text-driven video editing is an emerging research hot spot in deep learning. Despite great progress, long video editing is still notoriously challenging mainly due to excessive memory overhead. To tackle this problem, recent efforts have simplified this task into a two-step process of keyframe translation and interpolation generation, enabling the editing of more frames. However, the token-wise keyframe translation still plagues the upper limit of video length. In this paper, we propose a novel and training-free approach towards efficient and effective long video editing, termed AdaFlow. We first reveal that not all tokens of video frames hold equal importance for keyframe-consistency editing, based on which we propose an Adaptive Attention Slimming scheme for AdaFlow to squeeze the $KV$ sequence of extended self-attention. This enhancement allows AdaFlow to increase the number of keyframes for translations by an order of magnitude. In addition, an Adaptive Keyframe Selection scheme is also equipped to select the representative frames for joint editing, further improving generation quality. With these innovative designs, AdaFlow achieves high-quality long video editing of minutes in one inference, i.e., more than 1$k$ frames on one A800 GPU, which is about ten times longer than the compared methods. To validate AdaFlow, we also build a new benchmark for long video editing with high-quality annotations, termed LongV-EVAL. The experimental results show that our AdaFlow can achieve obvious advantages in both the efficiency and quality of long video editing. Our code is anonymously released at https://anonymous.4open.science/r/AdaFlow-C28F."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"video editing",
"diffusion model",
"keyframe selection",
"token slimming"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/64cfccc3ff59ada847daec3d8399cd77d8caf0d5.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/fbc8c97a3722b16443b9d1c6d1be25d05bd94a92.zip"
},
"title": {
"value": "AdaFlow: Efficient Long Video Editing via Adaptive Attention Slimming And Keyframe Selection"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yPxhj1FKhG | APCtrl: Adding Conditional Control to Diffusion Models by Alternative Projection | main | Active | Diffusion Models;Condition Diffusion;Alternative Projection;Control-on-Training;Control-on-Sampling | generative models | 3;3;5 | 4;4;3 | 2;2;2 | 2;2;2 | 1;2;2 | 3.666667 | 3.666667 | 2 | 2 | 1.666667 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- How is the inverse problem in Eqn7 being addressed? Assuming you are leveraging the gradient information, which algorithm are you using?\n\n- Regarding the experiments on compatibility with various diffusion backbones (lines 409-411), could you clarify the statement “APCtrl supplies Condition 1, complemented by Condition 2”? How are these conditions integrated into the sampling algorithm?\n\n- Have the authors compared the performance of unplugging versus plugging in the control step? The authors should consider reporting the performance of their backbone as a baseline, especially when the modification to the latent states might harm the generation process."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors perform extensive experiments, presenting results across multiple scenarios. They also demonstrate the efficiency of their method, particularly in comparison with control-on-training methods which require refinement of the latent space and incur retraining costs for the diffusion models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents APCtrl, a method for conditional diffusion generation. Leveraging the idea of alternating projection, the authors propose to project the latent variable $\\mathbf{z}_t$ of the latent diffusion model at each timestep onto a denoising set and a condition set, aiming to locate a point within the intersection of two sets and simplify the conditional sampling process to recursive projections."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The method’s soundness is questionable.\n - The main formula in Eq. 9 appears arbitrarily constructed. While classical alternating projections [1] update iteratively between two convex sets, Eqn. 9 employs four composite functions $(\\mathcal{D}_t, \\mathcal{D}_{t+1}, \\mathcal{C}_t, \\mathcal{D}_{t+1})$ (i.e., projection onto \\mathcal{D}_{t+1} is applied twice), adding two extra projections onto the denoising sets after alternating between the condition set and the denoising sets. The authors do not give a clear rationale or justification for this modification while adding potentially unnecessary complexity to the calculations.\n - The results are concerning. Despite offering various visual and quantitative comparisons, the empirical improvements appear limited. In Table 1, the primary metric (FID score) is worse than that of comparison methods, suggesting that the modified latent states $\\mathbf{z}_t$ may not adequately lie within the ideal intersection, thus reducing generation quality. Additionally, the visualizations in Figure 8 indicate that further iterations of the method may produce artificial-looking images compared to more natural scenes.\n\n2. Clarity: There are notable issues with notation consistency and clarity throughout the paper. \n - Lines 196-197: the vector $\\mathbf{x}$ is never introduced in the paper.\n - Line 203: The denoiser $\\mathcal{Z}$ appears unexplained and is absent from the preceding formula.\n - Line 217: $\\mathbf{z}_{0|t}$ is not defined and it’s hard to understand why the approximation holds especially when $\\mathbf{z}_t$ indicates a noisy state.\n - Algorithms: (i) At step 4, defining $\\mathcal{J}_t$ is unnecessary as the method does not explicitly calculate the intersection set. (ii) In the highlighted blue block, there is a loop over $n$ while $n$ is neither defined in the draft nor included in the algorithm’s input. (iii) It’s unclear why $\\mathcal{C}_t$ serves as an algorithm input, as it appears to be the target solved iteratively in Eqn. 5.\n - In Section 3.2, the authors review a few concepts and methods to compute the latent control. However, they fail to cite reference papers and do not give sufficient background information. For example, in line 213, they mention an encoder $\\mathcal{E}$ for processing the control image, however, it is not clear what’s the role of this encoder and if they need it to be pre-trained.\n\n[1] Relaxed alternating methods, Cegielski and Suchocka, 2008 SIAM"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "N/A"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The topic of enhancing the versatility of pretrained diffusion models is of great interest, and this work has exerted certain efforts in this regard."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper integrates concepts derived from alternative projection techniques and conditional control with the aim of enhancing diffusion models. It provides practical insights regarding recursive projection for controllable generation, although the theoretical/mathematical insights are somewhat limited. Moreover, it showcases examples that effectively demonstrate the efficiency of condition handling."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It seems to me that the novelty and significance of this work are insufficient for ICLR. This is especially the case considering that Latent Control is a method that has been introduced previously. Moreover, the training approach described entails retraining the latent network whenever a new model or dataset is employed.\n\n2. In many cases, the quantitative results presented in Table 2 fail to demonstrate the advantages of the proposed APCtrl approach in comparison with competing methods like ControlNet and ControlNet++.\n\n3. In Section 2, I was confused by Figure 1. The direction of the arrows doesn't match the logic of the algorithm. A more detailed explanation is required to clarify its connection with Algorithm 1. Alternatively, a more intuitive illustration would be beneficial.\n\n4. There are several instances where definitions appear somewhat unconventional. I would recommend revisiting these sections to ensure clarity and adherence to standard notation:\n\n- The definition of $\\mathfrak{J}_t := \\mathfrak{D}_t \\cap \\mathfrak{C}_t$ appears rather strange. The four operations defined on Page 16 for $\\mathfrak{J}_t$ consist of two noise-adding and two denoising steps. However, it seems that these operations do not actually bring about a denoising effect.\n\n- While the definition of $\\text{Proj}_{\\mathfrak{C}_t} (\\mathbf{z}_{t+1})$ in Eq. (7) is comprehensible, the variable within the parentheses does not show up in the expression itself but only in the initialization. This makes the definition rather unconventional.\n\n- In Section 3, there seems to exist a conflict within the definitions. Specifically, the \"Up Projection\" $\\mathrm{Proj}_{\\mathfrak{D}_{t+1}}(\\mathbf{z}_t)$ in Eq. (8)\n\n and the denoising projection $\\mathrm{Proj}_{\\mathfrak{D}_{t}}(\\mathbf{z}_{t+1})$ in Eq. (6) are in conflict, \n\nespecially when considering $\\mathrm{Proj}_{\\mathfrak{D}_{t+1}}(\\mathbf{z}_{t+2})$. \n\n5. According to the FORMATTING INSTRUCTIONS FOR ICLR2025 CONFERENCE SUBMISSIONS: \"The text must be confined within a rectangle 5.5 inches (33 picas) wide and 9~inches (54 picas) long. The left margin is 1.5 inch (9 picas). Use 10 point type with a vertical spacing of 11 points. Times New Roman is the preferred typeface throughout\", and \"Do not change any aspects of the formatting parameters in the style files. In particular, do not modify the width or length of the rectangle the text should fit into, and do not change font sizes (except perhaps in the References section)\". However, this submission seems to employ some strange typeface (and the mathematical notations are rather unusual), which makes it somewhat uncomfortable for me to read. Moreover, the texts within the tables appear to be in smaller font sizes. Therefore, I am uncertain as to whether this submission should be desk-rejected or not.\n\n6. Some minor problems: \n\n- There seems to be a typo in the title of Section 3.2, “From Latent Control to Latent Control”, as this part discusses the differences between Pixel Control and Latent Control.\n\n- I found that the layout of the figures and tables in the paper causes inconvenience for the reader. The issues include the following: Table 2 is presented in Section 4.2, yet it actually pertains to Section 4.4. Figure 4 is shown in Section 4.2, but it belongs to Section 4.3. Additionally, other elements such as Table 3 and Figure 3 also contribute to this inconvenience."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* In the abstract the projecting operator on line 17 is not defined. It is hard to understand what is meant here for the update of $z_t$.\n\n* Calling the denoising step a denoising projection is a stretch. I think this might be misleading as it is not performing a projection in the mathematical sense. If there is truly a mathematical statement behind this assertion I would like to see a proof and a proper statement. \n\n* In Section 3.2, the title is “From latent control to latent control”. I think it should be “From pixel control to latent control”. \n\n* No limitation is stated in the paper."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* I appreciate the extensive experimental set up. The authors have performed comparisons with a large number of control methods for diffusion models with a large number of possible conditionings (depth, canny, HED, M-LSD, segmentation…). These comparisons are both quantitative and qualitative. They tried a variety of samplers and diffusion backbones. They also perform efficiency comparisons for the training of the network in APCtrl.\n\n* The paper is nicely structured and easy to follow. I have some concerns regarding the mathematical rigor of certain statements (see the section on Weaknesses below) but overall I am satisfied with the quality of the writing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors introduce a method to include user control in diffusion models using control-on-sampling techniques. They present a framework that reformulates the denoising diffusion step as a projection step. By introducing another operator which projects the current noisy sample onto a set which matches some condition with respect to a reference image, they can then apply the alternative projection paradigm in order to sample with the given reference image condition. They perform extensive experiments and comparison on text-to-image experiments with a backbone given by Stable Diffusion v1.5."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* My main concern is the limited novelty of the approach when it comes to the alternate projection method. While a limited novelty is not a problem per se, as the experiments are strong I think that extremely relevant works have been omitted in the related work and could also be included in the experimental comparison. For instance, there is a line of work in [1,2,3] see [4] for a review of the work which also works under constraints. Albeit the goal of these methods is different from refined user control as investigated in the current paper, the goal of sampling from a posterior distribution is more general. I think revisiting the algorithm and the contributions of the authors in the light of these papers is necessary.\n\n* The contributions are oversold. The paper asserts that the method is a Control-on-Sampling approach, see the caption of Figure 5 for instance “our method is a Control-on-Sampling approach”. I would like to challenge this idea since the current method requires training a neural network due to the latent conditioning. This is in stark contrast with other Control-on-Sampling methods such as FreeDom, DSG or UniGuid (see Table 3). Second, the paper claims that among Control-on-Sampling methods their approach is the only one that is “Controlled by Multi-Condition”. However in the Universal Guidance paper, it is stated that “We propose an algorithm that enables universal guidance for diffusion models. Our proposed sampler evaluates the guidance models only on denoised images, rather than noisy latent states. By doing so, we close the domain gap that has plagued standard guidance methods. This strategy provides the end-user with the flexibility to work with a wide range of guidance modalities and even multiple modalities simultaneously. The underlying diffusion model remains fixed and no finetuning of any kind is necessary.” Can the authors elaborate on this? The corresponding column in Table 1 does not seem like a fair assessment of competing contribution. Finally, I would also argue that Universal Guidance [5] is also a method that satisfies “Sampling Agnosticism”. Indeed, the authors only propose an update of the $\\varepsilon$ prediction (see Equation 4 in [5]). Since $x_0$-prediction and all other predictions are related to the $\\varepsilon$-prediction with a one-to-one mapping the proposed method is also sampling agnostic. \n\n* I find it quite suspicious that a large number N of iterations hurts the generation quality as depicted in Figure 8. If anything, following the explanations of the authors, the image should continually be refined. Of course there should be a cost of using large values of $N$ but I am having trouble understanding why the quality is also decreasing. It would be good (and I think necessary) to explain this phenomenon.\n\n[1] Chung et al. – Diffusion Posterior Sampling for General Noisy inverse problems \n\n[2] Chung et al. – Decomposed Diffusion Sampler for Accelerating Large-Scale Inverse Problems\n\n[3] Chung et al. – Improving Diffusion Models for Inverse Problems\nusing Manifold Constraints\n\n[4] Peng et al. – Improving Diffusion Models for Inverse Problems Using Optimal Posterior Covariance\n\n[5] Bansal et al. – Universal Guidance for Diffusion Models"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\ndai2024apctrl,\ntitle={{APC}trl: Adding Conditional Control to Diffusion Models by Alternative Projection},\nauthor={Longquan Dai and He Wang and Jinhui Tang},\nyear={2024},\nurl={https://openreview.net/forum?id=yPxhj1FKhG}\n}"
},
"abstract": {
"value": "Enhancing the versatility of pretrained diffusion models through advanced conditioning techniques is crucial for improving their applicability. We present APCtrl, a novel conditional image generation approach that formulates the latent \\( \\dmrv{z}_\\dms{t} \\) at timestep \\( t \\) as the projection \\( \\dmrv{z}_\\dms{t} = \\text{Proj}_{\\bmfrakD_\\dms{t}} (\\dmrv{z}_{ \\dms{t} + \\dms{1} }) \\) onto the denosing set \\( \\bmfrakD_\\dms{t} \\). For conditional control, APCtrl integrates the condition set \\( \\bmfrakC_\\dms{t} \\), defined by a latent control network \\(\\bmcalA_{\\dmv{theta}}(\\cdot, \\cdot)\\). Our method simplifies conditional sampling to recursive projections \\( \\dmrv{z}_\\dms{t} = \\text{Proj}_{\\bmfrakI_\\dms{t}} \\circ \\text{Proj}_{\\bmfrakD_\\dms{t}} (\\dmrv{z}_{ \\dms{t} + \\dms{1} }) \\), where each projection step integrates both the diffusion and condition priors. By employing Alternative Projection, our approach offers several key advantages: 1. Multi-Condition Generation: easily expandable with additional conditional sets; 2. Model and Sampling Agnosticism: works with any model or sampling method; 3. Unified Control Loss: simplifies the management of diverse control applications; 4. Efficiency: delivers comparable control with reduced training and sampling times. Extensive experiments demonstrate the superior performance of our method."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Longquan_Dai2",
"~He_Wang12",
"~Jinhui_Tang1"
]
},
"authors": {
"value": [
"Longquan Dai",
"He Wang",
"Jinhui Tang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion Models",
"Condition Diffusion",
"Alternative Projection",
"Control-on-Training",
"Control-on-Sampling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "dai|apctrl_adding_conditional_control_to_diffusion_models_by_alternative_projection"
},
"pdf": {
"value": "/pdf/b0687cb5d261a45f5866c78fee15272855950ccc.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "APCtrl: Adding Conditional Control to Diffusion Models by Alternative Projection"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yPyb2j7oZc | ReZero: Boosting MCTS-based Algorithms by Backward-view and Entire-buffer Reanalyze | main | Active | Deep Reinforcement Learning;Monte Carlo tree search;MuZero;efficiency optimization;reanalyze | reinforcement learning | 3;3;5;6 | 4;2;5;2 | 1;2;2;3 | 2;1;1;3 | 2;1;2;3 | 4.25 | 3.25 | 2 | 1.75 | 2 | -0.037037 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "ReZero demonstrates a reduction in wall-clock time needed to achieve comparable performance levels compared to baseline MCTS-based algorithms."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In MuZero, the \"reanalyze\" mechanism enhances sample efficiency by revisiting and updating past experiences stored in the replay buffer. In this paper, the authors propose a method to use information from future time steps during the reanalyze phase to reduce the search space and accelerate individual MCTS runs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The writing is not clear enough for the reader to understand exactly what the algorithm is doing. I had a hard time understanding Section 4.1, and Figure 2, so I am not able to evaluate this paper too well.\n- To my best guess, the authors are proposing to use previous MCTS runs to approximate the value of nodes in future MCTS runs. This is based on the \n- I don't think Theorem 1, which seems to be based on existing work, is properly grounded in the current setting. This is because MCTS is not a typical bandit: I recommend the authors take a look at the Shah et al. paper \"Non-asymptotic analysis of monte carlo tree search\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Why MuZero is the only baseline authors compare with?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-written and authors include easy-to-understand figures\n- The authors propose a theoretical justification of their framework\n- Presented experimental results clearly show the proposed method is faster (in term of wall-clock time) than MuZero"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes an improvement to the MuZero algorithm. Instead of trying the child node selection as a multi-armed bandit problem, the method treats it as one-armed bandit problem, that is a problem of selecting between one stochastic arm or a sure pay-off with a known value. Since this true value is unknown, of course, authors propose a backward search technique that produces an estimate of it."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There only baseline authors compare against is the MuZero algorithm. However, in the related work section authors mention many improvements proposed to the MuZero baseline to make it faster."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I have stated numerous questions in context in the weaknesses section. I would appreciate if the authors address them during the rebuttal phase."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- The subject that the paper addresses is very important. Indeed, improving the sample efficiency of MCTS methods such as MuZero, along with the wall-clock time they incur during tree search could dramatically improve the current MCTS methods.\n- Explaining MCTS methods can be a difficult task, thus using illustrations such as Figure 2 helps a lot in explaining the approach.\n- There is an attempt at providing finite-time theoretical analysis, in the form of a bound on the expected number of visits to suboptimal actions. Such attempts are common within the Multi-Armed Bandits literature but are less frequent in MDP contexts. It is therefore appreciated that the authors made this theoretical effort.\n- The experiments, if they are reproducible with a publicly available code, seem to support the authors claims. However, I am not confident enough to assess the experimental setup accurately.\n- The contribution is orthogonal to previously proposed solutions. This implies that ReZero is readily applicable with the other proposed methods from the literature."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Algorithms like MuZero extend the use of MCTS to environments without known models. However, its extensive tree search incurs a substantial time overhead. This pressing matter has motivated many research papers to propose mechanisms mitigating this wall-clock time problem. The authors in this paper propose a new algorithm of this sort, called ReZero, which is orthogonal to the previous contributions, thus making it readily deployable with many of the previous works. ReZero leverages a backward-view reanalyse technique that prunes the exploration of certain specific nodes by using previously searched root values. ReZero also reanalyses the whole buffer periodically after a fixed number of training iterations. In tandem with this reanalyse technique, ReZero employs a search strategy that is akin to a one-armed bandit algorithm. The authors then analyse ReZero from a theoretical point of view. In Theorem 1, a bound on the expected number of suboptimal action visits is proposed. Moreover, this bound implies that this expected number of suboptimal action visits is sublinear, i.e. $\\lim_{n \\rightarrow \\infty}\\frac{\\mathbb{E}\\left[ T_i(n)\\right]}{n} = 0$. In the Appendix, the authors also claim to provide such bound for AlphaZero. Empirical results seem to indicate that ReZero-M, which is ReZero employed with MuZero with SSL, can significantly decrease the wall-clock time compared to MuZero. Similar empirical results are stated in the Appendix comparing ReZero-E with EfficientZero."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I see two significant weaknesses with this paper. Lack of clarity and mistakes in the theoretical result.\n\n## Lack of Clarity:\n\n- In Algorithm 1 there are many undefined functions: prepare, origin\\_MCTS, select\\_root\\_child, traverse . I did not find these defined in the Appendix either. It is hard for the reader to understand the algorithmic steps from the provided code. I suggest that the authors rather provide an abstract pseudocode or thorough explanations of each step in an iteration, accompanied by some simple illustration. I struggle to deeply understand ReZero, even now I am unsure that I grasp it sufficiently.\n- From Figure 2, the considered MDPs seem to have deterministic transition dynamics. This is also implied by Eq. (5). Is ReZero specific to deterministic transition dynamics? If that is the case, then it should be mentioned while introducing the method. Otherwise, Eq. (5) should account for the stochasticity of transitioning to a certain state through a (state, action) pair.\n- Section 4.1 is hard to follow. Does ReZero follow the standard MCTS steps: Selection, Expansion, Simulation and Backpropagation? How is the data collected in the replay buffer? In Section 4.3, a policy network will be briefly mentioned, which implies the use of DeepRL. It makes hard for the reader to follow the explanation with scattered information like this. I only realised that there is a DeepRL approach later on, which confused me. What is the network approximating? Is it just the policy or the value function as well? How are the targets constructed in this case? And how are the values updated? There are too many questions here related to the algorithmic machinery itself that I think the authors should spend time explaining. As a suggestion, I think that the authors should delete section 3.1, which I did not find helpful, and spend more space and effort on clearly stating and explaining the algorithmic steps. Figures like Figure 1 and 2 could also be very helpful here, even if put in an illustrative appendix. Figure 3 to me does not seem standalone, it could be useful but only accompanied by this large section just defining the algorithm fully.\n- It is unclear how the mini batches come to existence. Do they stem from filling the replay buffer by following the Tree and Simulation policies (as per the standard MCTS terminology)? If so, how are all the trajectories in Figure 2 of the same length? Is this just for illustrative purposes or are they indeed of the same length?\n- Could you provide a more thorough explanation of traverse in Line 240? Is it following the Tree and Simulation policies? When does traverse happen compared to reanalyse? I thought it happened at the end of reanalyse but Algorithm 1 suggests otherwise.\n- What if during reanalyse, two actions (or more) $a_1, a_2$ have been taken at state $S_l^t$ (in different trajectories)? Would we have $I_l^t\\left( a_1\\right) = r_1 + \\gamma m_1, I_l^t\\left( a_2\\right) = r_2 + \\gamma m_2$? in which case, the policy in (4) and (5) will not taken into consideration the UCB scores of these actions but rather their estimates directly. If many actions of this sort are explored, then how would exploration occur? 
I think I am missing a crucial detail about the algorithm here, hence why I suggest that the authors restructure the paper in a way that prioritises much more a clear explanation of ReZero.\n- In (5), $I_t\\left( a\\right)$ should be denoted $I_t^l\\left( a\\right)$ for consistency of the notation with (4).\n- This is not a clarity issue per se, nevertheless it should be mentioned. Regarding the non-stationary bandit in (2), the concentration assumption should be around $\\mu_{is} = \\mathbb{E}\\left[ \\overline{X_{is}} \\right]$ rather than $\\mu_i$ for a meaningful analysis. It is true that the text in the paper \"Bandit-based Monte Carlo Planning\" is unclear about this. In page 5, the authors just \"assume the tail inequalities (3), (4)\" and we it is unclear whether they mean $\\mu_{is}$ or $\\mu_i$. However, in a follow-up journal publication by the same authors \"Improved Monte Carlo Methods\" where they thoroughly rewrite the previous paper and correct some of its theoretical claims, the authors clarify this misunderstanding in Assumption 1. They indeed consider the concentration to be around $\\mu_{is}$. If $\\delta_{is} = \\mu_{is} - \\mu_i = \\mathcal{o}\\left( \\sqrt{\\frac{1}{s}}\\right)$ the Assumption 1 would indeed imply (2) in the reviewed paper for an appropriate constant $C$. However, I believe that no such assumption was made.\n\nTo recapitulate this section. I believe that most of the confusion comes from a lack of investment in clearly explaining the algorithm ReZero. I suggest that the authors take this into consideration when revising the paper. Some of the ways to make this improvement is through illustrations, pseudocode and accompanying text. Some sections like 3.1 could be deleted in favour of this restructuring.\n\n## Theoretical mistakes:\n\nI love the fact that the authors committed time to theoretically analyse their algorithm, I encourage such endeavour. I would like to draw the attention of the authors to a number of mistakes in their proofs that unfortunately end up not supporting their claims. At the end of this section, I will provide references that I hope will be helpful to the authors in rethinking their proof approach in the future.\n- First, the statement of Theorem 1 is not rigorous. The terms $P_i$ are nowhere defined. I understood from context and the proof that they are the prior terms $P\\left( s, a\\right)$ introduced in $3$, nevertheless, they should be properly defined in the theorem statement. The same remark applies to other terms like $\\widehat{\\mu}_{ln}$. Moreover, and even more importantly, What does it mean precisely to choose actions according to $(5)$? A first understanding would lead the reader to believe that once an arm $i$ is played, its index $I_i^t$ will always be equal to its reward, thus removing the UCB exploration bonus, but it seems to me unlikely that this is what is meant by the authors as it would imply full exploitation algorithm. Please provide the precise statement of how actions are chosen.\n- General advice that the authors could incorporate straightforwardly. Please start using \\left( , \\right) , \\Bigg\\{ , \\Bigg\\} ... for clearer mathematical notation.\n\n### Lemma 1:\n- Minor: Line 706, put a space between \"Let\" and \"$\\widehat{\\mu}_t$\".\n- Minor: I think the term $\\exp\\left( \\frac{\\sqrt{a}\\epsilon}{C^2}\\right)$ should rather be $\\exp\\left( 2\\frac{\\sqrt{a}\\epsilon}{C^2}\\right)$ in (13). 
This is a minor detail as it does not change the result, but could you please verify this?\n\n### Theorem 1:\n- $\\widehat{\\mu_{is}}$ is the average of the first $s$ samples of arm $i$, thus I think that the notation $\\widehat{\\mu}_{ln}$, in the theorem statement, is inadequate as it implies that arm $l$ has been chosen $n$ times. It should rather be replaced by $\\widehat{\\mu}_l\\left( n\\right)$.\n- $T_i\\left( n\\right) = \\sum_{t=1}^n \\mathbb{I}\\\\{ A_t = i \\\\}$, thus $A_t$ denotes the chosen arm at time $t$. This definition should be stated during the proof.\n- I believe there is a mistake in (24). For the passage from (23) to (24), the authors employ $\\sum_{t=2}^{n}\\exp\\left( -a\\sqrt{t-1}\\right) \\le \\int_{t=1}^\\infty \\exp\\left( -a\\sqrt{t-1}\\right)dt$ but they do not make the calculation explicitly. I do it below via change of variable $u = \\sqrt{t-1}$:\n\\begin{align*}\n\\int_{t=1}^\\infty \\exp\\left( -a\\sqrt{t-1}\\right)dt &= \\int_{u=0}^\\infty 2u\\exp\\left( -au\\right)du\\\\\\\\\n&= \\left[ -\\frac{2u}{a}\\exp\\left( -au\\right)\\right]_{0}^\\infty + \\int_0^\\infty \\frac{2}{a}\\exp\\left( -au\\right)du\\\\\\\\\n&= \\frac{2}{a}\\left[ -\\frac{1}{a}\\exp\\left( -au\\right)\\right]_0^\\infty = \\frac{2}{a^2}\\\\\\\\\n\\implies \\sum_2^n\\exp\\left( -\\frac{1}{C^2}\\epsilon P_1 \\sqrt\\{t-1}\\right) &\\le \\frac{2C^4}{\\epsilon^2 P_1^2}\n\\end{align*}\nThis in turn implies that the term in (24) should be replaced with $1 + \\frac{2C^6}{\\epsilon^4 P_1^2}$.\n- The passage from (26) to (27) is wrong, and this is what will unfortunately go against your claim. I invite the authors to discuss this important specific point with me during the rebuttal. There is an omitted sum here. When upper bounding the term in (26), you should proceed as follows:\n$$\n\\mathbb{E}\\left[ \\sum_{t=1}^n\\sum_{s=1}^{t-1} \\mathbb{I}\\Bigg\\\\{ \\widehat{\\mu}_{is} + P_i\\sqrt{\\frac{n-1}{\\left( 1 + s\\right)^2}} \\ge \\mu_1 - \\epsilon\\Bigg\\\\}, T_i\\left( t-1\\right) = s\\right]\n$$\nThen this term will be further upper bounded by omitting the event $T_i\\left( t-1\\right) = s$. For a similar illustrative example of this sum, please refer to the paper \"Finite-time Analysis of the Multiarmed Bandit Problem\", especially the proof of Theorem 1, check (6). Now this double sum will lead to the following:\n$$\n(25) \\le n + \\frac{2\\left( n-1\\right)\\sqrt{P_i^2 \\left( n-1\\right)}}{\\Delta_i - \\epsilon} + \\frac{\\left( n-1\\right)C^2}{\\left( \\Delta_i - \\epsilon\\right)^2}\\exp\\left( \\frac{2\\left( \\Delta_i - \\epsilon\\right)\\sqrt{P_i^2 \\left( n-1\\right)}}{C^2}\\right)\n$$\nThus leading to:\n$$\n\\mathbb{E}\\left[ T_i\\left( n\\right)\\right] \\le 1 + \\frac{2C^6}{\\epsilon^4 P_1^2} + n + \\frac{2\\left( n-1\\right)\\sqrt{P_i^2 \\left( n-1\\right)}}{\\Delta_i - \\epsilon} + \\frac{\\left( n-1\\right)C^2}{\\left( \\Delta_i - \\epsilon\\right)^2}\n$$\nNow unfortunately the term $n + \\frac{2\\left( n-1\\right)\\sqrt{P_i^2 \\left( n-1\\right)}}{\\Delta_i - \\epsilon} + \\frac{\\left( n-1\\right)C^2}{\\left( \\Delta_i - \\epsilon\\right)^2}$ is not sublinear and as such you can no longer deduce that $\\mathbb{E}\\left[ T_i\\left( n\\right)\\right]$ is sublinear. In fact that is the reason why UCB, in the Multi-Armed Bandit (MAB) setting, employs a logarithmic term in the index $\\widehat{\\mu}_i\\left( t-1\\right) + C\\sqrt{\\frac{\\log t}{1 + T_i\\left( t-1\\right)}}$ as opposed to a polynomial term. 
$\log t$ slows down exploration, thus leading to the concentration of visits to the optimal arm. With a polynomial term in $t$, this is no longer guaranteed because the index grows quickly, leading to too much exploration even for the suboptimal arms. Now, the reason AlphaZero employs a polynomial term instead of a logarithmic term is that there is a need for a lot more exploration in MDPs than for MABs. You need a lot more samples to estimate the value of a node in an MDP than you need to estimate the value of an arm in a MAB.\n- Suggestions to solve this issue: I think there might be a need to change the way you define the index of your UCB. For finite-sample analyses of UCT-like algorithms, please check the following paper \"Nonasymptotic Analysis of Monte Carlo Tree Search\".\n\n## Experiments:\n\nI do not know how to accurately assess the experimental setup, as I feel like I'm not fully grasping the algorithm itself to pinpoint the merits of its contributions in the experiments. Nevertheless, if ReZero-M is just ReZero applied to MuZero, then it does hint at some substantial speedups. My concern is with variance. From my understanding of your reanalyse strategy, you can stop searching a node prematurely, and with fewer samples to estimate its values, the variance of this estimate could be important. I think the authors should spend some space discussing this issue. Figure 7, SeaquestNoFrameskip-v4, and Gomoku do hint at this phenomenon, while we see that MuZero's variance is stable in these cases.\n\n## Misspellings:\n\nThere are a multitude of misspellings in the paper. Although this is a minor issue, the authors should take the time to revise their paper accordingly.\n- Line 308: \"in in\".\n- Line 059: \"we aims\".\n- Line 084: \"a efficient\"\n- Line 127: \"a last observation sequences\"\n- Line 137: \"we suggests\"\n- Line 181: \"reward $r_A$\", shouldn't it be \"return $r_A$\"?\n- Line 201: Remind the reader that the grey box refers to Figure 1.\n- Line 212: \"Trajectories was\"\n- Line 744: \"$T_i\left( k\right)$ as the times that\" should be \"$T_i\left( k\right)$ as the number of times that\".\n- Line 810: $\widehat{\mu}_n = \frac{1}{n}\sum_1^n \widehat{\mu}_t$, what does this mean? Does it mean that you have $n$ samples from the optimal arm?\n- Line 311: \"we don't need\" should be \"we do not need\".\n- Line 349: \"compatible to\" should be \"compatible with\".\n- Line 399: \"an fair\" should be \"a fair\".\n- Line 423: \"nexe\" should be \"next\".\n- Line 527: \"we incorporates\" should be \"we incorporate\".\n- Line 535: \"could broadening\".\n- Line 539: \"for build\".\n\n\nI believe that the subject and the approach of the paper could be very interesting to investigate rigorously. Unfortunately, mainly due to the lack of clarity and the unsoundness of the theoretical results, I have decided to reject the paper. Nevertheless, I invite the authors to take a look at my suggestions during the rebuttal period and maybe we can have an instructive discussion about a revised version of the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Main Questions:\n1) Previous studies such as \"Information capture and reuse strategies in Monte Carlo Tree Search, with applications to games of hidden information\" and \"Learning policies from self-play with policy gradients and MCTS value estimates\" have also proposed methods similar to reuse in MCTS. What are the differences between the methods in this work and those in the previous studies? This requires detailed discussion.\n2) The paper presents some theoretical analyses and proofs, but I fail to understand the specific connection between this proof and the method proposed in the article, and a detailed explanation is needed.\n3) How the proposed method performs in terms of improvement on Sampled MuZero and EfficientZero requires further experimental illustration.\n\nSome minor issues:\n1) It is recommended that the algorithm not use the pseudo-code of Python in Algorithm 1.\n2) Line 235: what does $S^{0}s$ refer to?\n3) The font size of the text in Figure 3 is too small.\n4) Is it understandable that the experimental effect of Figure 6 is not obvious and there is only a small amount of influence?\n5) Why is the decision made to periodically reanalyze the entire buffer instead of frequently reanalyzing small batches of data? How does this approach impact search efficiency?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This work proposes the backward-view reuse technique, which reutilizes previously collected data to boost search efficiency. Additionally, the entire-buffer reanalyze mechanism represents a creative adaptation of traditional reanalyze processes, allowing for more effective data utilization. This originality is further underscored by the application of these techniques across diverse environments.The paper is well-structured and clearly written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This research presents an innovative approach to enhance the efficiency of Monte Carlo Tree Search (MCTS) algorithms, like MuZero, by introducing a method called ReZero. By leveraging a backward-view reuse technique and periodically reanalyzing the entire buffer, the authors aim to reduce search costs while maintaining or even improving performance. This work promises to simplify data collection and reanalysis in various decision-making domains."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main weakness of the paper lie in the lack of innovation in the method and the scarcity of baseline comparisons in the experiment. The reuse method has some other approaches in previous MCTS works, but this paper fails to conduct a sufficient disscusion. Additionally, the experiments in the article only compare with the MuZero algorithm. Whether the EfficientZero or Sampled MuZero would show improvements when using this method requires further experimental illustration."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Using a backward-view reanalyze process inspired by one-armed bandit model and simpified framework to boost MCTS-based algorithms."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024rezero,\ntitle={ReZero: Boosting {MCTS}-based Algorithms by Backward-view and Entire-buffer Reanalyze},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yPyb2j7oZc},\nnote={under review}\n}"
},
"abstract": {
"value": "Monte Carlo Tree Search (MCTS)-based algorithms, such as MuZero and its derivatives, have achieved widespread success in various decision-making domains. These algorithms employ the reanalyze process to enhance sample efficiency from stale data, albeit at the expense of significant wall-clock time consumption. To address this issue, we propose a general approach named ReZero to boost tree search operations for MCTS-based algorithms. Specifically, drawing inspiration from the one-armed bandit model, we reanalyze training \nsamples through a backward-view reuse technique which uses the value estimation of a certain child node to save the corresponding sub-tree search time. To further adapt to this design, we periodically reanalyze the entire buffer instead of frequently reanalyzing the mini-batch. The synergy of these two designs can significantly reduce the search cost and meanwhile guarantee or even improve performance, simplifying both data collecting and reanalyzing. Experiments conducted on Atari environments, DMControl suites and board games demonstrate that ReZero substantially improves training speed while maintaining high sample efficiency."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Deep Reinforcement Learning",
"Monte Carlo tree search",
"MuZero",
"efficiency optimization",
"reanalyze"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c0da7bdbbeec9be8174528947cbc12432952909e.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "ReZero: Boosting MCTS-based Algorithms by Backward-view and Entire-buffer Reanalyze"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yQcFniousM | Pacmann: Efficient Private Approximate Nearest Neighbor Search | main | Active | Information Retrieval;Privacy | alignment, fairness, safety, privacy, and societal considerations | 6;6;6;8 | 2;3;2;4 | 3;3;3;3 | 3;2;3;3 | 2;3;3;4 | 6.5 | 2.75 | 3 | 2.75 | 3 | 0.870388 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "As far as I can see, I don't see any ethics concern."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The problem of study seems to be important, and the solution is natural and clearly written.\n* The experiment results look promising."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents Pacmann, a new private approximate nearest neighbor search scheme. The main claim is that this scheme is more accurate and efficient than prior private ANN search schemes.\n\nThe design of Pacmann includes a number of features, which I will try to recount here:\n* Pre-process the search graph to ensure that every vertex in the graph has a bounded out-degree of $C$. Then, when one performs a greedy walk on the graph (toward finding an approximate neighbor), the search for the next hop needs to inspect $C$ vertices at most.\n* To ensure privacy, the client will perform the search algorithm themselves. However, it is unreasonable to store the graph on the client side. If the client sends queries to the server in plain text, this may pose another privacy risk. The solution is a private information retrieval scheme, which allows the client to retrieve data from the server without revealing the index of interest. The paper appears to use an off-the-shelf PIR scheme from prior work, which, after client-side pre-processing, can achieve sublinear communication and computation costs for information retrieval.\n\nA number of experiments are provided to support the effectiveness of the approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(Disclaimer: I might not be an expert in critiquing this paper, as I don't have much experience in this area.)\n\n* Can you comment on the scalability of your approach? I am quite concerned with the $O(\\sqrt{n})$ communication and computation cost, as I understand that $n$ is the total number of vertices in the database. I see you ran your experiments up to 100M vertices. What does this scale mean in practice? I imagine, e.g., Google search would have a way larger scale of data than this."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Are you aware of any realistic applications?\n\nShouldn't the expansion of the PACMANN acronym include the word nearest to explain the double N?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This is an interesting idea to explore.\n\nThe scheme works well asymptotically, being the first scheme with sublinear latency, thus constituting a theoretical breakthrough.\n\nThe idea is not just naively applied with thought gone into possibilities like beam search."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper uses proposes a secure protocol for approximate nearest neighbours (ANN). The protocol is based on the client traversing a graph of the database held by a server, using a Private Information Retrieval (PIR) protocol to query each part of the graph that the client traverses. As the PIR protocol they use from previous work has O(sqrt(n)) online cost per query, and they need o(n) queries the latency is sublinear in the database size. Though the whole database must be sent during a preprocessing phase only a O(sqrt(n)) fraction of it need be stored by the client.\n\nThey provide some optimizations to the bsic idea, taking advantage of beam search to use the fact that it is cheaper per query to make several at once. they provide a small ablation study to verify this is infact helpful.\n\nThey then compare the quality of their results to the quality of some linear latency algorithm finding it improves on the baseline (on quality/latency tradeoff) when the database has size 2m to 50m depending on the network connection."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The breakthrough in asymptotics here is mostly coming from previous PIR work, though exploring the applicaiton of it is still valuable.\n\nFor the largest dataset they consider, after all their optimizations the reduction in latency is between 1.3x and 2.2x depending on the network (down from about 4 seconds) to achieve this they require 3GB of client storage and 60GB of preprocesing communication. I am not aware of and they do not point to any realistic application in which this is a good tradeoff in practice."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "[Q1] Could you provide security analysis of the proposed method?\n\n[Q2] It would be better to compare efficiency with Tiptoe-based solution."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "[S1] The motivation of this paper is clear and interesting, which is to address the privacy and efficiency challenges of remotely querying a vector database. \n\n[S2] This paper successfully identifies the and addresses the limitations of existing approaches. It introduces a novel application of private information retrieval (PIR) to perform ANN search in a vector database. Compared with the baseline Tiptoe [1], this work does not involve heavy clustering-based ANN algorithm and homomorphic encryption, acheving promising efficiency for private query.\n\n[1] Henzinger, Alexandra, et al. \"Private web search with Tiptoe.\" Proceedings of the 29th symposium on operating systems principles. 2023.\n\n[S3] This paper is well-organized with necessary background knowledge and clear illustration of high-level ideas. The language and presentation of this paper are easy understand.\n\n[S4] This paper provides fair discussion on the proposed approach and identifies the limitation of this work. For example, it is more suitable for scenarios with good network connection, which would lead further research on potential following works on more general network connection. \n\n[S5] This paper provides rigorous theoretical insights and illustrate the gap between theory and practice to enhance the understanding of graph-based ANN search."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces PACMANN, a private approximate nearest neighbor (ANN) search method enabling clients to conduct privacy-preserving nearest neighbor queries across hundreds of millions of vectors. PACMANN utilizes state-of-the-art private information retrieval technique and customized searching algorithm to query vector database both privately and efficiently. PACMANN surpasses current leading private ANN search techniques in search quality and offers reduced latency for large-scale datasets. This approach could inspire more follow-ups applications in the area."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "[W1] The security analysis of the proposed approach is missing, which should be provided for a work specified in privacy-preservation. Although the core procedure PIR is invoked in black-box style, this paper should formally capture the potential information leakage during the workload. For example, the baseline of this paper Tiptoe provides a security analysis in Appendix D.\n\n[W2] It would be better if the baseline Tiptoe can be implemented more completely in experiments. For example, this paper claims the efficiency limitation of Tiptoe comes from heavy homomorphic encryption and some other factors. As far as I know, Tiptoe just requires lightweight homomorphic encryption (i.e., homomorphic addition in LWE-style ciphertexts) and it is not that slow. For fair comparison, the simulation of Tiptoe should be equipped with homomorphic encryption to confirm the outperformance of this paper.\n\nTypos: \n“We could potentially the verifiable” in Appendix C.1."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "NA"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Query search mechanisms using fully homomorphic encryption (FHE) and multi-party computation (MPC) often suffer from high computational complexity. This paper addresses these challenges by introducing a localized search approach that performs iterative graph traversal on the client side. To minimize computation costs, it preprocesses Private Information Retrieval (PIR) to obtain private information from the server efficiently. Leveraging the Piano framework, the paper achieves a reduction in computation and communication costs to $O(\\sqrt{n})$. For practical implementation, it further enhances efficiency with techniques such as beam search, fast start, and batched PIR queries."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces PACMANN, a private Approximate Nearest Neighbor (ANN) search scheme enabling clients to search vector databases without exposing their query vector to the server. Unlike previous methods relying on server-side encrypted searches, PACMANN offloads limited computation and storage to the client. It achieves up to 2.5× higher search accuracy on real-world datasets compared to prior private ANN schemes, approaching 90% of the quality of a state-of-the-art non-private ANN algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The definition of privacy in the paper is somewhat vague. Could you clarify the attack model? Specifically, what are the attacker’s capabilities, and what information are they attempting to extract from the dataset? Providing these details would help make the definition of privacy more precise and formal.\n\nIn addition to FHE, MPC, and PIR, differential privacy (DP) can also provide formal privacy guarantees. While DP generally incurs low computational costs, it may reduce the model’s utility. Including a comparison with DP-related work would make the paper more comprehensive."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A privacy-preserving nearest neighbor search algorithm that achieves more than 2x improvement in search quality with even lower latency compared to SOTA."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024pacmann,\ntitle={Pacmann: Efficient Private Approximate Nearest Neighbor Search},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yQcFniousM},\nnote={under review}\n}"
},
"abstract": {
"value": "We propose a new private Approximate Nearest Neighbor (ANN) search scheme\nnamed Pacmann\nthat allows a client to perform ANN search\nin a vector database \nwithout revealing the query vector to the server.\nUnlike prior constructions that run encrypted search on the server side\nwith computationally-intensive cryptographic techniques,\nPacmann carefully offloads limited computation and storage to the client. \nSpecifically, clients run a graph-based ANN search, where in each hop on the graph, the client privately retrieves local graph information from the server. \nTo make this efficient, we combine two ideas: \n(1) we adapt a leading graph-based ANN search algorithm to be compatible with private information retrieval (PIR) for subgraph retrieval;\n(2) we use a recent class of PIR schemes that trade offline preprocessing for online computational efficiency. \nPacmann achieves significantly better search quality than\nthe state-of-the-art private ANN search schemes,\nshowing up to 2.5$\\times$ better search accuracy on \nreal-world datasets than prior work and\nreaching 90\\% quality of a state-of-the-art \nnon-private ANN algorithm.\nMoreover on large datasets with up to 100 million vectors,\nPacmann shows better scalability \nthan prior private ANN schemes\nwith up to 2.6$\\times$ reduction in computation time\nand 1.3$\\times$ reduction in overall latency."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Information Retrieval",
"Privacy"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/cad28f8ca39a032e03d875a914685a91c05e4c46.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Pacmann: Efficient Private Approximate Nearest Neighbor Search"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yR47RmND1m | Identifying and Tuning Safety Neurons in Large Language Models | main | Active | Large Language Models;Alignment;Safety;Interpretability;Neuron Detection | alignment, fairness, safety, privacy, and societal considerations | 3;5;6;8;8 | 5;2;4;4;3 | 2;3;3;3;4 | 2;3;3;3;4 | 3;3;3;3;4 | 6 | 3.6 | 3 | 3 | 3.2 | -0.310087 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* In table 4, why you used llama2-7B-chat (which is a safety-aligned model) and Mistral-7B-Instruct-v0.2 (which is not a safety-aligned model) for downstream fine-tuning tasks ? How about use Mistral-7B models that have been safety-aligned by your proposed method $\\texttt{SN-Tune}$?\n* This paper [1] also proposes some detection methods to find safety neurons and perform operations on the identified neurons to reinforce the model's safety. Could the authors discuss more between this paper and findings of your paper?\n* There are several papers [1][2][3][4] also study on safety neuron/ safety mechanism/ backdoor, it will better if the authors could add some comments on them.\n\nIf the authors address most of my concerns, I would consider increasing the score.\n\n[1] Wei, Boyi, et al. \"Assessing the brittleness of safety alignment via pruning and low-rank modifications.\" arXiv preprint arXiv:2402.05162 (2024).\n\n[2] Chen, Jianhui, et al. \"Finding Safety Neurons in Large Language Models.\" arXiv preprint arXiv:2406.14144 (2024).\n\n[3] Hsu, Chia-Yi, et al. \"Safe LoRA: the Silver Lining of Reducing Safety Risks when Fine-tuning Large Language Models.\" arXiv preprint arXiv:2405.16833 (2024).\n\n[4] Zhang, Zhengming, et al. \"Neurotoxin: Durable backdoors in federated learning.\" International Conference on Machine Learning. PMLR, 2022."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The paper is well-written and the idea is easy to be understood.\n* Interesting verification of safety neurons. The authors used detailed experiments to verify that the identified safety neurons have a significant impact on the model's safety and found that these safety neurons are relatively few in number, primarily located in the self-attention layers of the first few blocks of the model.\n* Extensive experiments. The authors validated the effectiveness of $\\texttt{SN-Tune}$ on both instruction models and base models, as well as on downstream fune-tuning tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an innovative method for effectively and efficiently detecting and fine-tuning \"safety neurons,\" which comprise less than 1% of model parameters and are predominantly located in low-level self-attention layers. The authors conducted related experiments to verify that safety mechanism is resilient but breakable. Notably, the proposed tuning method $\\texttt{SN-Tune}$ enhances model safety without sacrificing performance, significantly lowering harmful output scores in both instruction-tuned and base models. This approach also improves safety robustness during downstream fine-tuning tasks, such as GSM8K, by isolating safety neurons from foundational ones. Additionally, the authors explored the influence of the number of safety documents and performed grid searches over learning rates and training epochs in their ablation study."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Although authors empirically verify the identified safety neurons by their detection method have a significant impact on the model's safety, the proposed detection method lacks theoretical support for why it can identify neurons that have such a significant impact on safety.\n* Why does the RHS of equations(4)(5) of the main paper seem to be independent of $l$ (the $l$-th neuron in the $i$-th layer)? They appear to be the same value for different $l$. Also in appendix, it is confusing why $h_{ffn}$ is a vector, could you help write the specific form? And how can you get (8) from (7) since x is a input for $W_{down}$ in (8) but not for $W_{down}$ in (7)?\n* For downstream fine-tuning tasks, could you consider more datasets like Alpaca or Dolly to verify the effectiveness of $\\texttt{RSN-Tune}$?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Position-Dependent Activation Patterns:\n- How does the position of jailbreaking prompts affect safety neuron activation? Is there a correlation between prompt location and safety neuron response?\n- Do safety neuron distributions shift in long-text scenarios?How does the context window length affect the stability of safety neurons?\n\n2. Dynamic Nature of Safety Neurons during Fine-tuning:\n- Can non-safety neurons transform into safety neurons during full-parameter fine-tuning? How does SN-Tune handle the potential emergence of new safety neurons?\n- How can we track and verify the transformation of regular neurons into safety neurons?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. Originality: The paper introduces an innovative approach to understanding LLM safety through the novel concept of \"safety neurons.\" The clear definition and identification method for these neurons represents a significant departure from traditional safety alignment approaches, offering a fresh perspective on model safety mechanisms.\n\n2. Experimental Rigor: The research demonstrates exceptional thoroughness in its experimental investigation, providing comprehensive analysis of safety neurons' characteristics, including their proportion (<1% of parameters), location sensitivity, robustness, and cross-lingual transferability. Each finding is well-supported by detailed empirical evidence.\n\n3. Practical Impact: The proposed SN-Tune method offers a highly effective solution for enhancing model safety while maintaining general capabilities. Its ability to achieve significant safety improvements by tuning only a small subset of parameters makes it both efficient and practical for real-world applications, demonstrating immediate value for improving existing language models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the crucial concept of \"safety neurons\", which investigates neural activation patterns when large language models encounter unsafe instructions and jailbreak attacks. The definition of \"safety neurons\" is concise, clear, and effective. Furthermore, the paper presents several significant findings: the proportion of safety neurons is remarkably small, constituting less than 1% of total parameters; the robustness study of safety neuron locations reveals sensitivity decreases from front to back layers, with safety neurons in the first 10-20 layers being more sensitive; and the transferability of safety neurons across multiple languages is investigated. Additionally, the authors propose the SN-Tune method, which exclusively fine-tunes safety neurons, achieving enhanced safety conditions by tuning only a small subset of parameters without catastrophic forgetting of general knowledge. The paper is easy to follow and well-written, with clear methodology explanations, comprehensive experimental results, and logical organization of findings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Theoretical Foundation: The paper lacks deeper theoretical analysis of observed phenomena. For example:\n- The mechanism behind the \"back-to-front deactivation\" pattern remains unexplained, where safety breakdown only occurs after almost all safety neurons are deactivated\n- This observation raises questions about whether the neurons in later layers are truly \"safety neurons\"\n- The nature and characteristics of overlapping safety neurons across different languages deserve more theoretical investigation\n\n2. Limited Analysis of Cross-lingual Safety Mechanisms:\n- While the paper identifies low overlap (30%) between safety neurons across languages, it doesn't explore:\n * The characteristics of these overlapping neurons\n * Why certain neurons are shared across languages while others are language-specific\n * The potential universal principles of safety mechanisms across languages"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "please see weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper writing is good and very easy to follow.\n2. The defense performance is strong, even compared with current SOTA defense method.\n3. The logic of this article is also very coherent. The author first suggests enhancing the effectiveness of the model's safe responses through a more detailed approach to neuron control. The author also conducted relatively detailed ablation experiments to support the faithfulness of safe neurons."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a method for identifying safety neurons in Large Language Models (LLMs), which are critical for managing harmful queries and represent less than 1% of the model parameters. It introduces a technique named SN-Tune that focuses on tuning these safety neurons while maintaining the overall capabilities of the models. The findings indicate significant reductions in harmful scores, with Llama3-8B-Instruction decreasing from 65.5 to 2.0. Furthermore, the approach improves safety robustness during fine-tuning by separating safety neurons from foundation neurons."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I believe the biggest shortcoming of this paper is that the description of the experiments for identifying safe neurons is very insufficient. The author does not mention the datasets used, parameter details, or the time costs associated with the experiments. The dataset is crucial because it relates to the generalizability of the method. For example, if the author only uses datasets related to violence to identify neurons, but the identified safe neurons still demonstrate good defensive capabilities against attacks related to pornography or other safety categories, including jailbreaking attacks, it suggests that safe neurons are relatively fixed within LLMs. However, the author does not mention this point."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Would it be correct to conclude from lines 362-363 that your results indicate that base models already contain safety neurons, despite not being safety-tuned? \n\n1. It could be useful to have a study on the distribution of safety neurons in the base models to verify they share similar properties to those in fine-tuned models, i.e. being concentrated (<1% of parameters) and being predominantly present in earlier layers and attention modules. \n\n1. Similarly, one could study whether their harmfulness scores get somehow even worse when these neurons are disabled. To make this non-trivial, you could have “safe-behavior-inducing” few-shot examples in the prompt, for example."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Simplicity of safety neuron detection: their notion of safety neuron is conceptually simple and can be computationally ascertained (instead of relying on manual analyses). Their later experiments indicate that one can indeed obtain a useful safety tuning algorithm by using their safety neuron detection method.\n\n1. Empirical insights into how safety mechanisms are implemented in LLMs: the paper’s results indicate that safety neurons are very concentrated (1% of parameters), occur primarily in self-attention modules and in earlier layers, and are mostly not shared across languages. These insights seem relevant for the community as a whole, particularly when devising defenses to multi-lingual attacks.\n\n1. Relevance of results for practitioners: one of the biggest hurdles in safety tuning is the performance degradation it usually entails if done in a naive way. The results in the paper suggest SN-tune might be useful for practitioners looking to safety-tune models without harming their capabilities. In particular, one could in principle automatically detect safety neurons using a small corpus of examples, rather than requiring manual analyses or large corpora.\n\n1. The paper is written mostly clearly and was easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors first introduce a technique for detecting LLM neurons (i.e. rows or columns in LLM weight matrices) responsible for safety (e.g. refusing to comply with harmful instructions). The method is fairly simple: a neuron is said to be a safety neuron if deactivating it changes the final next-token embedding by at least some threshold (measured in Euclidean distance) on each of a set of inputs. They validate that deactivating these neurons indeed increases harmfulness scores of three open-weight models, while preserving performance on capabilities benchmarks.\n\nThey then propose a safety fine-tuning method, named SN-Tune, which tunes only these safety neurons, keeping the remaining ones fixed. They show SN-tune significantly reduces harmfulness scores across several open weight models; e.g. Llama3-8B’s score decreases from 65.5 to 2.0. Finally, they propose a refinement of SN-Tune, termed RSN-Tune, which also identifies neurons responsible for model capabilities, and only tunes safety neurons which do not overlap with such capabilities neurons, hence mitigating capability degradation during safety tuning. Additional experiments include a study of the overlap of safety neurons across languages (which they find to be small) and of applying SN-tune to pre-trained models (rather than already-fine-tuned models)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of detail on how foundation neurons are detected: in Section 4, the authors propose not tuning safety neurons that also serve as foundation neurons (i.e. play a role in model capabilities). However, they do not clarify how they detect the foundation neurons. This harms the reproducibility of the RSN-tune results.\n\n1. Notation and presentation in Section 2.1: I would recommend revising the presentation in the Accelerated Safety Neuron Detection. For example, the authors write that Mask is an identity matrix, which, as stated, cannot be true. Looking at the appendix, it seems they might mean it is a binary matrix (i.e. the entries are 0 or 1). Also, the authors could clarify what they mean by parallel computations. Does this simply mean adding many computations in a batch? Generally, it seems that their notion of safety neuron is embarrassingly parallelizable, in the sense that their criterion can be computed independently for all neurons.\n\n1. Missing citation of Kotha et al. (2023) in section 2.3.2: one relevant work concerning multi-lingual attacks on LLMs is Understanding Catastrophic Forgetting in Language Models via Implicit Inference, by Kotha et al. 2023. They show that translating harmful instructions into low-resource languages can elicit harmful responses, which can then be translated back to the original language. Their results are complementary to the author’s findings that safety neurons have little overlap across languages.\n\n1. Another work the authors do not discuss, but which I believe is relevant and complementary to their results, is Mechanistically analyzing the effects of fine-tuning on procedurally defined tasks, by Jain et al. (2024). Amongst other results, Jain et al. also find that the effects of fine-tuning can be neutralized by pruning a small number of neurons."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "-\tFrom which figure/table can we infer that “1%” are safety neurons?\n- Why were helpfulness scores not reported?\n- Why were Llama2 instruction-tuned models not reported in Table 2? It is known that Llama2 is more resilient than Llama3 in terms of instruction following. Hence, the Llama2's results for Fig 2 need to be reported.\n-\tWhat’s language X and Y in Fig 5? \n-\tWhere are the results of the five languages? \n-\tWhat is the objective of having section 2.3.2 regarding the theme of the paper?\n-\tWhat is “Language-specific neurons” in line 291? It has not been defined nor introduced beforehand."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper deals with an important topic, the safety issue of LLMs – for safety alignment."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper identifies safety neurons that are associated with safety alignment in LLMs. The authors claim that the safety neurons constitute less than 1% of all parameters, and they are predominantly located in self-attention layers. Experimental results have been provided"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- My biggest concern is the distinction between paper, Zhao et al (2024b) (How do Large Language Models Handle Multilingualism? Yiran Zhao, Wenxuan Zhang, Guizhen Chen, Kenji Kawaguchi, Lidong Bing 2024). I believe this submission’s section 2.1 is the meat of the work, but the formulations are all found in Zhao et al (2024b) – just the same. Then, this submission’s distinctive contribution is very unclear. The only difference is (3) where the authors feed harmful queries and see the activations (although the sentence in lines 124-125 is broken, so it is speculated. It is impossible to know their actual implementation.) However, the entire formulations is just the same with the other paper, with only a minor tweak with harmful queries for safety context.\n\n- Moreover, the authors failed to properly provide a reference for the formulation/equation (1) and (2) and give credit to the authors of the paper. The paper is referred to only for the parallel neuron detection method. ((4), (5), and (6)). \n\n-\tMoreover, in terms of the paper’s direction and theme, this submission’s distinctive novelty from the following paper is unclear: “Assessing the Brittleness of Safety Alignment via Pruning and Low-Rank Modifications,” Boyi Wei · Kaixuan Huang · Yangsibo Huang · Tinghao Xie · Xiangyu Qi · Mengzhou Xia · Prateek Mittal · Mengdi Wang · Peter Henderson, ICML 2024. This paper also identifies safety components in LLMs.\n\n-\tUnlike the authors’ statement, “Regarding general capability, deactivating the safety neuron shows minimal impact, “similar to deactivating randomly selected neurons”,……” in Table 1, Deact_SN’s Avg. Capability is constantly, always lower than Deact-R, which hints that the safety neurons are also contributing to general capability. If the weights were purely safety neurons, when they were pruned, they shouldn’t impact the general capability even with no need to compare it with Deact-R. But, the table result shows there exists an impact on general capability. \n\n-\tIn order to conclude “Safety neurons predominantly reside within the self-attention layers.”, the authors should have reported the original number of neurons and the proportion of safety neurons in feed-forward and self-attention layers, respectively – not just the split.\n\n-\tFor section 3, “Efficient Safety Training” the authors claimed SN-Tune is efficient by comparing the training cost with Circ-Break. However, SN-Tune requires a process and resources and effort to “identify” the safety neurons. Only after the neurons are obtained, the SN-Tune can be applied. Hence, it is not a fair comparison. The cost/effort for identifying the safety neurons must be taken into account, but they were not in this submission.\n\n-\tIt is unclear the impact of RSN-Tune compared to SN-Tune by looking at the results of GSM8K in Table 4.\n\n-\tThis submission lacked an important and nominal paper in reference, with regard to neuron importance, Pavlo Molchanov, Arun Mallya, Stephen Tyree, Iuri Frosio, and Jan Kautz. Importance estimation for neural network pruning. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024identifying,\ntitle={Identifying and Tuning Safety Neurons in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yR47RmND1m},\nnote={under review}\n}"
},
"abstract": {
"value": "Safety alignment for Large Language Models (LLMs) has become a critical issue due to their rapid progress. However, our understanding of effective safety mechanisms in LLMs remains limited, leading to safety alignment training that mainly focuses on improving optimization, data-level enhancement, or adding extra structures to intentionally block harmful outputs. To address this gap, we develop a neuron detection method to identify safety neurons—those consistently crucial for handling and defending against harmful queries. Our findings reveal that these safety neurons constitute less than $1\\%$ of all parameters, are language-specific and are predominantly located in self-attention layers. Moreover, safety is collectively managed by these neurons in the first several layers. Based on these observations, we introduce a $\\underline{S}$afety $\\underline{N}$euron $\\underline{Tun}$ing method, named $\\texttt{SN-Tune}$, that exclusively tune safety neurons without compromising models' general capabilities. $\\texttt{SN-Tune}$ significantly enhances the safety of instruction-tuned models, notably reducing the harmful scores of Llama3-8B-Instruction from $65.5$ to $2.0$, Mistral-7B-Instruct-v0.2 from $70.8$ to $4.5$, and Vicuna-13B-1.5 from $93.5$ to $3.0$. Moreover, $\\texttt{SN-Tune}$ can be applied to base models on establishing LLMs' safety mechanism, effectively diminishing models' harmful scores from around $100$ to $5.3$, $13.5$, and $13.8$ for LLama2-7B-Base, LLama3-8B-Base, and Mistral-7B-v0.1, respectively. In addition, we improve the LLMs' safety robustness during downstream tasks fine-tuning by separating the safety neurons from models' foundation neurons."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Models",
"Alignment",
"Safety",
"Interpretability",
"Neuron Detection"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a7dd10ced5b17cbc2db1ee7624df5dc2f31b56e5.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Identifying and Tuning Safety Neurons in Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yRKelogz5i | Causally Motivated Sycophancy Mitigation for Large Language Models | main | Active | Large Language Model; Sycophancy; Causal Modeling | foundation or frontier models, including LLMs | 5;5;5;6 | 4;3;3;4 | 3;2;2;2 | 3;3;2;2 | 2;2;2;2 | 5.25 | 3.5 | 2.25 | 2.5 | 2 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Typo: \nLine 200, remove \"a directed\""
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. It is the first to apply Structured Causal Models (SCMs) to analyze and model sycophancy behavior in Large Language Models (LLMs), offering an innovative research perspective.\n2. Extensive experiments show that CAUSM is superior to existing state-of-the-art methods in mitigating sycophancy in LLMs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new framework called CAUSM (Causally Motivated Sycophaocy Mitigation) aimed at reducing sycophancy in Large Language Models (LLMs). The paper analyzes and models the sycophancy issue in LLMs through the lens of Structured Causal Models (SCMs). \nA significant causal signature is proposed to distinguish latent causal embeddings from spurious embeddings that cause sycophancy. The paper further propose an intervention-based scheme to calibrate the direction of the derived causal representations. Extensive experiments show that the proposed approaches outperforms the state-of-the-art competitors."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In Line 81, the phrase “To map the latent causal embeddings to the observable intermediate components of LLMs” appears to conflict with the statement “a significant causal signature which can distinguish the intended causal embeddings from spurious embeddings which incur sycophancy within the latent representation space.” Could you clarify this discrepancy?\n\n2. How can I(X_P ; Y | Z) be approximated by Eq. 7? Is causal intervention controllable? Could you provide an example to illustrate this?\n\n3. Why does the intervention \\bar{X}_P maximize the difference in cross-entropy losses for a fixed W? Is this a result of the algorithm's design, or does it align with the intrinsic nature of interventions?\n\n4. The author claims to utilize Parameter-Efficient Fine-Tuning (PEFT) in Line 267, yet states in the baselines section that all parameters are fine-tuned. This is rather confusing.\n\n5. In Section 4.3, how is the weight matrix value obtained, and what does |w_l^h| represent? Are these parameters part of the adaptor in PEFT, or are they parameters of the model itself? How are these parameters utilized specifically?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See in weaknesses.\n\nThe author does not need to address each identified weakness individually. Instead, it would be more effective to provide additional experimental results that demonstrate that the learned representations truly capture causality, beyond merely improving performance. For instance, could the correlation between the generated text and the learned representation be measured? Does the disentangled representation contribute more meaningfully to sycophancy-related terms or phrases? A more fine-grained case study focused on these aspects would offer stronger evidence for the proposed approach than the current, label-focused explanations.\n\nI would like to increase my score if the author could provide more detailed results."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors provide a novel framework, CAUSM, which leverages structured causal models to address sycophancy in large language models (LLMs). By introducing a causal approach, they advance beyond existing methods that may depend on spurious correlations, achieving more reliable mitigation of sycophantic responses.\n\n2. The article is well-structured, with clearly delineated sections detailing the problem (sycophancy in LLMs), prior approaches, and the limitations they aim to address with CAUSM. This clarity is helpful for readers who may be less familiar with the subject matter."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel framework called CAUSM to address sycophancy in large language models (LLMs), which refers to the models’ tendency to align with user preferences even when those preferences lead to incorrect or biased outputs. This behaviour reduces the reliability and factual integrity of LLM responses. The authors conclude that CAUSM effectively mitigates sycophantic behaviour in LLMs by focusing on the causal structure of sycophantic representations. The framework offers a scalable solution to improve the factual reliability of LLMs while respecting user preferences, which holds promise for enhancing trust in AI outputs in real-world applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My main concerns focus on the application of the structured causal model (SCM) approach. The authors state that causal relations can be captured via a directed acyclic graph (DAG) learned through regularization. However, in real-world applications, the regularization term is unlikely to reach zero. How can the authors be certain that the learned representations are truly disentangled? The methodology would benefit from a more robust approach to verifying that these representations capture causal rather than correlated information.\n\nMy second question relates to the evaluation of DAG structures in SCMs. Typically, metrics like Structural Hamming Distance (SHD) or False Discovery Rate (FDR) are used to confirm if the learned graph conforms to a DAG structure. This paper, however, claims that the approach is inspired by graph models without providing a specific graph-based evaluation. How can we be sure that the learned representations are genuinely causality-related rather than optimized merely by overfitting through additional parameters? Both theoretical analysis and case studies would strengthen the authors’ claims.\n\nRegarding the representation learning approach, it seems counterintuitive that a fully supervised learning model without any stochastic components could infer causality from data alone. The mutual information-based independence criterion here could indicate correlation, but correlation does not imply causation. Causal inference methods typically rely on stochasticity in at least one part of the model (e.g., a two-tower architecture) to differentiate causality from mere correlation. Without this, there is a risk of learning coincidental patterns rather than true causal relationships.\n\nLastly, a minor issue: without providing a formal proof, I recommend avoiding the use of terms like \"Lemma\" to present conclusions, as this suggests a level of mathematical rigor that is not fully substantiated in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the section on Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper aims to address LLMs’ sycophancy issue, which is an important topic in the community.\n\n2. A variety of experiments have been conducted to show the effectiveness of the proposed approach across different datasets.\n\n3. The high-level structure of the paper is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the issue of sycophancy in large language models (LLMs) and introduces CAUSM, a novel method for identifying and mitigating sycophantic behavior within the models’ latent representations. The authors view sycophancy in LLMs as “spurious correlations between user preferences and model outputs”. By leveraging structured causal models, they aim to disentangle sycophantic representations from causal embeddings. An intervention-based technique is then developed to recalibrate the causal representation direction embedded in attention heads."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation for the proposed approach and the intuition of the algorithm design needs to be more clear. \n- In the Introduction, the authors discuss two groups of prior research on LLM sycophancy: (1) linear probing and (2) path patching. However, these prior works primarily concentrate on analyzing and understanding sycophantic behavior in LLMs, rather than on mitigating it (also as mentioned in Related Work line 113-122). Given the distinct emphasis of these studies compared to the authors' goal, it is unclear how the limitations of these earlier works directly motivate the development of the authors’ proposed approach.\n\n- While the authors discuss several recent studies on mitigating LLM sycophancy in the Related Work section (lines 124–135), a more in-depth comparison between these studies and their own approach would be beneficial. Specifically, it would be helpful to understand if there are potential methodological concerns with the designs proposed by Burns et al. (2022) and Rimsky et al. (2023) that inspired the development of CAUSM.\n\n- It would be helpful to discuss the rationale for using structured causal models to capture sycophancy in the models’ latent representations.\n\n2. Some compared methods in the tables/figures of results are confusing. E.g., I couldn’t find a clear definition of CAUSM (Base) (Table 1 & 2) and CAUSM (Table 2). What is the difference between CAUSM and CAUSM (Base) in Table 2? \n\n3. Did the authors conduct the experiments on multiple base LLMs or specifically focus on a single base LLM (i.e., Llama-2-7B-Chat)?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "I have no ethics concerns regarding this paper."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why does accuracy decrease as K increases, and what factors influence the optimal K value?\n2. How does the paper measure and rank the sycophancy-relatedness of all attention heads and select the top-K heads?\n3. What are the strengths and weaknesses of Sycophancy Heads Pruning and Sycophancy Representation Editing, and are there any practical suggestions for choosing between them?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.The author models the phenomenon of sycophancy in language models as a type of spurious correlation in causal structures, making it possible to address sycophancy through conditional independence constraints.\n2.The CAUSM method achieves excellent results in mitigating sycophancy across INTRA-DATASET, CROSS-DATASET, and CROSS-TASK scenarios."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper analyzes and models sycophancy in LLMs through the lens of structured causal models (SCMs), which is actually the reliance on spurious correlations between user preferences and model outputs. Based on the proposed SCMs, this paper develops a novel framework called CAUSM to mitigate sycophancy in LLMs by exploiting a significant signature."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation for the Causal Activation Calibration method in Section 4.3 is unclear. Specifically, the relationship between the causal direction in Section 4.3 and the SCM in Section 4.1 is not sufficiently clear. Please provide further clarification on this point. My understanding is that Equation (6) aims to add a conditional independence constraint to the original training objective, intending to eliminate spurious correlations between $Z_S$ and $Y$. However, I am not convinced how the “causal direction” in Section 4.3 effectively mitigates these spurious correlations. Does this approach leverage the causal direction as a representation of causal effect orientation, or is there some other theoretical justification? I suggest that the authors clarify this aspect in the paper.\n\n2. The second statement in Lemma 4.1 is not described clearly enough. I would like to understand which part of the subsequent methods specifically utilizes this statement. For instance, if we consider a specific example where $f(Z_C,Z_S) = Z_C$, then this condition does not hold in the causal graph shown in Figure 2(a). Furthermore, does the second statement in Lemma 4.1 offer any guidance in constructing the subsequent methods? If not, I would suggest the authors consider removing this statement."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024causally,\ntitle={Causally Motivated Sycophancy Mitigation for Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yRKelogz5i},\nnote={under review}\n}"
},
"abstract": {
"value": "Incorporating user preferences into large language models (LLMs) can enhance the personalization and reliability of model outputs and facilitate the application of LLMs to real-world scenarios. However, leveraging user preferences can be a double-edged sword. Recent studies have found that improper utilization can incur sycophancy, where LLMs prioritize alignment with user preferences over the correctness of their outputs. To address sycophancy in LLMs, we analyze and model the problem through the lens of structured causal models (SCMs). We attribute sycophancy to LLMs' reliance on spurious correlations between user preferences and model outputs in this paper. Based on the proposed SCMs, we develop a novel framework to mitigate sycophancy in LLMs by exploiting a significant causal signature. Specifically, we eliminate the spurious correlations embedded in the intermediate layers of LLMs through head reweighting, and then calibrate the intra-head knowledge along the causal representation direction. Extensive experiments are conducted across diverse language tasks, and the empirical results demonstrate the superiority of our method over state-of-the-art competitors in mitigating sycophancy in LLMs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Model; Sycophancy; Causal Modeling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/970998c12d53f2bb3d0119b380b4a7f0785070bf.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Causally Motivated Sycophancy Mitigation for Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yRd4loGAhJ | SEAL: Scaling to Emphasize Attention for Long-Context Retrieval | main | Active | large language models;long context;retrieval;attention;supervised fine-tuning | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;5;5;6 | 4;4;4;4 | 2;3;2;3 | 2;2;3;2 | 1;3;2;3 | 4.75 | 4 | 2.5 | 2.25 | 2.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper is well-organized and easy to read. \n2. The proposed method presents a reasonable approach for long-context retrieval by identifying the key components of Transformer architecture to boost retrieval performance. \n3. The approach is practical and has the potential for broad application in various RAG settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work focuses on scaling to emphasize attention to long-context retrieval, designed to enhance the retrieval performance of LLMs in handling extended contexts. A cost-effective, learning-based mechanism is proposed to improve the model's performance in long-context retrieval tasks, which emphasizes specific attention heads tailored to retrieval tasks. Experimental results demonstrate superior performance over the compared baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The term \"cost-efficient\" is not clearly defined, resulting in ambiguity when assessing the cost-effectiveness of the approach. The strategy of identifying key components initially and subsequently fine-tuning these components may prove to be computationally intensive. It would be beneficial to provide details regarding the computational time involved in this process.\n2. A more thorough evaluation would benefit from comparisons with a broader range of advanced baseline models. Currently, the proposed method is compared against only one simple. Including more sophisticated long-context modeling methods and state-of-the-art techniques would better validate the effectiveness of the proposed method.\n3. To confirm the versatility of the proposed method, it would be beneficial to conduct experiments on different LLMs of varying sizes."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. What specifically constitutes “long-context retrieval”? Could the authors clarify this definition and provide more precise terminology?\n2. Why are different LLMs used in Figures 5 and 6? Is there a specific reason for the model changes, and how do these variations impact the comparability of the results?\n3. Can the authors provide experiments isolating the effect of Self-Extend in Figure 5 to verify its individual impact on performance?\n4. What is the rationale behind using the same datasets for “in-domain” and “out-of-domain” experiments in Table 3? How is “out-of-domain” defined in this context, and what criteria differentiate the two?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. SEAL presents an innovative approach by leveraging attention head/channel scaling to enhance long-context retrieval.\n2. The method uses very few trainable parameters and requires minimal training data, making it highly efficient."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces SEAL (Scaling to Emphasize Attention for Long-context retrieval), a novel attention scaling approach that improves retrieval performance for long-context tasks in Large Language Models (LLMs). It addresses the challenge of performance degradation over extended contexts, particularly in retrieval tasks. SEAL fine-tunes specific attention heads or channels using a minimal amount of training data, leading to significant improvements in long-context retrieval across various benchmarks. The paper focuses on cost-efficient enhancement of long-context capabilities without altering the model’s learned behavior."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The term “long-context retrieval” is ambiguous. It would be clearer to refer to “retrieval tasks that have long contexts,” which directly emphasizes tasks like passage retrieval or number retrieval.\n2. The paper lacks explicit detail about which context extension techniques are used. For example, Figure 6 mentions the use of Self-Extend, but no experiments isolating its performance are provided.\n3. Logical Flow in Writing: Certain parts of the paper are difficult to follow due to writing issues such as ambiguous expressions, inconsistent time tense, and occasional typographical errors (e.g., “biases” instead of “bias”).\n4. The distinction between “in-domain” and “out-of-domain” in the experiments is confusing. Specifically, if “in-domain” refers to training on retrieval tasks, why are the same datasets used for both “in-domain” and “out-of-domain” experiments?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Except fewer parameters, what other advantages does SEAL have over LoRA or other PEFT methods? Since the parameters of SEAL-L is also small compared to LLMs, what are the unique application scenarios for SEAL?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper proposes SEAL to efficiently adjusting the strength of each attention component, and achieves superior performance to various LLM baselines in long-context retrieval.\n2. The content, figures, and tables of the paper provide a detailed explanation and analysis of the motivation, methods, and experiments, facilitating the readers' understanding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an approach called Scaling to Emphasize Attention for Long-context retrieval (SEAL), which emphasizes specific heads or channels (attention outputs) particularly related to long-context retrieval by efficiently adjusting the strength of each attention component. The authors claimed that SEAL achieves significant improvements in in-domain retrieval performance and cross-domain document QA tasks, also extends the context limits of LLMs while maintaining highly reliable outputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The experimental results in Table 1 show that SEAL-H and SEAL-C require fewer parameters than Baseline and SEAL-L. However, their performance does not consistently surpass SEAL-L in long-context scenarios, failing to demonstrate the authors' claims.\n2. The experiments only select SEAL-L as the baseline, it should include other PEFT methods for comparison."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can you demonstrate the unique advantages of your method compared to LoRA through more experiments?\n2. Can you train and test on more various task types to demonstrate the generalization of your method?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper discovers that a certain attention head can cast a remarkable positive or negative effect on long-context retrieval accuracy, even as well as a certain channel. This is interesting and helpful for us to further understand the role of the internal modules of LLMs.\n\n2. The proposed method, SEAL, is very cost-effective, which only needs very few training samples and tuned parameters.\n\n3. There are enough evaluation results of various models to demonstrate the method’s effect."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel and practical method, SEAL, to improve the long-context retrieval ability of LLMs. \n\nFirst, through perturbation experiments, it finds a certain attention head or a certain channel in it can cause a positive or negative effect on long-context retrieval accuracy.\n\nSecond, it demonstrates directly scaling the hidden states of these heads or channels can indeed improve the retrieval accuracy of LLMs. \n\nThird, it adds trainable scale factors into the model and use a small amount of samples of retrieval tasks to fine-tune the model. The results show SEAL can remarkably improve the long-context retrieval ability of LLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Narrow scope\n\nThe method seems to only be applicable for classic retrieval tasks such as NIAH, and the training data is also the same types of tasks. It will not be surprising that this leads to an improvement, since this task has been too simple, fixed and formulaic, which may represent a narrow application scope for this method. It would be better to train and test on more tasks such as Knowledge-QA.\n\n2. No unique advantages\n\nThe author should empirically test whether the time or space required by SEAL is significantly less than that of LoRA. Otherwise it cannot show significant superiority of SEAL compared to LoRA. Because the parameters tuned by LoRA are already very few. Though SEAL can theoretically tune much less parameters, it may not significantly save much time. \n\n3. There is little detailed description about the procedures of the method in the abstract or introduction. This will make it hard for readers hard to grasp the method quickly. There usually should be a paragraph included in the introduction to describe the specific operation of the method.\n\n4. The curve of data points in Figure 4 (a) may be too small, making it hard to clearly see the changes."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024seal,\ntitle={{SEAL}: Scaling to Emphasize Attention for Long-Context Retrieval},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yRd4loGAhJ},\nnote={under review}\n}"
},
"abstract": {
"value": "In this work, we introduce a novel approach called Scaling to Emphasize Attention for Long-context retrieval (SEAL), which enhances the retrieval performance of large language models (LLMs) over extended contexts. Previous studies have shown that each attention head in LLMs has unique functionality and collectively contributes to the overall behavior of the model. Similarly, we observe that specific heads are particularly related to long-context retrieval and are positively or negatively correlated with retrieval scores. Building on this insight, we propose a cost-efficient, learning-based mechanism to emphasize these heads, improving the model's performance in long-context retrieval tasks. By applying SEAL, we achieved significant improvements in in-domain retrieval performance across various tasks and considerable improvement in the cross-domain document QA task of LongBench. Additionally, when combined with existing training-free context extension techniques, SEAL extends the context limits of LLMs while maintaining highly reliable outputs, opening new avenues for research in this field."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large language models",
"long context",
"retrieval",
"attention",
"supervised fine-tuning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e1edd1b3de7df03a7dd2a1c7bd6d318670bd1c57.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "SEAL: Scaling to Emphasize Attention for Long-Context Retrieval"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ySJSGZxN7M | Dual-Branch HNSW Approach with Skip Bridges and LID-Driven Optimization | main | Active | Nearest Neighbor Search;Optimization | optimization | 1;5;5 | 5;4;3 | 1;3;2 | 1;2;2 | 1;3;2 | 3.666667 | 4 | 2 | 1.666667 | 2 | -0.866025 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- The paper is not very straightforward to understand as it lacks a running example. I really don't know what exactly \"exclude_set\" contains. A running example or any pictorial example in Figure 3 helps.\n\nThe definition of LID (x) is unclear. LID(x) reads like query dependent, or say LID(q) is something we want. That means there will be a property per graph/query pair, but in Figure 2/3, there seem to be multiple high LID nodes. I'm not sure what exactly LID means."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- Work on an important problem for practical application."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a branching scheme to accelerate HSNW search. Supposedly, this scheme could find out some more useful starting point in the layer-0 of the HSNW method. The experimental setup is completely wrong and thus I can't draw a conclusion that experiments validate the claim."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I implemented HSNW from scratch and published HSNW-related algorithm in top data mining conferences before. The experimental setup is completely nonsense to me. It should sort of follow-up the ANN-benchmark setup and that's a more reasonable way to show results. \n\n- Apparently, the algorithm doesn't compare to the real HSNW implementation in wall clock time, and only compare their own variations. \n\n- The algorithm seems to be implemented in Python only, which is sort of contradicting to the point of AKNN. Most HSNW algorithm is implemented in C, and there is a reason for that. Many algorithm improvement can't really benefit the search as the real-world hardware doesn't support well for the operations; or the asymptotic analysis doesn't align well with the real operation cost. Unless the authors showed their modified algorithm can accelerate in C (doubtful as there are so many branching and that possibly will cause memory read busy), it's not very convincing. \n\n- It reads to me that the method is only adding more operations in top layers. So it has to at least numerically show how many IP/distance calculation it saves for the layer-0. Otherwise, the computational cost can only go up......\n\n- So even under the query time reported in Fig. 10, it's not very promising. Just based on the Figure 10, I will say it's not a really working method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "please refer to weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The dual-branch structure and LID-based insertion mechanism are well-motivated and novel.\n\n1. The experiments have (partially) shown the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper The proposed HNSW++ algorithm, which introduces a dual-branch structure, LID-based node insertion, and skip-layer bridges to address the limitations of the original HNSW, such as local optima and cluster disconnections. Experiments have shown the that the proposed method is competitive both in performance and in inference speed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My major concerns are about experiments.\n\n1. The proposed method is implemented in Python, which is not a good programming language model for comparing speed. The authors may provide a theoretical time complexity analysis to complement their empirical results. This would allow a fairer comparison of the algorithm's efficiency across different implementations.\n\n1. The baselines are relatively weak, I did not see the advanced methods (e.g., IVFPQ) used in faiss[1]. I'd like to have authors justify their choice of baselines and explain why stronger baselines were not included.\n\n[1] Johnson, Jeff, Matthijs Douze, and Hervé Jégou. \"Billion-scale similarity search with GPUs.\" IEEE Transactions on Big Data 7.3 (2019): 535-547."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The paper introduces an optimization algorithm for HNSW, including pseudocode for the dual-branch structure, LID-based insertion, and bridge-building techniques would enhance clarity and understanding."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1)This paper presents a novel enhancement to the HNSW algorithm, addressing key limitations related to local optima and inference speed. \n2)The research is evaluated across diverse benchmarks, including datasets from Computer Vision (CV), Deep Learning (DL), and Natural Language Processing (NLP). The experiments clearly support the proposed method's superiority in both accuracy and speed, with substantial performance gains.\n3)The paper is well-written, with clear explanations of complex concepts and methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an enhanced Hierarchical Navigable Small World (HNSW) algorithm addressing limitations in local optima and scalability by introducing a dual-branch structure with LID-based insertion and a bridge-building shortcut technique. These innovations improve cluster connectivity, capture outliers more effectively, and reduce inference time. Experimental results across NLP, DL, and CV datasets demonstrate notable accuracy and speed improvements over the original HNSW algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1)The related work section is overly concise, lacking a comprehensive review of current research, which limits contextual understanding of the contributions.\n2)Although the paper claims improvements in inference speed, Figures 9 and 10 show only modest gains, casting doubt on the practical significance of this claim.\n3)The experimental evaluations are relatively limited, with insufficient algorithmic comparisons; in particular, using only the GLOVE dataset for NLP benchmarks diminishes the persuasiveness of the results in this domain."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose an improved HNSW algorithm that tackles local optima and slow inference issues by using a dual-branch structure, LID-based insertion and bridge-building shortcuts, resulting in faster and more accurate performance across various datasets"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dualbranch,\ntitle={Dual-Branch {HNSW} Approach with Skip Bridges and {LID}-Driven Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ySJSGZxN7M},\nnote={under review}\n}"
},
"abstract": {
"value": "The Hierarchical Navigable Small World (HNSW) algorithm is widely used for approximate nearest neighbor (ANN) search, leveraging the principles of navigable small-world graphs. However, it faces some limitations. The first is the local optima problem, which arises from the algorithm's greedy search strategy, selecting neighbors based solely on proximity at each step. This often leads to cluster disconnections. The second limitation is that HNSW frequently fails to achieve logarithmic complexity, particularly in high-dimensional datasets, due to the exhaustive traversal through each layer. To address these limitations, we propose a novel algorithm that mitigates local optima and cluster disconnections while improving inference speed. The first component is a dual-branch HNSW structure with LID-based insertion mechanisms, enabling traversal from multiple directions. This improves outlier node capture, enhances cluster connectivity, and reduces the risk of local minima. The second component introduces a bridge-building technique that adds shortcuts between layers, enabling direct jumps and speeding up inference. Experiments on various benchmarks and datasets showed that our algorithm outperforms the original HNSW in both accuracy and speed. We evaluated six datasets across Computer Vision (CV), deep learning (DL), and Natural Language Processing (NLP), showing improvements of 2.5% in NLP, 15% in DL, and up to 35% in CV tasks. Inference speed is also improved by 12% across all datasets. Ablation studies revealed that LID-based insertion had the greatest impact on performance, followed by the dual-branch structure and bridge-building components."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Nearest Neighbor Search",
"Optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/fc497b2e112a5f73f804b2db0e13b3669d2fb905.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/cd7d84db7e5440a4b68789eb4c0a6ebd924e8cb5.zip"
},
"title": {
"value": "Dual-Branch HNSW Approach with Skip Bridges and LID-Driven Optimization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ySRsm6HDy5 | Breaking the Curse of Multiagency in Robust Multi-Agent Reinforcement Learning | main | Active | Multi-agent reinforcement learning;Robust Markov games;Game theory;Distribution shift | learning theory | 5;5;5;5 | 3;4;4;3 | 2;3;3;2 | 2;2;3;2 | 2;2;3;3 | 5 | 3.5 | 2.5 | 2.25 | 2.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "The ethical concerns are only limited to the concerns for any MARL algorithm: that caution must be exercised regarding potential biases and harmful outcomes when using these in real-world use cases."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the $(s, \\mathbf{a})$-rectangularity condition? An explicit definition for this would be useful to readers with limited background in this class of problems. Can the authors provide a brief formal definition of the $(s,a)$-rectangularity condition in the main text, along with a sentence or two explaining its significance in robust MDPs/Markov games?\n2. How are the uncertainty sets defined in this work practically meaningful? Can the authors to provide 1-2 concrete examples of how their fictitious uncertainty sets could model real-world uncertainties in multi-agent systems, and how this compares to uncertainty modeling in previous work?\n3. What are the real-world scenarios which are modelled well by this class of RMGs with fictitious uncertainty sets?\n4. When the authors compare with prior art in Table 1, aren’t the game models also different in the different works? Are there any trade-offs due to this?\n5. Is the sample complexity improvement obtained by the authors primarily dependent on their robust Markov game definition which is different from the other works in literature?\n6. Doesn’t the uncertainty set become very large and potentially intractable in problems with large state spaces?\n7. Is it necessary that the uncertainty set be defined as part of the problem? In practical use cases, how does one define these sets? Specifically, can the authors discuss guidelines or heuristics for how practitioners could define appropriate uncertainty sets for real-world multi-agent problems, and whether there are ways to learn or adapt the uncertainty sets from data?\n8. While I agree that the proofs are too long to be included in the main paper, but considering the fact that the primary contribution of this work is theoretical, it would be very helpful if the authors can provide short proof sketches or key proof ideas for all their claims in the main paper itself."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well written. All concepts and claims are clearly defined and claims are backed by theoretical justifications.\n2. While the authors provide a detailed theoretical analysis, I am unable to verify the correctness of their results, though I could not find any error.\n3. The authors clearly state their algorithm which can be applied to practical problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a new multi-agent problem class, which is a new class of robust Markov Games with fictitious uncertainty sets. The authors define solution concepts – robust Nash equilibrium and robust coarse correlated equilibrium for this new class of games and also prove the existence of these. The authors then propose a novel algorithm called Robust-Q-FTRL to find robust CCE for their class of robust Markov Games and also establish its sample complexity. Using their approach in their class if problems, the authors break the curse of multi-agency in MARL."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors do not provide any numerical examples. While they claim that their approach is scalable, some numerical examples will be useful to understand scalability and the sample efficiency claims better. Can the authors include a small simulation study to demonstrate the scalability and sample efficiency of their approach compared to baselines, even if only on a toy problem?\n2. The authors do not provide any comparison of their approach with other approaches in literature in terms of numerical computations.\n3. The authors do not comment on whether the policies learned using their algorithms are strategy proof from a learning perspective, i.e., do they perform well when other agents use different learning algorithms."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Questions:\n\n1. Does the definition of the policy imply that the final solution can be an un-stationary policy?\n2. Are the fictitious uncertainty sets un-stationary? That is, does each period have a distinct uncertainty set?\n3. In a robust Markov game (MG) setting where the uncertainty set differs at each time step, is it valid to interchange the max operators in the definition of the optimal robust value function at each time step?\n4. You assume that sampling from the true nominal transition kernel is possible. However, if this is feasible, why is robustness necessary? To address the sim-to-real gap, wouldn’t it be more appropriate to assume that sampling from the true nominal transition kernel isn’t possible?\n5. Additionally, could you explain why the estimation method in Algorithm 1 differs from that in standard MG settings?\n6. My understanding of the fictitious uncertainty set is that it involves a policy-based adjustment to the classic reference transition kernel (nominal transition kernel), which is then used as the basis for constructing the uncertainty set. This implies two rounds of adjustment in creating the uncertainty set, adding an extra step compared to traditional uncertainty sets. Does this make the new uncertainty set more conservative? How do you quantify the difference between the fictitious uncertainty set and a conventional uncertainty set without the initial adjustment?\n7. Could you elaborate on the reason for breaking the curse of multiagency? Is it mainly due to the FTRL algorithm, or does the fictitious uncertainty set contribute to this outcome?\n8. Could additional experimental results be provided to verify the convergence of the algorithm and demonstrate that the robust policy in this setting achieves better robustness compared to the classical (s, a)-rectangularity setting?\n9. Line 65, Kearns & Singh, 1999 is not about solving finite-horizon multi-player general-sum Markov games, why cite this?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Strengths:\n\n1. The main body of this paper is well-written.\n2. The proposed uncertainty set is interesting.\n3. The proposed algorithm effectively reduces sample complexity, avoiding the exponential growth issue in multi-agent systems caused by the joint action space."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the challenge of the “curse of multiagency” in robust MARL by introducing a new class of robust Markov games with a fictitious uncertainty set. This kind of uncertainty set allows any one agent to consider uncertainties arising from both the environment and the policies of other agents. Then, the authors prove the existence of robust Nash equilibrium and propose an algorithm Robust-Q-FTRL, which learns an approximate coarse correlated equilibrium with polynomial sample complexity, thus breaking the curse of multiagency in RMG for the first time. No experiment validation is provided."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weakness:\n\n1. The approach assumes access to a generative model with a true nominal transition kernel for sample generation, which is infeasible in real-world applications.\n2. Although the paper provides theoretical evidence for overcoming the curse of multiagency, further experimental validation is needed to determine whether the algorithm itself breaks the curse or if the new problem formulation inherently possesses this advantage, potentially enabling other algorithms to achieve similar results. In addition, additional experimental evidence is needed to demonstrate that the new uncertainty set yields more robust policies compared to the previously classic (s, a)-rectangularity uncertainty sets.\n3. While reading through the proof section, I noticed typos and unclear expressions, e.g., on lines 1044, 1170, and 1175. Additionally, the proofs lack tight cohesion between sections as seen in the main body, and there are missing references to some essential equations, which makes the reading experience somewhat challenging."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could the authors discuss the challenges involved in deriving sample complexity bounds for robust NE within the existing framework? Additionally, proposing the derivation of robust NE sample complexity as a direction for future work would help address the question of completeness and open avenues for further research.\n2. Could the authors include a concrete example or comparison showing how the policy-induced $(s, a_i)$-rectangularity condition improves sample complexity relative to the $(s, a)$-rectangularity condition used in previous work? Such a comparison would help readers understand the practical impact of this new approach.\n3. Could the authors clarify why separate constraints on $N$ and $K$ are presented, rather than combining them as $N = KH$? If there are specific advantages or insights gained from keeping these constraints separate, an explanation would enhance the clarity and completeness of the analysis.\n4. The paper lacks clear organization and contains numerous typos. For instance, on page 3, it refers to \"Section 2.2 for details,\" whereas the relevant content is actually in Section 3.1. Additionally, the algorithm description does not appear until page 8. A thorough proofreading pass to correct section references and typographical errors is also recommended to enhance readability."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper proposes a sample-efficient algorithm for robust general-sum and Markov games (RMGs), breaking the curse of dimensionality in RMGs and achieving optimal sample complexity for CCE with respect to the action space size."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a sample-efficient algorithm for robust general-sum and Markov games (RMGs), breaking the curse of dimensionality in RMGs and achieving optimal sample complexity for CCE with respect to the action space size."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The sample complexity optimality with respect to $A_i$ is achieved at the expense of an increased horizon $H$. Additionally, while the existence of a robust Nash equilibrium (NE) is proven, only the sample complexity for robust CCE is derived, which affects the completeness of the work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Is the proposed Algorithm 1 overall computationally tractable? I found a related discussion in Sec. 4.2, but it is about the Q value estimation."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The robust multi-agent learning setting is interesting and reasonable to me. \n\nAlthough under a different setting compared with previous works considering rectangular uncertainty set, the sample complexity results \"break the curse of multi-agency\"."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies robust reinforcement learning in the multi-agent setting. The authors propose a new uncertainty set called \"fictitious uncertainty set\", where depends on the joint policy of agents. Based on the new notion, they establish the existence of robust NE and CCE. After that, they propose the Robust-Q-FTRL algorithm, and under the generative model assumption, they establish sample complexity only scaling with the number of actions of all agents."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **About paper writing**: I think there are several parts of the paper writing should be improved to avoid confusion.\n\n* If I understand correctly, starting from Section 3.2, the definition of value functions are associated with the fictitious RMGs, instead of the rectangular uncertainty sets. In another word, one should interpret $V^{\\pi,\\sigma_i}_{i,h}$ through the definition in Eq.(26), instead of Eq. (5), which are different in the uncertainty set w.r.t. the inf operator.\n\n However, I did not find a declaration of this abuse of notation (and personally, I would recommend use different notations to avoid confusion). This is crucial because it makes significant difference in how to interpret the results. Under the definition of Eq.(26), the uncertainty set varies for different policies $\\pi$, rather than that one first fix the uncertainty set by an arbitrary reference policy $\\pi$ and then use that to define the NE/CCE.\n\n* The \"technical insights\" paragraph in Section 4.3 does not explain clearly how the proposed algorithms avoid the curse of multi-agency. This is crucial to evaluate the technical novelty of this work (also see my second point in the following).\n\n2. **The fundamental reason for avoiding curse of multi-agency is not clear to me**. \n\n* Comparing with previous works, this paper consider a different uncertainty set, it is not clear to me whether it is the key variation making the problem more tractable than before. Especially, by definition, the new uncertainty set only quantifies the uncertainty of the (weighted) marginal transition function. If this is the essential reason, claiming avoiding curse of multi-agency as advantage over previous work would be unfair.\n\n* Besides, this paper relies on generative model assumption, I'm also curious whether it is a common assumption in previous literature?\n\n3. **The proposed uncertainty set may not be sufficient to quantify the uncertainty**. I'm curious what would be the benefits of considering new uncertainty set, and is it reasonable to consider such set in practice? For example, under what scenarios (what kind of model difference between simulator and practice), the propose uncertainty set would be useful.\n\nBesides, technically speaking, the new uncertainty set may be insufficient to capture the sim-to-real gap. Note that the ratio $\\pi_h(a_i,a_{-i}|s) / \\pi_{i,h}(a_i|s)$ could varies a lot for different $a_{-i}$, given that this paper considers correlated policies. Consider the scenario where the sim-to-real gaps of $P^0_{h,s,\\textbf{a}}$ are very large for some $(s,\\textbf{a})$, while, coincidently, the learned CCE only has low ratio $\\pi_h(a_i,a_{-i}|s) / \\pi_{i,h}(a_i|s)$ on them. In that case, fictitious uncertainty set is not sufficient to quantify the uncertainty."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024breaking,\ntitle={Breaking the Curse of Multiagency in Robust Multi-Agent Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ySRsm6HDy5},\nnote={under review}\n}"
},
"abstract": {
"value": "Standard multi-agent reinforcement learning (MARL) algorithms are vulnerable to sim-to-real gaps. To address this, distributionally robust Markov games (RMGs) have been proposed to enhance robustness in MARL by optimizing the worst-case performance when game dynamics shift within a prescribed uncertainty set. Solving RMGs remains under-explored, from problem formulation to the development of sample-efficient algorithms. A notorious yet open challenge is if RMGs can escape the curse of multiagency, where the sample complexity scales exponentially with the number of agents. In this work, we propose a natural class of RMGs where the uncertainty set of each agent is shaped by both the environment and other agents' strategies in a best-response manner. We first establish the well-posedness of these RMGs by proving the existence of game-theoretic solutions such as robust Nash equilibria and coarse correlated equilibria (CCE). Assuming access to a generative model, we then introduce a sample-efficient algorithm for learning the CCE whose sample complexity scales polynomially with all relevant parameters. To the best of our knowledge, this is the first algorithm to break the curse of multiagency for RMGs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multi-agent reinforcement learning",
"Robust Markov games",
"Game theory",
"Distribution shift"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a4467f70b8ae967ad795e2b13688c47bbcde1621.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Breaking the Curse of Multiagency in Robust Multi-Agent Reinforcement Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ySmovxuDMi | HIVEX: A High-Impact Environment Suite for Multi-Agent Research | main | Active | Environment Benchmark;Multi-Agent Reinforcement Learning;Multi-Agent Systems;Critical Ecological Challenges | datasets and benchmarks | 3;3;5;6 | 4;4;4;3 | 2;1;3;3 | 2;1;2;3 | 3;2;3;3 | 4.25 | 3.75 | 2.25 | 2 | 2.75 | -0.777778 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the Weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. It provides comprehensive code for both evaluation and training.\n2. The task design is well-structured, and the benchmark’s support for image input is both rare and crucial for advancing multi-agent reinforcement learning (MARL) development."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces HIVEX, a benchmark for multi-agent reinforcement learning that addresses critical ecological challenges. It includes five environments with varying levels of difficulty and supports over ten agents, accepting both vector and image inputs. Episodes can last up to 5000 steps. The benchmark provides PPO as a baseline, running through different environments and presenting the results."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The methods tested are quite limited, with only PPO evaluated. Have you considered testing additional methods like MAPPO or MAA2C?\n2. The reward structure doesn’t clearly reflect the difficulty levels of the tasks. What is the best performance achievable with an optimal policy?\n3. The role of image input is unclear. How does your method utilize visual inputs? If tasks are still achievable without image input, what is the intended value of including them?\n4. The advantages of HIVEX compared to benchmarks like Melting Pot and Neural MMO remain unclear.\n5. How can this benchmark be expanded? Is it easy for users to create custom tasks or modify existing ones within your benchmark?\n6. Lack of innovation in new methods. How could the training pipeline or algorithm be modified to achieve better results?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the Weaknesses above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work uses the Unity engine to create multiple environments representing ecological challenges and allows each environment to have different difficulties. Moreover, each environment provides both vector observations and visual observations, which is more realistic than many existing environments\n\n2. Some environments allow their features to be procedurally generated and the test-time evaluation can use environments that have never been seen.\n\n3. This work provides evaluation results for each environment which can serve as the baselines for future works."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a benchmark for multiagent research. The benchmark provides environments about ecological challenges including wind farm control, wildfire resource management, drone-based reforestation, ocean plastic collection, and aerial wildfire suppression. This paper claims that the proposed benchmark presents more realistic environments than existing benchmarks, so the findings in the proposed benchmark have more potential to be equipped for real-world problems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper claims that the proposed environments are more realistic than existing environments. However, according to the description of the paper, this is done only by adding visual representations and low-level action/state space. It is unclear how accurately the environments can simulate the real-world environment, for example, the uncertainty of state transition. Therefore, the sim-to-real gap can still be considerably large. I know that the authors have discussed this in the limitation section, but it is indeed a problem that weakens the contribution of this work.\n\n2. This paper proposes environments for multiagent research. However, when generating baselines for evaluation, this work only tests the performance of the PPO algorithm. Why not try some multiagent RL algorithms as the environments are for multiagent research?\n\n3. The authors claim that Wildfire Resource Management and Ocean Plastic Collection are excluded from scalability tests because of fixed layout, agent count, and amount of plastic. Why these are fixed? The environments should allow for configuration like other environments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Does the framework offer a user-friendly interface that allows developers to modify parameters such as the reward function and the number of agents?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Each environment is accompanied by lucid diagrams, aiding the reader in comprehending the numerous influencing factors and configurations present. The visually appealing interface also intuitively conveys the rendering capabilities of the environment.\n2. The RELATED WORK section is thorough and informative, contrasting a multitude of existing multi-agent benchmarks, thereby enabling readers to grasp the current landscape of benchmark development in the multi-agent field and the underlying rationale for this study.\n3. The experimental section includes assessments conducted at different difficulty levels for each environment, indicating substantial untapped potential for foundational reinforcement learning algorithms. This offers researchers objective comparative data to understand the essential characteristics of each environment and its respective challenges."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel benchmark designed for multi-agent control systems, specifically tailored to address the challenges posed by climate change. This paper opens by emphasizing the critical nature of climate change and underscores the significance of climate research, thereby establishing the relevance and importance of the proposed benchmark. It proceeds to offer succinct introductions to each environment within the benchmark, detailing their contents and associated tasks via visually engaging interfaces and foundational setup descriptions. Subsequently, the paper employs classical PPO algorithms to execute comprehensive experiments across all environments and varying levels of complexity within the benchmark, effectively illustrating its exploratory potential and validity. Furthermore, it furnishes baseline performance metrics, facilitating future research endeavors on the benchmark. Overall, this work presents an innovative multi-agent cooperation benchmark within the climate domain, addressing the deficiency of suitable experimental platforms in this area and offering robust support for subsequent investigations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "environments or tasks? Highlighting the challenges or immediate needs for simulation environments within this domain could aid readers new to the field in appreciating the necessity of developing such a simulation environment.\n2. The descriptions of the environment setups appear fragmented, detracting from the overall coherence of the manuscript. Consolidating these details into a unified format, perhaps through a tabulated summary of ENVIRONMENT SPECIFICATIONS alongside brief overviews of key points, would enhance clarity and conciseness.\n3. Despite being a pioneering effort in proposing a climate-focused benchmark, the paper would benefit from a tabular comparison highlighting the attributes of the proposed benchmark relative to other multi-agent environments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- From Figure 26 onwards, the epsilon parameter was set to 0.2 in all experimental tests. However, according to the standard procedure in the PPO algorithm, epsilon should generally be set to 0 during testing, as this phase is intended solely to evaluate the performance of the trained policy without any further updates. Setting epsilon to a non-zero value may introduce unnecessary policy perturbations, potentially biasing the test results. I recommend that the authors revisit this configuration and set epsilon to 0 in testing to ensure the accuracy and consistency of the experimental results.\n- In Figure 144, the caption is too close to the figure, and other figures have similar problems.\n- \"Crossing the environment's boundary (a 1500x1500 square surrounding a 1200x1200 island) results in a negative reward of −100.\" However, in Table 6, from task 1 to task 9, the reward for crossing the border is 1, which does not match the description in this paper. For example, in task 8, if the agents' goal is to find fire, then the reward 'Crossed Border' should be set to 0."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is generally easy to follow and interesting to read.\n- Prior work hasn't extensively studies multi agent research on ecological challenges.\n- Drone-Based Reforestation is an interesting cooperative MARL task. The agents need to pick up seeds and recharge at the drone station, explore fertile ground near existing trees, and drop seeds while ensuring sufficient battery charge to return to the station."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes HIVEX which is an environment suite to benchmark multi-agent research focusing on ecological challenges. The environments include Wind Farm Control, Wildfire Resource Management, Drone-Based Reforestation, Ocean Plastic Collection, and Aerial Wildfire Suppression. Although Drone-Based Reforestation is an interesting cooperative MARL task, the other environments do not seem to be related to MARL."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Wind Farm Control: In the Wind Farm Control environment, the agents' primary task is to adjust wind turbines to positions aligned against the wind direction to maximize energy generation, receiving rewards based on each turbine's performance. However, there is no mention of direct interactions or mutual influence among the multiple agents, nor is it clearly stated whether cooperation or competition exists between them. \n- Wildfire Resource Management: Is \"neighbouring watchtowers\" referring to other agents? If so, why is there only 3 neighbors per agent? If not, then this environment is unrelated to MARL, as there is no competition or cooperation involved.\n- Ocean Plastic Collection: What is the advantage of implementing this environment in Unity. Is it merely to make the demonstration of the environment look more visually impressive."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A suite of multi-agent environments aimed at real-world critical ecological challenges, offering benchmarks for advancing research in areas like wildfire management, ocean plastic collection, and drone-based reforestation."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hivex,\ntitle={{HIVEX}: A High-Impact Environment Suite for Multi-Agent Research},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ySmovxuDMi},\nnote={under review}\n}"
},
"abstract": {
"value": "Games have been vital test beds for the rapid development of Agent-based research. Remarkable progress has been achieved in the past, but it is unclear if the findings equip for real-world problems. While pressure grows, some of the most critical ecological challenges can find mitigation and prevention solutions through technology and its applications. Most real-world domains include multi-agent scenarios and require machine-machine and human-machine collaboration. Open-source environments have not advanced and are often toy scenarios, too abstract or not suitable for multi-agent research. By mimicking real-world problems and increasing the complexity of environments, we hope to advance state-of-the-art multi-agent research and inspire researchers to work on immediate real-world problems. Here, we present HIVEX, an environment suite to benchmark multi-agent research focusing on ecological challenges. HIVEX includes the following environments: Wind Farm Control, Wildfire Resource Management, Drone-Based Reforestation, Ocean Plastic Collection, and Aerial Wildfire Suppression. We provide environments, training examples, and baselines for the main and sub-tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Environment Benchmark",
"Multi-Agent Reinforcement Learning",
"Multi-Agent Systems",
"Critical Ecological Challenges"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/2d737abe9ee19e08791608744d2643b1cbf96121.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "HIVEX: A High-Impact Environment Suite for Multi-Agent Research"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yTEwmr1TJb | Robots Pre-train Robots: Manipulation-Centric Robotic Representation from Large-Scale Robot Dataset | main | Active | Robot Learning;Foundation Model;Representation Learning | applications to robotics, autonomy, planning | 3;6;6;8 | 4;4;5;4 | 2;3;3;3 | 2;2;3;3 | 3;3;4;3 | 5.75 | 4.25 | 2.75 | 2.5 | 3.25 | 0.080845 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Why is it still necessary to attend to the manipulator region when there is already proprioception input?\n- What is the evaluation protocol in the real world? How are the environments varied between training and evaluation, and what would count as a success?\n- By attending the robot and task-relevant object, technically this would allow the representation to have stronger generalization capabilities. Perhaps the author should run some visual generalization experiments to find out if RPM enables better generalization."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is clear and well presented\n- The simulation experiments are rigorous and have a lot of diversity\n- The paper presents a novel metric for evaluating visual representations for robotics, and novel findings on how to integrate robotics information (states, actions) for training better representations"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper first proposes manipulation centricity, a new metric for evaluating representations for robotics, and finds that it is a strong indicator of success rates in downstream robotic manipulation tasks. Building on this insight, it proposes RPM, a framework for learning visual representation for robotics from large-scale robotics dataset. Experiments in both simulation and the real world show that RPM leads to improved manipulation centricity and higher task success rate when evaluated using behavior cloning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There could be more empirical analysis and intuition regarding how each loss affects manipulation centricity. Authors could explain intuitively how each loss contributes to better MC and visualize the gradcam of an encoder trained with each loss\n- The paper lacks comparison to more state-of-the-art baselines, such as VIP/LIV, or object-centric pre-trained representations such as POCR/HODOR\n- As I understand, the goal here is to make the representation pay attention to task-relevant regions e.g. robot and task-relevant objects. The authors have already demonstrated that this information can be obtained using SAM2. Have the authors considered directly using the masks as a form of supervision for training the representation? I think this could serve as a baseline, since RPM requires additional priveleged information that only a robotics dataset has, i.e. state and action, whereas we can generate segmentation masks for any general sources of data.\n- The paper could benefit from more extensive real robot experiments"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the tradeoff between the visual realism of the simulator vs simulator-specific training required to achieve high success rate and manipulation centricity? For example, in RoboCasa, manipulation centricity seems really low compared to others. How to compare it across simulators?\n\n1. How does \"manipulation centricity\" depend on the choice of camera intrinsics and extrinsic with respect to the robot body frame? \n\n1. R3M has been shown to be a poor visual representation model for robotic tasks, as compared to visual datasets [1] and pretraining image distribution matters. Why none of the baselines involve vision only representation backbones for comparison?\n\n[1] Dasari et al, 2023. An Unbiased Look at Datasets for Visuo-Motor Pre-Training, https://data4robotics.github.io/"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Detailed empirical validation across diverse tasks and simulation domains, plus real robot experiments\n1. well-motivated concept of manipulation centricity with clear metrics\n1. ablation studies and analysis of design choices \n1. insights about benefits of robot vs human datasets for pre-training\n1. Clearly written key research questions and contributions"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes pre-train visual representations for robot manipulation by focusing on manipulation centricity, robot datasets (instead of human activity datasets), and auxiliary loss objectives for dynamic alignment and time contrastive learning. To measure Manipulation Centricity, they use Jaccard similarity between the binarized Grayscale CAM and SAM2’s foreground vs. background predictions. The proposed model RPM outperforms simulation and real robot tasks compared to baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The work presents insights into the visual representations that focus on manipulation centricity and dynamic alignment perform better than existing approaches. However, there are certain assumptions whose implications are not clearly discussed.\n1. DROID is chosen as the robot dataset of choice instead of other larger dataset in OXE.\n1. RPM is a ResNet-based model instead of directly using / comparing to pretrained models like SAM2, etc.\n1. The \"manipulation centricity\" does not seem to improve at similar scale across different simulators, with respect to success rate. \n1. A concise definition of manipulation centricity and how it is calculated should be present in the main paper, rather than existing only in appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Congratulations on this work and thanks for contributing it to the robot learning community. \n\nI wished to clarify if during policy training does the visual encoder always remains frozen and whether you finetune the entire architecture? In the first part of the paper when evaluating your introduced metric it is clear that the encoder is frozen, I wished to confirm whether this is also the case for the reported task success rates and whether you considered finetuning the entire architecture?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper validates a pretraining objective for visual encoders that improves the performance of robot policy learning. \n\n- The paper introduces a novel metric for quantitatively analysing the features generated by visual encoders.\n\n- The paper validates its claims in both simulated and real-world experiments.\n\n- The paper includes analysis of the representations learnt in various approaches.\n\n- The paper is concise and clearly presents the authors claims and results to validate these claims."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a metric (manipulation-centricity) for assessing the ability of pretrained visual encoders to model task-relevant features for robot manipulation tasks; as well as a novel pretraining objective (RPM) that incorporates the robot dynamics information available in robotics datasets. The authors first establish a correlation between the metric they define and the success rate of policies that leverage the features generated by pretrained visual encoders. Having established a positive correlation they seek to design a pretraining objective for visual encoders and evaluate its performance on their proposed metric and the downstream policy success rates. The novelty in the pretraining objective the authors introduce is a dynamics alignment term which takes the form of the InfoNCE applied to embeddings of a sequence of state action pairs and image embeddings. This novel dynamics alignment term is combined with existing behaviour cloning and temporal contrastive terms to give the full learning objective. They apply this objective function to pretrain visual encoders for manipulation tasks and demonstrate that this pretraining objective leads to improvements on their metric and downstream task performance in both simulated and real-world experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper appears to focus on pretraining and freezing visual encoders while training policies on top of these encoders. It wasn't clear if they finetune all parameters on a given task (in the simulation and real-world policy learning experiments). In general, this may be relevant as policy performance can improve with finetuning of the entire architecture. I may have missed this in the paper but it would be good to see if the pretraining results include finetuning of the overall architecture on the robot manipulation task. \n\n- The similarity scores for the introduced metric are quite small and rely on thresholding the binarization of the Grad-Cam output. It seems like this is a good proxy for rudimentary manipulation tasks but it doesn't necessarily seem to be a metric that will generalise to more complex settings. The metric also relies on annotations or knowledge of the task relevant features within the image. \n\n- The pretraining dynamics alignment term incorporates chunks of state-action pairs with a history of 3 being cited as optimal. When it comes to highly dynamic tasks it is not clear how the alignment between individual images and such a history will necessarily be beneficial. For instance a single static image does not necessarily convey rich signals for dynamics (there is some signal but it is imperfect) when compared to a history of images, aligning a state action history and a static image seems counterintuitive to me, I question whether the improvements observed are optimal relative to other methods of incorporating dynamics information. If the dynamics varied quite a lot between demonstrations on the same robot platform I question how this will effect performance. Clearly this term is working as demonstrated in the paper but I do have reservations over its usefulness and whether it is the optimal approach to incorporating dynamics information."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "(see weakness)"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The proposed heuristics of using gradient heatmap to indicate manipulation performance is interesting. It can be linked to existing work in visual affordance prediction. \n2. Aligning the vision representation with the robot proprio state and actions is interesting, and may encourage the model to focus more on task-relevant information. \n3. The writing is easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a representation learning approach named RPM that learns to encode images into low-dimensional states that are aligned with the proprioception state through contrastive learning. \n\n1. The authors showed that RPM outperforms existing learned robot representations on 20 tasks selected from 4 benchmarks. \n2. The authors proposed to use gradient heatmap as a heuristic named as \"manipulation-centricity\" to determine how much the learned representation contributes to the final manipulation task performance. \n3. RPM is showed to have highest manipulation-centricity than existing learned robot representations, and is evaluated with a real UR5 robot in lift, sweep and rearrange tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The technical contribution is limited. The authors spent 4 pages between the introduction and experiment section to discuss their method. But, in fact, the only new technical point that the authors proposed is equation (1), which uses InfoNCE loss to bring visual features and proprio state/aciton closer. This looks hand-wavy. Even this innovation raises questions, as the proprio state/action only describe the robot trajectory but not the visual scenes. This is very likely to make the model overfit to the learned scenarios. \n\n2. The evaluation is questionable. RPM is evaluated on 20 tasks, 10 from MetaWorld, 4 from DexArt, 3 from RoboCasa, 3 from RoboMimic. However, these benchmarks combined include hundreds of tasks. Why just select these 20 particular tasks? Moreover, the \"Can\" task in robomimic is mentioned as challenging in this paper, but it is just a simple pick-and-place task of a moderate-size can. The evaluation raises serious questions about the generalization of this RPM method. \n\n2. Unnecessary discussion on how to use the human video datasets. The authors spend a considerable amount of efforts to explain the policy learning from human videos. But in the end, the conclusion is to not use human data. Human action data is abundant and contains rich and diverse behavior. If the authors' proposal is to not use them and use robot demonstrations, which in fact is the standard robot learning setup. I would prefer the paper to focus on the studied setting. \n\n3. Needs a more comprehensive ablation study. RPM is pre-trained with the DROID dataset, a large and relatively clean robot dataset, while many baselines that RPM compared against are not trained with DROID. \n\n4. In Table.1, HRP and VC-1 achieves better success rate than R3M-DROID, yet R3M-DROID's gradient heatmap focus more on the objects. According to the key finding, R3M-DROID shall perform better. \n\n4. The term \"dynamics\" is mis- and over-used, confusing in some sections."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Manipulation-centric robotic representation training on large-scale robot dataset boosts policy performance on manipulation tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024robots,\ntitle={Robots Pre-train Robots: Manipulation-Centric Robotic Representation from Large-Scale Robot Dataset},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yTEwmr1TJb},\nnote={under review}\n}"
},
"abstract": {
"value": "The pre-training of visual representations has enhanced the efficiency of robot learning. Due to the lack of large-scale in-domain robotic datasets, prior works utilize in-the-wild human videos to pre-train robotic visual representation. Despite their promising results, representations from human videos are inevitably subject to distribution shifts and lack the dynamics information crucial for task completion. We first evaluate various pre-trained representations in terms of their correlation to the downstream robotic manipulation tasks (i.e., manipulation-centricity). Interestingly, we find that the “manipulation-centricity” is a strong indicator of success rates when applied to downstream tasks. Drawing from these findings, we propose $\\textbf{R}$obots $\\textbf{P}$re-train robots with $\\textbf{M}$anipulation-centricity ($\\textbf{RPM}$), a foundation representation learning framework capturing both visual features and the dynamics information such as actions and proprioceptions of manipulation tasks to improve manipulation centricity. Specifically, we pre-train a visual encoder on the DROID robotic dataset and leverage motion-relevant data such as robot proprioceptive states and actions. We combine a novel contrastive loss that aligns visual observation with robot proprioceptive state-action dynamics with a BC-like actor loss that predicts action during pre-training and a time contrastive loss. Empirical results across 4 simulation domains with 20 tasks verify that RPM outperforms the strongest baseline method by $\\textbf{15.6}$%. Moreover, RPM boosts the performance of data-efficient learning with a UR5e arm on 3 real-world tasks by $\\textbf{76.9}$%."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Robot Learning",
"Foundation Model",
"Representation Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/bccfb3d0ff37f006d850b4580da6341307b5acfb.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Robots Pre-train Robots: Manipulation-Centric Robotic Representation from Large-Scale Robot Dataset"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yUC8pU508S | APE: Faster and Longer Context-Augmented Generation via Adaptive Parallel Encoding | main | Active | Parallel Encoding; Context-Augmented LLM; Efficient Inference; Context Window Expansion | generative models | 3;5;5;6;6 | 4;4;4;3;2 | 4;3;3;3;3 | 2;2;2;3;2 | 2;3;2;4;3 | 5 | 3.4 | 3.2 | 2.2 | 2.8 | -0.684653 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Will we conclude anything between the task and the choice of hyperparameters?\n- The latency improvement in Table 2 would be more interesting if other baselines are included."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The method sounds intuitive. APE extends the usable context length for language models, overcoming limitations in the context window and enabling efficient handling of much larger inputs without additional training.\n- The performance is great esp. in latency\n- The analysis in Section 3 looks quite novel and interesting"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents Adaptive Parallel Encoding (APE) in order to improve efficiency and performance in language models handling RAG and ICL tasks. APE overcomes limitations in sequential encoding, which requires re-encoding context data and suffers from context window restrictions, by employing parallel encoding with adaptive adjustments. These changes include a shared prefix, scaling factors, and a modified attention temperature, aligning parallel encoding closely with sequential encoding. Experiments show APE achieves a high speedup in long contexts and outperforms slightly other methods in accuracy for long contexts."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The performance seems quite sensitive on specific parameters"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "* Would it be possible to include RULER as an evaluation for long-context task. It has become more standard for LC. Showing the impact of APE on needle in a haystack would be interesting as well.\n* Are the tasks where APE fails. For example, Does APE still hold when used for Humaneval or RepoBench?\n* What would be the impact of finetuning with examples that had APE applied to then. Would it stop degradation?\n* Can you provide an analysis of the impact of changing the \"window\" size you use for parallel decoding."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The paper evaluates across many different models of different architectures. \n* The method is training-free which enables it be easily tested across many model -- assuming a working implementation.\n* Shows significant gains on efficiency and would have an real-world impact on deployed models. For example, enabling Pre-caching contexts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The papers introduce a simple and effective modification to parallel decoding which can be used a training-free drop-in method to substantially increase efficiency of inference while retaining most of its accuracy."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Paper does not provide implementation of the method -- this can make it easier for reproducibility. Would it be possible to provide that?\n* The paper fixes a windows size but shows no empirical or theoretical analysis of choosing different sizes of this window: either from an efficiency or quality perspective. It would be great to have empirical analysis.\n* No complexity analysis written out. This could be particularly relevant if we want to vary window above."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Formatting issue: In Table 1 #413 Gemma-2-9B line has miss alignment."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The research focuses on a practical and significant challenge, particularly relevant to real-world LLM applications like RAG.\n2. The paper provides insightful observations and analysis regarding the feasibility of parallel encoding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the performance degradation issue in parallel encoding and analyzes the distribution patterns of key and query vectors. This paper analysis reveals that key vector distributions remain relatively similar and query vector distributions can be combined. Based on these findings, they propose APE (Adaptive Parallel Encoding) to mitigate performance losses in LLMs during parallel encoding. Specifically, APE incorporates three key components: shared prompt prefix, position-aware dynamic scaling, and attention temperature adjustment. We evaluated APE on three LLMs (Llama-3-8B, Llama-2-7B, Gemma-2-9B) across multi-document QA and few-shot ICL tasks. Results demonstrate performance improvements compared to full attention and baseline methods, with latency benchmarks showing significant speedup over full attention."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper demonstrates limited evidence for performance superiority over baselines across various tasks. Specifically:\n - The evaluation is restricted to only two task categories from LongBench (multi-document QA and few-shot ICL), which is insufficient to demonstrate the method's effectiveness across diverse scenarios.\n - For multi-document QA tasks, the evaluation is conducted on a limited subset of LongBench, with additional testing only on one RAG dataset in Sec 6.3. This narrow scope of evaluation fails to provide comprehensive evidence of the method's effectiveness. And Table 5 lacks crucial baseline comparisons, making it difficult to assess the relative performance improvements.\n - The evaluation of other task types in Sec 6.2 is inadequate, missing essential baseline comparisons needed for meaningful performance assessment.\n\n2. And this paper lacks sufficient component analysis and validation.\n - The paper lacks comprehensive ablation studies to isolate and validate the contribution of each proposed component. This makes it impossible to determine whether performance improvements stem from specific modules (e.g., attention temperature adjustment) or their combination.\n - There is insufficient analytical evidence demonstrating how the three proposed components effectively address the challenges identified in the earlier sections of the paper. The causal relationship between the proposed solutions and the observed improvements needs stronger empirical support.\n - The absence of detailed component-wise analysis makes it difficult to justify the necessity and effectiveness of each module in the proposed architecture."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How does the computational complexity of APE scale with increasing context length compared to sequential encoding?\n2. Why does the method perform particularly poorly on code completion tasks with continuous long contexts?\n3. How sensitive is the method to the choice of hyperparameters (scaling factor and attention temperature) across different models and tasks?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The authors provide thorough empirical analysis to understand the behavior of attention mechanisms and KV states. \n2. The method is practical as it requires no additional training and can be implemented with minimal modifications to existing architectures. \n3. Comprehensive evaluation across multiple tasks (ICL, RAG, long-context understanding)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Adaptive Parallel Encoding, a method to improve the efficiency of large language models when processing multiple external contexts. APE pre-caches key-value states of contexts separately and enables position reuse during inference. The method consists of three key components: a shared prefix to align initial token distributions, a scaling factor to offset increased attention weights, and lower attention temperature to focus on semantically important tokens. The authors demonstrate that APE achieves a 976× speedup for long context generation while maintaining 93% accuracy compared to sequential encoding."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The performance evaluation is primarily focused on 8k context length, which feels insufficient given that many open-source LLMs now support context lengths of 128k or more. This restricted evaluation scope makes it difficult to assess the method's scalability to longer contexts. \n2. Compared to sequential encoding, APE introduces a non-negligible performance degradation at the 8k length scale, raising concerns about its effectiveness at longer context lengths."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In efficiency analysis, the claim `976x faster` might be a little bit overclaiming because there is no performance (accuracy) evaluation on 512k. Can you add relevant benchmarks? (InfiniteBench: https://github.com/OpenBMB/InfiniteBench/, LOFT: https://github.com/google-deepmind/loft, RULER: https://github.com/NVIDIA/RULER)\n\n2. Table 5: what is the average context length of APE and real-world, end-to-end latency, including retrieval latency? As far as I understand the CRAG paper, the context length is quite limited with a smaller size (2k~4k) due to the speed of retrieval.\n\n3. What do you mean that `query and generation lengths were fixed at 256 tokens` in line 464? Is that mean, the chunk size of APE is 256? As far as I see the latency, I think 256 means the chunk size, which is NOT used in performance evaluation. Do you have any performance evaluation with 256 chunk size? If my understanding is correct, then the reported latency is might be significantly different with real-world scenario."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "- Nice empirical analysis to show value state can be merged & key states are similar for each context chunk.\n- Speeding up prefill stage drastically."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "APE proposed the adaptive parallel encoding of LLM prompt, to speedup the prefill stage without limited by pretrained context length. APE add sink tokens (shared prefix) and split the context into chunks and perform attention inside of it. During decoding stage, it attends to every previous KV without adjusting the RoPE, so it will not exceed the pretrained context length."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The methodology might be too similar, with fixed step sparse attention with sink token. When we draw the attention mask of APE, the attention mask itself is extremely similar to the step attention mechanism. However, the difference between APE and APE is that APE reuses the RoPE embeddings rather than just extends them. However, treating the RoPE index is the same as streaming with fixed step sparse attention. Therefore, I think the scientific contribution of methodology is limited. \n\n2. The performance of the method is mostly proved by empirical results. I do not think these empirical results are weaknesses, but I have concerns about the lack of important comparisons with previous technologies to extend the context window and speed up prefill.\n\n- Lack of comparison with training-free context extension method (Self-Extend: https://github.com/datamllab/LongLM).\n\n- Lack of comparing with pretrained long context LLM. This might not be a big problem, but we need to know what performance is the upper limit if the LLM is already trained in a long context. The LLMs used for experiments are all short context (and maybe a little bit old at this point) models. I am concerned that using a long-context model such as Qwen2 or Llama3.1 that supports 128k tokens with a large-scale GPU cluster might lead to better performance than APE.\n\n- Lack of comparing with techniques to speed up the prefill stage.\n\n--- Some minor improvements ---\n\n- Figure 3 and 4 are quite hard to understand. Can you increase the font size? And also I do not think we need visualized every layers. Can you randomly sample some layers and show them only? (e.g., range(0, 32, 4)).\n- In Table 1, there is a typo in Gemma2. Maybe you shifted the columns to the left.\n- Tables 2, 4, and 5 might have a too large a size.\n- Table 2 should improve the formatting."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024ape,\ntitle={{APE}: Faster and Longer Context-Augmented Generation via Adaptive Parallel Encoding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yUC8pU508S},\nnote={under review}\n}"
},
"abstract": {
"value": "Many modern language model applications, such as RAG and in-context learning, require the efficient combination of multiple external contexts to generate a response. Directly incorporating these contexts sequentially presents two challenges: (i) re-encoding each combined selection of contexts for every request creates a significant computational burden. (ii) concatenating selected contexts into a single sequence often exceeds LLM's context window limit. In this work, we explore the promising potential of parallel encoding as a solution to pre-cache the KV states of each context separately, allowing for direct loading and position reuse during inference. However, due to the misalignment of attention distribution, directly applying parallel encoding results in significant performance degradation. To enable accurate and efficient parallel encoding, we propose adaptive parallel encoding, which brings a shared prefix, additional scaling factor, and lower attention temperature to align the distribution of parallel encoding with sequential encoding. Experimental results on both ICL and RAG tasks tasks demonstrate an average improvement of 7.8% over standard parallel encoding. Comparing to sequential encoding, APE enhances performance by 2.9% for long context understanding while preserving 93% accuracy in few-shot learning. Efficiency evaluation demonstrates that APE achieves a 976$\\times$ speedup for a 512K context-augmented generation with a 256-token response."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Parallel Encoding; Context-Augmented LLM; Efficient Inference; Context Window Expansion"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/4a7013f32f83a80078f0e97271525590607672d4.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "APE: Faster and Longer Context-Augmented Generation via Adaptive Parallel Encoding"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yUefexs79U | Quantitative Approximation for Neural Operators in Nonlinear Parabolic Equations | main | Active | Neural operators;Partial differential equations;Nonlinear parabolic equations;Quantitative universal approximation | learning theory | 5;5;5;8 | 3;4;5;4 | 3;3;3;4 | 3;3;3;4 | 3;3;4;4 | 5.75 | 4 | 3.25 | 3.25 | 3.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Can the authors provide numerical experiments to validate the theoretical results and compare the performance of their neural operator with traditional PDE solvers or other neural operator architectures?\n2. The dual of $L^\\infty$ is more complex than that of $L^1$. The authors should consider revising or removing this aspect throughout the paper.\n3. Could the authors address how the proposed method handles periodic boundary conditions, which are essential for applying FNOs? Additionally, could they discuss the role of the trace operator and how to ensure sufficient regularity for the boundary conditions when defining the neural operator $\\Gamma$?\n4. Can the authors elaborate on how the proposed framework can be extended to handle more general PDEs, specifically the Navier-Stokes equations mentioned in the abstract with their specific projection operator and boundary conditions?\n5. In PDE theory, extending local solutions to global solutions is a challenging and significant issue. Although the paper acknowledges the challenge of extending local solutions to global ones, it does not provide concrete solutions or insights. Could the authors elaborate on this challenge and clarify the practical relevance of Corollary 1, especially regarding its error estimate for long-time solutions?\n6. How does the choice of basis functions $(\\phi$ and $\\psi)$ in the neural operator affect the approximation error rates? Could the authors provide specific examples to illustrate these rates for different basis functions?\n7. Regularity of the solution space is crucial in numerical approximation. However, this paper does not incorporate this aspect into an error analysis. Could the authors characterize the impact of function regularity on the error estimate?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper presents a quantitative approximation theorem for approximating solution operators of nonlinear parabolic PDEs using neural operators.\n2. The authors show that the depth and number of neurons in their neural operators do not grow exponentially with the desired accuracy, potentially mitigating the \"curse of parametric complexity.\"\n3. The proof is constructive, leveraging the connection between Picard's iteration and the forward propagation through the layers of the neural operator.\n4. The proposed framework may be potentially generalizable to other nonlinear PDEs solvable by Picard's iteration, including the Navier-Stokes, nonlinear Schrodinger, and nonlinear wave equations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This research focuses on a new method for approximating the solution operators of nonlinear parabolic PDEs using neural operators. The authors aim to bridge the gap between theoretical understanding of neural operators and their practical application as PDE solvers by developing a quantitative approximation theorem. This theorem demonstrates that neural operators, specifically designed with a structure inspired by Picard's iteration, can approximate solutions of parabolic PDEs efficiently without experiencing the exponential growth in model complexity often associated with general operator learning. The authors provide examples of how their method can be applied to Fourier neural operators (FNOs) and wavelet neural operators (WNOs), highlighting the potential for this approach to solve a wide range of nonlinear PDEs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The theoretical results are not supported by numerical experiments, making it difficult to assess the practical performance and efficiency of the proposed neural operator architecture.\n2. The paper focuses on a specific class of parabolic PDEs amenable to analysis via Banach's fixed point theorem. The applicability to more general PDEs, particularly the Navier-Stokes equation mentioned in the abstract, is not demonstrated.\n3. The paper does not adequately address the challenge of handling periodic boundary conditions, a crucial aspect for applying FNOs.\n4. The paper does not discuss the role of the trace operator and how to ensure sufficient regularity for the boundary conditions when defining the neural operator $\\Gamma$.\n5. The assumptions on the operator L and the nonlinearity F are quite strong, potentially limiting the scope of applicability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. I do not think it is easy to find a Green's function or design one, which I see as the main drawback of the paper. Perhaps it is not that hard for the linear parabolic PDE. (addressed in the Questions below). How can one go about finding these (for specific cases or in general) or is this mainly constructive? Or do you only need to characterize the regularity of the Green function and how can you do that, if that is the case?\n2. Could you clarify if the error being log ((1/eps)^{-1}) implies exponential dependency?\n3 (a). How can I interpret K^{(l)}_N? W are weights, b are biases, but I am not sure what K (the rank) is (unless it is specific to the Neural Operator?)\n(b) Can you clarify (or add in the paper to make it more self-contained (see point 1 in Weakness) what is the rank of a neural operator? I do not believe it is defined in your paper (I checked other sources to understand). \n4. It seems the first 2 sentences of your Discussion are contradictory: you mention it is a limitation that your method only applies to parabolic PDEs, but the next sentence says your approach is not restricted to parabolic PDEs. Perhaps you could clarify that it is not truly a limitation then, and is just a first step?\n5. For the long-time solution, how reasonable is it that u(T) again falls into Ball(L_\\infty, R) ? Does that limit the expressiveness of the solution of the PDE?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "This is a highly rigorous and technical paper. The details are presented clearly and I am able to understand the authors' motivation and thought process in developing the theory. I think this is a great theoretical contribution that has solid foundations in PDE theory to better understand Neural Operators (of which there are varieties like Fourier, Wavelet, etc.) and unifies all of them in this fairly decent class of PDEs. I hope to see extensions of this work (that the authors mentioned there would be) to more general classes of PDEs but this is a significant start. Using neural operators as solvers for PDEs has a wide array of applications, particularly in scientific computing and other \"AI4Science\" domains. What is interesting is the method and analysis they use to bypass, by appropriately selecting the basis functions within the neural operator, the exponential growth in model complexity. They also do not seem to have too many unreasonable assumptions (this I'm not highly confident about). Error rates with respect to truncation N make sense. There is a lot of scope for future explorations with different architectures and PDEs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors derive the approximation rate of solution operators for the class of nonlinear parabolic partial differential equations (PDEs), contributing to the quantitative approximation theorem for solution operators of nonlinear PDEs. They show that neural operators can efficiently approximate these solution operators without the exponential growth in model complexity, thus strengthening the theoretical foundation of neural operators. An innovative link between Picard iteration and neural operators is found and the authors also leverage solid PDE theory (using Duhamel's principle). This work provides a very good theoretical foundation for neural operators (and generalizes previous works on Neural operators such as Fourier Neural Operators, etc.)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper could provide a bit more intuition and background on Neural Operators to be a bit more self-contained. This could be presented in the Supplementary.\n2. There are no numerical experiments (but for this paper I do not think this is a big weakness given that I believe the main contribution of theory is solid). Nevertheless, it would be nice to see in future (if it is even possible to implement all these different basis expansions, etc.)\n3. I do not think it is easy to find a Green's function or design one, which I see as the main drawback of the paper (if there is to be a practical extension for Deep Learning). Perhaps it is not that hard to find one for the linear parabolic PDE. (addressed in the Questions below).\n4. Minor: typo on line 182 \"elliptic operato,r \" \nI cannot think of any more now, but it depends on my questions below as well as I may not have understood everything."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "### Additional points requiring clarification\n1.\tThe paper mentions that Theorem 1 avoids the curse of dimensionality. However, this is not entirely correct: it seems that in the statement of the theorem, the involved constant $C$ may still grow exponentially in $d$?\n2.\tTheorem 1 requires $N$ to be chosen in such a way that $C_G(N) \\leq \\varepsilon$. Is it possible to quantitatively relate $N$ and $\\varepsilon$, e.g., in the FNO case?\n3.\tP.3: You state that under suitable conditions on $\\mathcal{L}$ and $F$, the problems (P) and (P’) are equivalent, if the function $u$ is sufficiently smooth. Could you give some precise references here?\n4.\tAppendix A does not fully clarify why the considered operators satisfy Assumption 1. For example, A.1 states a bound on the Green function, but does not make explicit why this bound implies Assumption 1. For readers less familiar with PDE theory it would be useful to make explicit (at least for one of the sections A.1, A.2, …) why this bound implies that Assumption 1 is satisfied. \n5.\tSimilar results to Proposition 1 are well-known in the literature. Can you relate Proposition 1 to the literature more precisely, outlining differences to existing results in the literature? \n6.\tFourier neural operators: why do you only consider the case $d=1$ here? \n7.\tCould you clarify if WNOs with Haar wavelets, as defined in Appendix C.2, have been used in previous papers?\n8.\tWhat is the role of $\\tilde{D}$ in Appendix C.1? Can’t we just set $\\tilde{D}=D$?\n\n\n### Additional comments:\n\n-P.4, l182: it should be operator, not operato,r"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Overall, the paper is well-written and structured. The results hold in quite general settings: Assumption 1 on the differential operator $\\mathcal{L}$ of the PDE is rather general and includes, e.g., Laplace operators with different types of boundary conditions. Assumption 2 on non-linearity seems more restrictive and mainly limited to power-type non-linearities, but the obtained results are useful even in the case $F=0$, which is also covered by the setting. Assumption 3 is crucial to make the approach work, based on an expansion of the underlying Green's function. In the supplementary material the paper specializes the setting to certain types of Fourier neural operators and Haar wavelet neural operators, demonstrating the usefulness of the result."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper defines a neural operator based on Picard iteration and provides a quantitative approximation theorem for learning the solution operator of a general class of non-linear parabolic PDEs. The paper starts with a local well-posedness result for the considered PDEs, then introduces the considered neural operators, states the quantitative approximation theorem, provides a sketch of the proof and discusses extensions to longer time-horizons. The supplementary material contains proofs and shows how the result can be applied to Fourier Neural Operators (FNOs) and Haar Wavelet Neural Operators (WNOs)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The setting is formulated very abstractly, making reading potentially difficult for readers less familiar with general PDE theory. The paper does not discuss how the constructed neural operators could be implemented. In relation to existing neural operators (FNOs, WNOs) it remains unclear in what generality they are covered. Relation to existing literature is not always clear. I make these points more specific in the questions posed below.\n\nGenerally, I think this could be a valuable contribution, enhancing our understanding of mathematical foundations of neural operators. However, currently there seems to be a mistake in the proof of Theorem 1, affecting the construction of the neural operators and potentially making implementation infeasible. Consequently, at this point I need to recommend rejection, but I would be willing to increase my rating if this issue can be addressed convincingly. \n\n### Problem in the proof of Lemma 5\n\nThe main issue I currently see is that there appears to be a problem in the proof / formulation of the neural operators. The issue concerns the use of the time horizon $T$ instead of time variable $t$. In the definition of $\\Phi_N$ in lines 264-269, in the first line the time integral is from $0$ to $t$ (the variable of the function to be approximated). In the next line, the integral with respect to $\\tau$ is replaced by the inner product $\\langle \\psi_m, F(u) \\rangle$. However, this means you are taking the integral up to $T$, not just up to $t$. \nThe same problem can be found in the proof of Lemma 5 in the Appendix (lines 1245-1248), which does not seem to be correct in its current form. It seems that, to address this, the neural operator architecture would need to be modified, by making also the inner products $\\langle \\psi_m, u \\rangle$ in the definition of the operator $K^{(\\ell)}_N$ depend on the time variable $t$.\nHowever, this would make a crucial difference from the perspective of computational effort required to evaluate the neural operator. In the current notation, to evaluate $K^{(\\ell)}_N$ only one inner product needs to be evaluated (the same for all times $t$). It seems that to fix the issue mentioned above, for each time $t$ a different inner product would need to be evaluated, which may be computationally infeasible (as at any given hidden layer, you are computing inner products with output functions from previous hidden layers). Here a thorough discussion is required.\nEven without this issue, a more thorough discussion on implementation would be needed: The approach crucially relies on evaluating inner products, by definition of $K^{(\\ell)}_N[u]$ . To compute such inner products (between the hidden layer functions and the basis functions $\\psi_m$) in practice, additional discretization is required at each layer; for each function evaluation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "My primary concern is with the ReLU network approximation, specifically in the correspondence between the neural operator structure defined on page 6 and Lemma 2. In Eq. (24), you define the mapping, but it remains unclear how this mapping aligns with the neural operator structure described earlier. Please explain how the neural operator structure in page 6 result in eq (24). A clearer explanation or justification is needed to ensure consistency between these sections.\n\nAdditionally, I noticed that the notation $\\Phi$ represents one step (either exact or approximated) of the iteration. However, on page 6, within each iteration, the activation is only used once, which is a shallow neural network with a single hidden layer. The reference to Guhring et al. (2020, Theorem 4.1) pertains to deep neural networks, which might not directly apply to this context. I suggest finding a more appropriate reference for the approximation result, specifically one that addresses shallow networks.\nAnother minor question is that, I think your remark “the curse of dimensionality that occurs in conventional neural network approximations does not appear here” in page 8 does not make sense. As addressed by yourself, the curse of dimensionality is hidden in Assumption 3, the representation of the Green’s function of the linear equation.\n\nAnother minor question is that, I think your remark “the curse of dimensionality that occurs in conventional neural network approximations does not appear here” in page 8 does not make sense. As addressed by yourself, the curse of dimensionality is hidden in Assumption 3, the representation of the Green’s function of the linear equation."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Same as summary."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the approximation property of the neural operator for representing the solution operator for nonlinear parabolic equations. The key idea is that, the solution of the equation, written as the Duhamel’s integral, can be obtained from Picard iteration, with exponential convergence. The authors use a deep neural operator to approximate the Picard iteration process, where each layer approximates one step of Picard iteration. Overall, the paper is well written with clear explanation for all the ideas."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Need for clarification on ReLU approximation. Detail in Questions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024quantitative,\ntitle={Quantitative Approximation for Neural Operators in Nonlinear Parabolic Equations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yUefexs79U},\nnote={under review}\n}"
},
"abstract": {
"value": "Neural operators serve as universal approximators for general continuous operators. In this paper, we derive the approximation rate of solution operators for the nonlinear parabolic partial differential equations (PDEs), contributing to the quantitative approximation theorem for solution operators of nonlinear PDEs. Our results show that neural operators can efficiently approximate these solution operators without the exponential growth in model complexity, thus strengthening the theoretical foundation of neural operators. A key insight in our proof is to transfer PDEs into the corresponding integral equations via Duahamel's principle, and to leverage the similarity between neural operators and Picard’s iteration—a classical algorithm for solving PDEs. This approach is potentially generalizable beyond parabolic PDEs to a range of other equations, including the Navier-Stokes equation, nonlinear Schrödinger equations and nonlinear wave equations, which can be solved by Picard's iteration."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neural operators",
"Partial differential equations",
"Nonlinear parabolic equations",
"Quantitative universal approximation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/4d1bcdee15c02ab8fdccbcbd3a0b5fc93fb25b41.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Quantitative Approximation for Neural Operators in Nonlinear Parabolic Equations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yVGGtsOgc7 | Disentangling Representations through Multi-task Learning | main | Active | zero-shot generalization;disentanglement;representation learning;multi-task learning;interpretability;computational neuroscience;evidence accumulation;world models;cognitive maps;continuous attractors;RNNs;transformers | applications to neuroscience & cognitive science | 3;6;6;6;8 | 4;4;4;2;4 | 2;3;3;3;3 | 1;3;3;3;3 | 2;4;3;3;3 | 5.8 | 3.6 | 2.8 | 2.6 | 3 | -0.0625 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In Equation 6, could you clarify if $x_{in}$ is the encoded input, i.e., $f(x)$?\n\n- Does the dimensionality of the latent state Z influence the results? Also, does the dimensionality of the encoded input play a significant role? Have these aspects been examined?\n\n- Have you conducted experiments with other encoding functions, such as a quadratic mapping? \n\n- In Figure 5b, the blue curve for GPT appears to increase at the end. Could you clarify the reason behind this behavior?\n\n- Since the noise at each time step is independent, temporal information isn’t modeled in this setup. How might the results differ if a transformer with self-attention layers was used? Would such a setup still yield disentangled representations?\n\n- On the other hand, if the inputs contained some temporal structure, how might this affect the nature of the representations?\n\n- Have you tested the framework with much larger values of $N_{task}$ and $D$ (e.g., over 100)?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper presents theoretical results that establish specific conditions—relating to the number of tasks, input dimensionality, input noise, and more—that lead to the emergence of abstract and disentangled representations in agents solving multi-task evidence aggregation classification tasks. The authors conduct thorough experiments across several architectures (RNNs, LSTMs, and transformers) to validate their theoretical results, showing that even architectures like GPT-2 can exhibit these properties. The work bridges AI and neuroscience, with potential explanations for how abstract, human-aligned representations might arise in artificial neural networks, making it valuable for both fields."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the important question of how abstract (linear and approximately orthogonal) and disentangled (orthogonal without the necessity of linearity) representations can emerge in biological and artificial agents. \nThe authors present both theoretical and experimental results demonstrating that multi-task learning, specifically within the framework of evidence aggregation classification tasks, can lead to the development of such representations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "One notable limitation of this work is the assumption of factorization, as acknowledged by the authors. Additionally, the theoretical framework is tailored to a specific type of multi-task learning problem—evidence aggregation classification with linear decision boundaries—which may not capture the full diversity of tasks and decision boundaries that agents encounter in dynamic environments. It would be valuable to explore how these ideas generalize to other multi-task learning scenarios. Furthermore, the experiments focus on synthetic data with simple latent structures. While these experiments effectively support the theoretical results, testing the framework on slightly more complex data could provide further insights."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Most examples and empirical validations focus on low-dimensional cases (e.g., D=2). Could you provide examples using moderately higher-dimensional datasets, such as CIFAR or dSprites, to illustrate how N_task scales with D in these settings? \n2. For a given D, what is the maximum number of tasks N_task can be learned with low error? Additionally, how do the test and generalization errors scale with N_task if we keep the number of training data in at a realistic scale?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Originality: the study of the emergence of disentangled representation in temporal tasks and models is relatively new. \n2. Quality: the empirical validation is comprehensive. \n3. Clarity: the theory is well explained. The background section is also very nicely written and thorough. \n4. Significance: The paper addresses the key problem of learning the world model in representation learning and also discusses in detail the biological relevance. The results offer meaningful insights for future research on disentangled representation in both artificial systems and the brain."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents theoretical and empirical results showing that, in multi-task evidence aggregation classification tasks, representations become disentangled as the number of tasks N_task greatly exceeds the input dimensionality 𝐷 when the agent solves the tasks optimally."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The theory assumes that agents are optimal multi-task classifiers, which may not be achievable in realistic settings where the input dimension D is already large and the number of tasks N_task >>D. This raises questions about the practical relevance of the regime considered in the paper. Additionally, it’s difficult to imagine a large number of orthogonal tasks in real-world settings. For instance, in the dSprite dataset, as the authors noted, how could a meaningful, larger set of tasks be constructed, and what implications would this have for the applicability of the framework?\n2. The connection to neuroscience also appears somewhat speculative. The authors could clarify how their findings might directly relate to brain function. For example, can the theory predict the relationship between the disentanglement of neural representations and the level of noise or variability in different brain regions?\n3. While explaining zero-shot generalization capabilities in artificial neural networks (ANNs) is a central claim, this remains a substantial challenge for most networks. The theory should address whether prior models fail in zero-shot generalization due to insufficient N_task and how practitioners could estimate an adequate N_task, especially in settings where underlying latent factors are unknown, making it impractical to sample random classification tasks as in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "**Sparsity:**\n\nThe emergence of sparsity in Fig. 4 is interesting. Could the authors specify if they applied any particular architectural constraints, such as regularization techniques or sparsity-promoting mechanisms, to encourage this behavior?\n\nAdditionally, quantifying the sparsity (e.g., lifetime sparsity, [Vinje and Gallant, 2000](https://www.science.org/doi/10.1126/science.287.5456.1273)) and examining how this metric varies with N_task, the number of latent dimensions, and specific RNN architecture choices would provide further insights into the effects of task density on network representation.\n\n**More nonlinear encoding:**\n\nThe authors' use of a piecewise linear MLP+ReLU encoding of ground truth signals raises questions about the robustness of their empirical results under more complex nonlinear encodings.\n\nCould the authors explore how the N_task vs. out-of-distribution r2 relationship (Fig. 3) might change if the encoding network incorporated more nonlinear functions such as exponential or power-law activations, to simulate more biologically plausible ([Priebe et al., 2004](https://www.nature.com/articles/nn1310)) nonlinear encoding? This line of inquiry is particularly relevant given that the choice of activation function significantly influences the geometry of representations ([Alleman et al., 2024](https://openreview.net/forum?id=k9t8dQ30kU))."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper introduces a theoretical framework connecting optimal multi-task classification to disentangled representations, building upon the empirical observations in [Johnston and Fusi (2023)](https://www.nature.com/articles/s41467-023-36583-0) . To support these theoretical claims, the paper provides a range of experiments exploring different architectures, task structures, and decision boundary geometries."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper extends the work of [Johnston and Fusi (2023)](https://www.nature.com/articles/s41467-023-36583-0) by investigating how multiple supervised binary classification tasks can lead to abstract or disentangled representations in recurrent neural networks that accumulate evidence from noisy inputs. The authors present theoretical and experimental evidence suggesting that optimal multi-task classifiers can learn abstract representations of underlying ground truth factors."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "A significant weakness of this paper is its heavy reliance on dense supervised signals to achieve abstract representations. Similar to [Johnston and Fusi (2023)](https://www.nature.com/articles/s41467-023-36583-0), this work utilizes multiple binary classification tasks, framed as \"multi-task\" learning. This framing implies a diversity of tasks that is not truly reflected in this setup. The dependence on numerous, highly similar, supervised tasks raises questions about the biological plausibility and real-world applicability of the proposed mechanism.\n\nThe requirement of a large number of tasks (N_task >> D) to achieve disentanglement adds to concerns regarding the model's biological plausibility and practical relevance. Biological and artificial agents operating in real-world environments rarely encounter such an abundance of supervised signals.\n\nThe authors assert (lines 486-493) that their Theorem 3.1 implies \"the key factor driving convergence [to a Platonic representation of reality] is the diversity and comprehensiveness of the tasks being learned.\" This claim is not supported by the results presented. The tasks in this paper are essentially multiple variations of the same supervised classification task, a far cry from the genuinely diverse tasks (e.g., multimodal vision and language tasks) considered in the Platonic representation paper ([Huh et al., 2024](https://arxiv.org/abs/2405.07987)).\n\nIt is well-established that achieving disentangled representations in real-world, unsupervised settings is incredibly difficult, if not impossible, without incorporating appropriate inductive biases ([Locatello et al., 2019](https://arxiv.org/abs/1811.12359); which the authors cite). A substantial body of research, spanning decades, has demonstrated that source signals become non-identifiable when mixed non-linearly ([Hyvarinen and Pajunen, 1999](https://www.sciencedirect.com/science/article/pii/S0893608098001403)). Considering this extensive history of work in machine learning dedicated to tackling this complex problem, this paper's approach—attaining abstract or disentangled representations by providing the model with an abundance of supervised signals in the form of binary classification tasks—appears contrived and ultimately of limited practical value. The reliance on such a heavily supervised setting makes it difficult to see how these findings could generalize to more realistic scenarios where supervision is scarce and the underlying structure of the data must be inferred, rather than provided.\n\nOverall, this paper makes incremental progress on the work of [Johnston and Fusi (2023)](https://www.nature.com/articles/s41467-023-36583-0) but fails to address the fundamental concern of over-reliance on dense supervised signals. As suggested by [Johnston and Fusi (2023)](https://www.nature.com/articles/s41467-023-36583-0), exploring unsupervised mechanisms such as \"predicting the sensory consequences of our actions,\" might offer a more promising path forward.\n\nInstead of relying on a multitude of supervised signals to enforce disentanglement, future work in this area should investigate whether disentanglement can emerge from a smaller number of more diverse tasks, where the need for efficient generalization across tasks drives abstraction. This shift in focus would significantly enhance the biological plausibility, practical relevance, and overall impact of this line of research to understanding disentangled representation learning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Comments:\n\nLine 231: “…in the column space of (C^TC)^{-1}C^T potential with some element-wise non-linearity.” This statement seems too strong given that the restriction on the activation function g has been significantly relaxed. Are there any preliminary results to qualify this statement?\n\nCorollary B9: While I agree with the results of this corollary, it would be good to explicitly connect the uniformity of the singular values to the corollary statement using the Marchenko-Pastur law. Also in this corollary, at line 1613, it is stated that the probability of being orthogonal vanishes as the dimensionality N_task increases. Was this supposed to be “non-orthogonal” instead?\n\nFigure 2: This figure is particularly unclear, and it is perhaps one of the important foci in explaining all the experiments to follow. Upon first viewing with the caption, it is not clear what the arrows (two grey and one red) in Fig. 2a are supposed to imply.\n\nLine 294: “…in the RNN’s hidden layer, when tasks span the latent space.” Does this really imply anything about the tasks formally spanning the latent space?\n\nFigure 4a: I agree that the RNN representations are sparse, and this looks like a good spot to test if there is any lower threshold on the size of the hidden units before the latent encoding fails.\n\nLine 450: “…which might explain their superior generalization performance to RNNs for lower N_task.” It is not really clear from Fig. S10 that the RNNs are indeed worse at orthogonalization than the GPT.\n\nLine 1119/1120: “Figure S3b shows that the network still learns a disentangled, two dimensional continuous attractor.” Not convinced that this actually shows disentanglement since the metric for that qualification from before was the orthogonalization of different tasks. Did you mean to say abstract here? Also, small typo in the spelling of “disentangled”.\n\nMinor comments:\n\nLine 1321: \\hat{Y}_i(t) is noted as a Bernoulli random variable. This implies the support of the estimator is {0,1}, but it should really be [0,1].\n\nLine 1408: Similar comment here to Line 1321 on the use of Bernoulli variable for \\hat{Y}(t).\n\nLine 1426: In Equation 16, boldfaced t should be scalar.\n\nLine 1592: “Proof: Recall equation (3)…” should be equation (4).\n\nLine 1608: “B^TB is a symmetric for any matrix B.” Some LaTeX formatting errors here and possibly missing the word matrix following “symmetric”.\n\nLine 309: “disentaglement” misspelled\n\nFig. 3d Caption: May be good to include a short statement about the inset on PCA components accounting for variance.\n\nLine 1070: “Once the factors are perfectly, performance…” missing the word correlated after “perfectly”\n\nLine 1162: “…for all lines in Figure S4a.” Did you mean all lines from Fig. 2a, shown in Fig. S4a?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The key contribution is an extension of the Johnston and Fusi (2023) feedforward framework to recurrent networks. They show that abstract variables are indeed represented in the latent states of RNNs, LSTMs and GPT-2 style transformer architectures when these models are trained using the multi-task paradigm. The results are interesting and point to a marked advantage in adopting the multi-task paradigm for understanding canonical neuroscience experiments in an AI framework. The discussion of these results in the context of contemporary neuroscience literature is also well framed and places this paper in a good position to appeal to both the broader neuroscience and task-learning communities."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of learning disentangled representations using a multi-task evidence aggregation approach. In contrast to previous works which have studied a similar problem of abstract and disentangled representations using contextual information, the authors here adopt an approach which simultaneously collects multiple streams of evidence to enable linear classification in multiple tasks at each step. They prove the emergence of an abstract representation of ground truth data given an optimal multi-task classifier, which is shown to be learnable using RNN, LSTM, and GPT-2 based transformer architectures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the experimental results are solid contributions, the theoretical contributions are less appealing. The key theoretical results follow from fairly straightforward arguments, but appear a bit overstated. This can potentially be remedied with some re-wording and clarifying statements. \n\nIn my opinion, the following two sentences in the abstract are overstated for reasons that I expand further below: \"The key conceptual finding is that, by producing accurate multi-task classification estimates, a system implicitly represents a set of coordinates specifying a disentangled representation of the underlying latent state of the data it receives.....Overall, our framework puts forth parallel processing as a general principle for the formation of cognitive maps that capture the structure of the world in both biological and artificial systems, and helps explain why ANNs often arrive at human-interpretable concepts, and how they both may acquire exceptional zero-shot generalization capabilities\". \n\nThe conditions under which the authors prove the theoretical result are rather strong. Specifically, the authors require 1) that the network is an *optimal* classifier in the sense that the network correctly computes the N_task logits associated with each of the N_task tasks, 2) that decision boundaries in the world are linear in the space of abstract variables, and 3) a highly simplified evidence accumulation problem. \n\nIt follows from 1) and 2) that the output of an optimal network encodes the distance to the N_task decision boundaries. The optimal network will thus represent a linearly transformed version of the input if N_task is larger than the dimensionality of the input (provided the decision boundaries are not degenerate). The mathematical proofs presented in this paper formalize this argument. I believe it will help clarify the results if the authors provide this intuition upfront. The proofs have various conceptual issues, for example, in the use of the data processing inequality in Theorem B.5 and the mix-up of random variables and their estimators (lines 1442-1445). \n\nThe authors state that their framework offers a \"general principle for the formation of cognitive maps\". I believe it will be helpful if the authors can clarify what this general principle is -- is it the proposal that decision boundaries in the world are linear in the space of the abstract variables or that biological networks are solving multiple tasks at the same time? How is the current work related to the formation of cognitive maps and zero-shot generalization?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "For the neuroscience reader, it would be good to clarify what multi-task means in this setting. Does it mean that there are many different classification criteria or decision boundaries that have to be met? In other words, is the multi-task aspect a way of saying that the number of classification criteria have to exceed the dimensionality of the underlying (ground truth) evidence? Perhaps it would be useful to have a simple example that people can imagine applying to a perceptual classification task in a psychophysics laboratory?\n\nBy evidence aggregation, did you mean evidence accumulation in the spirit of drift diffusion and race to bound models?\n\nIs there any relationship between the functional form of your formulation and schemes based upon the principle of maximum mutual information (a.k.a. infomax principle) (Linsker, 1990). Canonical schemes here would include canonical variates analysis and non-linear versions, such as independent component analysis that take us into the realm of sparse coding models. One interesting aspect of independent component analysis is the identification of latent variables that have a non-Gaussian structure under the assumption that the random fluctuations are Gaussian. Is this related to your interesting observation that noise is required for the kind of disentanglement you are considering?\n\nCan you formulate your evidence accumulation scheme as a Bayesian (e.g., extended Kalman Bucy) filter? If so, can you relate this to predictive coding in the brain (Rao, 1999)?\n\nOn a related note, can you formulate your description in terms of inverting a generative model? In other words, can you turn your scheme on its head and generate ground truth evidence from the ground truth classifications; i.e. causes. If so, is this one perspective on the requisite invertibility of f?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The strengths of this paper rest upon a detailed treatment of the functional forms of mappings — implicit in neural network architectures — and how they can be unpacked in relation to maximising the mutual information between inputs and (classification) outputs. Another nice feature of this paper is its reference to neuroscience; in the sense of potential implications for neuronal processing in the brain."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper demonstrates the emergence of disentangled representations in recurrent neural networks and transformer architectures in classification tasks based on evidence aggregation or accumulation over time. The conditions under which disentangled (latent) representations emerge are studied in relation to the number of classification criteria, relative to the dimensionality of the input. The paper motivates its focus with reference to the neuroscience literature on evidence accumulation and offers a number of theorems predicated on mutual information and the functional forms of nonlinearities in various mappings, implicit in the network architectures that could be adopted."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The specific contributions of the analysis are not foregrounded sufficiently. For example, one could read the setup considered by the authors as characterising a mapping between inputs x and classification estimates (Y^) using a bottleneck architecture (i.e., mapping through a low dimensional latent space or manifold). If any such mapping were optimised with respect to cross entropy or mutual information, would the latent representations not be disentangled? In other words, is it a linear (orthogonal) disentanglement that qualifies as interesting disentanglement? And, if so, does this inherit from the assumptions and functional form of the mappings in question?\n\nThe second weakness is the rather colloquial appeal to the neuroscience literature. For example, evidence aggregation or accumulation would not be considered canonical in the cognitive neurosciences. Canonical paradigms will be things like oddball paradigms, psychophysical paradigms, working memory paradigms, attentional paradigms, and so on. Furthermore, most of neuroscience nowadays cast disentanglement in terms of predictive processing under hierarchical or deep generative world models (i.e., disentanglement is a way of describing the inversion of a generative model of how independent causes become entangled). This inversion is usually articulated in terms of predictive coding or belief propagation. Given that the authors mention Bayesian filtering — and predictive coding can be read as extended Kalman (Bayesian) filtering — there might have been a missed opportunity to join the dots between the authors work and current formulations of perceptual synthesis and evidence accumulation in the brain. \n\nRelated to this, there is a long history of sparse coding models in neuroscience that speak to the current questions; for example, sparse coding models, variants of independent component analysis, liquid computation, echo state machines, and so on (Gros, 2009; Hu et al., 2020; Maass et al., 2002; Olshausen and Field, 1996; Simoncelli and Olshausen, 2001; Suh et al., 2016). More recently, people have been looking at latent representations at various hierarchical levels to address compositionality and disentanglement; especially in relation to spatiotemporal receptive fields that can be assessed empirically in the brain e.g., (Ficco et al., 2021; George et al., 2021; Rao et al., 2023).\n\nFicco, L., et al., 2021. Disentangling predictive processing in the brain: a meta-analytic study in favour of a predictive network. Scientific Reports. 11, 16258.\nGeorge, D., et al., 2021. Clone-structured graph representations enable flexible learning and vicarious evaluation of cognitive maps. Nature Communications. 12, 2392.\nGros, C., 2009. Cognitive Computation with Autonomously Active Neural Networks: An Emerging Field. Cognitive Computation. 1, 77-90.\nHu, H.-y., et al., 2020. RG-Flow: a hierarchical and explainable flow model based on renormalization group and sparse prior. Machine Learning: Science and Technology. 3.\nLinsker, R., 1990. Perceptual Neural Organization - Some Approaches Based on Network Models and Information-Theory. Annual Review of Neuroscience. 13, 257-281.\nMaass, W., Natschlager, T., Markram, H., 2002. Real-time computing without stable states: a new framework for neural computation based on perturbations. Neural Comput. 14, 2531-60.\nOlshausen, B.A., Field, D.J., 1996. Emergence of simple-cell receptive field properties by learning a sparse code for natural images. Nature. 381, 607-9.\nRao, R.P., 1999. 
An optimal estimation approach to visual perception and learning. Vision Res. 39, 1963-89.\nRao, R.P.N., Gklezakos, D.C., Sathish, V., 2023. Active Predictive Coding: A Unifying Neural Model for Active Perception, Compositional Learning, and Hierarchical Planning. Neural Computation. 36, 1-32.\nSimoncelli, E.P., Olshausen, B.A., 2001. Natural image statistics and neural representation. Annu Rev Neurosci. 24, 1193-216.\nSuh, S., et al., 2016. Echo-State Conditional Variational Autoencoder for Anomaly Detection. In: 2016 International Joint Conference on Neural Networks. IEEE International Joint Conference on Neural Networks (IJCNN), Vol., ed.^eds., pp. 1015-1022."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We theoretically prove multi-task learning is guaranteed to lead to disentangled, generalizable representations in autoregressive models, and validate our theory on RNNs and transformers performing cognitive neuroscience evidence accumulation tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024disentangling,\ntitle={Disentangling Representations through Multi-task Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yVGGtsOgc7},\nnote={under review}\n}"
},
"abstract": {
"value": "Intelligent perception and interaction with the world hinges on internal representations that capture its underlying structure (\"disentangled\" or \"abstract\" representations). Disentangled representations serve as world models, isolating latent factors of variation in the world along approximately orthogonal directions, thus facilitating feature-based generalization. We provide experimental and theoretical results guaranteeing the emergence of disentangled representations in agents that optimally solve multi-task evidence aggregation classification tasks, canonical in the cognitive neuroscience literature. The key conceptual finding is that, by producing accurate multi-task classification estimates, a system implicitly represents a set of coordinates specifying a disentangled representation of the underlying latent state of the data it receives. The theory provides conditions for the emergence of these representations in terms of noise, number of tasks, and evidence aggregation time. Surprisingly, the theory also produces closed-form expressions for extracting the disentangled representation from the model's latent state $\\mathbf Z(t)$. We experimentally validate these predictions in RNNs trained on multi-task classification, which learn disentangled representations in the form of continuous attractors, leading to zero-shot out-of-distribution (OOD) generalization in predicting latent factors. We demonstrate the robustness of our framework across autoregressive architectures, decision boundary geometries and in tasks requiring classification confidence estimation. We find that transformers are particularly suited for disentangling representations, which might explain their unique world understanding abilities. Overall, our framework puts forth parallel processing as a general principle for the formation of cognitive maps that capture the structure of the world in both biological and artificial systems, and helps explain why ANNs often arrive at human-interpretable concepts, and how they both may acquire exceptional zero-shot generalization capabilities."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"zero-shot generalization",
"disentanglement",
"representation learning",
"multi-task learning",
"interpretability",
"computational neuroscience",
"evidence accumulation",
"world models",
"cognitive maps",
"continuous attractors",
"RNNs",
"transformers"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c6128470b3c0c8687d8721bf42ffd63c1ac6c3b3.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to neuroscience & cognitive science"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/57674d73d002f99331fd72dec2e4b3a95287d08a.zip"
},
"title": {
"value": "Disentangling Representations through Multi-task Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yVQcr4qjD6 | Robust Function-Calling for On-Device Language Model via Function Masking | main | Active | language models;function-calling models | foundation or frontier models, including LLMs | 5;5;6;8;8 | 3;4;4;4;3 | 2;3;3;4;3 | 2;3;3;3;4 | 3;2;3;3;3 | 6.4 | 3.6 | 3 | 3 | 2.8 | -0.060193 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide a more detailed analysis of function name issues? Specifically, how many failed cases are due to function or parameter name errors? Figure 2 does not clearly convey this information. Authors may consider an error type analysis with detailed tables isolating the number of failure cases due to function name/ arguments name/ other error types.\n2. It would be beneficial for the authors to analyze how many failed cases result from irrelevant function calls. The urgency of this issue is not apparent in the current version. \n3. Based on the results in Table 5, it is difficult to verify the effectiveness of the proposed pipeline compared to xLAM variants. Suggestions could refer to weakness item 3.\n4. While Table 1 shows that Hammer achieves impressive results on AST evaluations, the comparitive performance boosting across other benchmarks in Table 2 does not show the same level of improvement. Is there any analysis to explain this discrepancy? Authors could provide discuss potential reasons for the discrepancy and propose additional experiments or analyses that could help explain the varying levels of improvement observed."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The work effectively identifies the issue of naming variability in function-calling LLMs and incorporates training methods to address it.\n2. Hammer models utilize an augmented dataset to handle null function-calling cases.\n3. The study conducts comprehensive evaluations, demonstrating that Hammer consistently performs well across diverse benchmarks, establishing it as a reliable open-source model for function calling."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study introduces Hammer, a novel family of foundation models designed to enhance function-calling capabilities in large language models (LLMs). The authors developed a specialized dataset comprising 7,500 instances to improve the models' sensitivity to irrelevant functions. By shifting the focus from function names and parameters to their descriptions, the Hammer models aim to reduce the likelihood of misinterpretation, thereby enhancing generalization across diverse benchmarks. Experimental results indicate that Hammer models achieve superior performance across various benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed methods may inherit potential biases inherent in LLMs. The sampled \"Masked Data\" can degrade in performance when encountering function names or parameters that differ significantly from those seen during training. This dependency may limit its effectiveness in applications where naming conventions vary widely.\n2. There are potential overfitting concerns. The authors do not provide a thorough data contamination analysis for the newly created dataset.\n3. Whether the performance boosting comes from the effectiveness of the proposed pipeline or the Qwen model itself remains questionable. According to the results of ``API-Bank’’ columns in Table 5, when using the same base model, Deepseek-Coder-7B, it is not as effective as the xLAM variant. Authors could have Qwen models trained with xLAM datasets to further elaborate it.\n4. The presentation of results is not well-organized. Comprehensive tables are difficult to follow and analyze, making it challenging to verify the effectiveness of each component. Authors could break down the long tables into smaller and more focused ones. Also, for figure 3, author could have the examples shown in function masking to be larger and more compact."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Have you tried replacing function names with semantically similar ones instead of random strings? This could be more meaningful since function names are important features in practice and descriptions and parameters may sometimes be missing. In some usage scenarios LLMs need to infer functionality from names alone.\n2. Could you provide more insights into other factors contributing to Hammer’s performance? Have you tried using DeepSeek models as base models (just like xLAM) instead of just Qwen? This would be helpful to understand what specific aspects of the base model selection impacted results. Also, it would be beneficial to know if there are other training tricks beyond data augmentation.\n3. For Table 1’s inconsistent performance across benchmarks, I am curious why does xLAM perform well on three benchmarks but poorly on two others? Could the authors provide some error pattern analysis and related insights?\n\nMinor things: the observations in the paper might be related to the code data contamination problem as well, where the memorization of func names / orders could impact the generalization ability. Ref: https://arxiv.org/pdf/2402.05980"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The function masking technique is an innovative solution to reduce model dependency on naming conventions. The empirical results show this approach helps achieve more consistent performance across different benchmarks.\n2. The authors conduct extensive experiments across multiple benchmarks and provide detailed ablation studies on masking ratios and irrelevance detection. The evaluation is thorough and well-documented with clear performance metrics.\n3. The work addresses a real-world problem in function-calling models and provides a lightweight solution suitable for on-device deployment. The improved generalization capability has significant practical value."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Hammer, a family of lightweight models for function-calling tasks that addresses the problem of inconsistent performance across different benchmarks. The authors propose a function masking technique during training and an irrelevance-augmented dataset to improve model robustness and generalization. Experiments show that Hammer achieves competitive performance compared to larger models like GPT-4 on various benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The authors replace function names with random strings, but don’t explore using semantically similar names. This approach may be overly aggressive since function names often contain valuable semantic information that could be preserved while still improving generalization\n2. While the paper demonstrates Hammer’s superior performance, it lacks detailed analysis of other potential contributing factors beyond the masking technique. The choice of base model and other training details could significantly impact the results\n3. More detailed error patterns analysis would be beneficial to understand the model and baseline failure patterns across different benchmarks"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weakness part."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors identified a very important issue of the current existing open source function calling models such as xlam.\n- Based on the issue, the method that the authors proposed makes a lot of sense and indeed achieves very good performance.\n- The model and dataset will be open source later, which would benefit the community a lot."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors studied large language models' function calling and tool use abilities. The authors not only identified a critical gap in existing function-calling models, where the model is often misled by specific naming of functions and arguments. \nTo address the issue, the authors present a novel tunning framework that masks the function name and arguments; this results in a family of various new open-source function calling models called Hammer and also a new specialized dataset for irrelevance detection. These models achieves state of the art performance on open benchmark and leaderboards such as Berkeley function calling (BFCL)."
},
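To make the masking idea described in the summary above concrete, here is a minimal sketch of what such a training-time transformation could look like. It is only an illustration of the mechanism the reviews describe (replacing function and parameter names with random strings while keeping descriptions intact, so the model cannot rely on memorized naming conventions); the schema layout, the `mask_function_schema` helper, and the example function are all hypothetical and are not taken from the paper or its code.

```python
import copy
import random
import string


def random_identifier(length: int = 8) -> str:
    """Return a random string that stands in for a function or parameter name."""
    return "fn_" + "".join(random.choices(string.ascii_lowercase, k=length))


def mask_function_schema(schema: dict, mask_prob: float = 0.5) -> tuple[dict, dict]:
    """Randomly replace function and parameter names in a tool schema.

    Descriptions are left untouched, so a model trained on the masked schema
    has to rely on them rather than on familiar names. The returned name map
    allows the ground-truth call in the training label to be rewritten
    consistently with the masked schema.
    """
    masked = copy.deepcopy(schema)
    name_map = {}

    if random.random() < mask_prob:
        new_name = random_identifier()
        name_map[masked["name"]] = new_name
        masked["name"] = new_name

    new_params = {}
    for pname, pspec in masked.get("parameters", {}).items():
        if random.random() < mask_prob:
            new_pname = random_identifier()
            name_map[pname] = new_pname
            new_params[new_pname] = pspec  # keep the description unchanged
        else:
            new_params[pname] = pspec
    masked["parameters"] = new_params

    return masked, name_map


if __name__ == "__main__":
    # Toy schema; real function-calling schemas are richer than this.
    schema = {
        "name": "get_weather",
        "description": "Return the current weather for a city.",
        "parameters": {
            "city": {"type": "string", "description": "Name of the city."},
        },
    }
    masked_schema, mapping = mask_function_schema(schema)
    print(masked_schema)
    print(mapping)
```

Under such a scheme, the masking probability becomes a tunable hyperparameter, which matches the reviews' mention of ablation studies on masking ratios.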
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The method is specifically designed for function calling problems; it is unclear whether some other domains will also benefit the tunning framework;\n- I am not sure how this strategy is specifically related to on-device language models; It can also be applied to larger models, and it would be really great to see the performance of larger models such as 70B llama if they are applied with this method; \n- For the cases where there is little description, I am not sure if we can still leverage the masking strategy."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How does the function masking technique affect the model's zero-shot generalization to entirely new API schemas or documentation formats not seen during training?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The problem identification is well-motivated and articulated.\n2. The evaluations are comprehensive, covering multiple benchmarks and including detailed ablation studies on both masking ratios and irrelevance detection data proportions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Hammer, a novel family of language models specifically designed for function-calling tasks. The authors identify a critical limitation in existing models: their performance varies significantly across benchmarks due to being misled by function and parameter naming conventions. To address this, they propose a function masking technique during training and augment the training data with irrelevant detection examples. Their empirical results show that Hammer models achieve state-of-the-art performance across multiple benchmarks, particularly impressive for their parameter count, with the 7B parameter version competing with much larger models including GPT-4 on certain metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper doesn't include the comparison with some recent relevant baselines (e.g., ToolBench, API-Bank).\n2. There are no ablation studies on the effect of different description formats/lengths on model performance.\n3. There’s no discussion of potential failure modes or edge cases where function masking might perform worse than traditional approaches.\n4. The random string generation process for masking isn't fully specified - different approaches to generating random strings could affect reproducibility.\n5. The exploration of the trade-off between masking and maintaining semantic meaning in function names is limited."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see weakness"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. This paper identifies a significant challenge in current function calling models that is both practical and impactful, particularly as the function calling models are increasingly deployed in real-world applications where robustness and accuracy are essential.\n\n2. The proposed method is simple but effective, which addresses the problem thoroughly. The method guides the model to focus on function descriptions rather than names. Also, the method is potentially replicable and offers inspiration for ai safety practitioners in other fields who seek to improve model robustness.\n\n3. The evaluations are thorough and effectively demonstrate the method’s applicability to real-world use cases. \n\n4. The authors consider irrelevance detection, which is essential in practical applicaitons, as it reduces the risk of incorrect or unnecessary function calls and enhances reliability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the problem of inconsistent performance across benchmarks in current function-calling models, which often arises from different function and parameter names. The authors developed an augmented dataset and proposed a novel function masking method that guide the model to focus more on function descriptions, instead of names, thereby enhancing its robustness and generalization capabilities across diverse benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper claims that the model is designed for on-device function calling; however, it lacks sufficient explanations or evaluation results to demonstrate its efficiency or performance in such scenarios. The authors did not discuss the hardware configurations required to run the model on edge devices or address potential resource constraints. Additionally, the authors did not provide evaluation results regarding inference time, which are essential for assessing the model’s practicality in real-world, resource-limited environments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024robust,\ntitle={Robust Function-Calling for On-Device Language Model via Function Masking},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yVQcr4qjD6},\nnote={under review}\n}"
},
"abstract": {
"value": "Large language models have demonstrated impressive value in performing as autonomous agents when equipped with external tools and API calls. Nonetheless, effectively harnessing their potential for executing complex tasks crucially relies on enhancements in their function-calling capabilities. This paper identifies a critical gap in existing function-calling models, where performance varies significantly across benchmarks, often due to over-fitting to specific naming conventions. To address such an issue, we introduce Hammer, a novel family of foundation models specifically engineered for on-device function calling. Hammer employs an augmented dataset that enhances models’ sensitivity to irrelevant functions and incorporates function masking techniques to minimize over-fitting. Our empirical evaluations reveal that Hammer not only outperforms larger models but also demonstrates robust generalization across diverse benchmarks, achieving state-of-the-art results. Our open-source contributions include a specialized dataset for irrelevance detection, a tuning framework for enhanced generalization, and the Hammer models, establishing a new standard for function-calling performance."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"language models",
"function-calling models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/4dfb416d8fa2dc8f8242cf69288c55616eebbe67.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/4fc1766d187613b75019a29d051866561d31a372.zip"
},
"title": {
"value": "Robust Function-Calling for On-Device Language Model via Function Masking"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yVVzaRE8Pi | You Know What I'm Saying: Jailbreak Attack via Implicit Reference | main | Active | Adversarial attacks;Jailbreak;Security;Black box;LLM;Alignment;Cross-Modality alignment;in context learning | alignment, fairness, safety, privacy, and societal considerations | 3;3;5;6 | 3;4;4;4 | 2;2;2;3 | 2;2;2;3 | 3;3;2;3 | 4.25 | 3.75 | 2.25 | 2.25 | 2.75 | 0.555556 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. First Attack Success Rate (FASR) is used in Table 1 across all approaches. It would be helpful to reader if the calculation method or reference to FASR is provided or does it simply mean ASR in the first stage of attack? The authors must define this in paper as it may be confusing to the readers.\n2. How the authors arrive at Equation 2 ? This needs to be explained in detail."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper shows good ASR on SoTA LLMs IN Table 1. \n2. The authors also show cross model attack in Table 2 (as explained in the algorithm).\n3. The authors compare their attack with baseline defenses and report the rejection rate in Table 5."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Attack via Implicit Reference (AIR) that decomposes a malicious objective into nested harmless objectives. AIR framework has a 2-step attack approach, wherein in the first step it uses LLM to rewrite the original malicious objective into nested objectives, and in the second step it uses multiple objectives to refine the model's response. The authors show that this technique jailbreaks LLMs. It shows higher ASR on state-of-the-art LLMs and also shows that ASR increases with increasing the size of the target model. This paper also shows cross model attack."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. There is not much information available regarding the dataset details, like the number of samples during the training approach in the experimental section 4.1.\n2. This method makes use of LLM calls thrice in the method - once while rewriting the input prompt and next two times in the first and second stage of attack respectively. Three calls to an LLM for a single prompt is expensive with respect to time taken to rewrite a prompt. The authors should comment on the inference time of their approach so as to make it feasible to use their approach in practical scenarios.\n3. The paper is not very well written. There are some spelling mistakes on lines 40, latex syntax error in algorithm section \"iscrossmodel\".\n4. Overall, the paper is not well presented and lacks novelty. Like the \"Continue Attack\" in section 3.2 is just addition of a prompt-based filter. Prompt-based filters have been used before like in the work titled \"LLM Self Defense: By Self Examination, LLMs Know They Are Being Tricked\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "Not needed"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the Introduction Section, line number ‘040’, ‘cyber-attacks’ have not been spelled correctly. Kindly do the needful.\n2. Refine Cross-Model Strategy Analysis, by optimizing the selection criteria for initial \"less secure\" models in cross-model attacks could be beneficial for replicating or further developing this approach."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The AIR method achieves a high ASR (above 90%) on models with large number of parameters.\n2. The Cross-model strategy, wherein, a less secure model is targeted first to create malicious content, which is then used to bypass more secure models, emphasizes the potential transferability of vulnerabilities between LLMs.\n3. The paper identifies an inverse relationship between model size and security, highlighting that larger model, typically with enhanced in-context learning capabilities, are more susceptible to AIR."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a jailbreak attack referred as Attack by Implicit Reference (AIR). AIR decomposes a prompt with malicious objective into nested prompts with benign objective. This method employs multiple related harmless objectives to generate malicious content without triggering refusal responses, thereby effectively bypassing existing detection techniques. AIR achieves attack success rate (ASR) of more than 90% on open-source and close-source LLM. It is highlighted that heavier model, i.e. models with more parameters, are more vulnerable to Jailbreak than heavier models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The success of the AIR method appears to heavily rely on models with high comprehension abilities, specifically in areas like nuanced language understanding, contextual retention, and sophisticated in-context learning. These capabilities are essential because AIR requires the model to maintain and link fragmented, implicitly harmful objectives across multiple interactions without overtly identifying them as malicious. Authors should test this with lighter models (models with fewer parameters) like LLaMa-3-8B and Mistral-7B etc.\n2. While the paper evaluates existing defenses (e.g., SmoothLLM, PerplexityFilter), there’s a lack of exploration or proposal for new countermeasures against AIR. Further suggestions on possible defenses could enhance the practical value of the paper.\n3. In principle the AIR attack looks very similar to other template based/ word substitution attacks example- (i) When \"Competency\" in Reasoning Opens the Door to Vulnerability: Jailbreaking LLMs via Novel Complex Ciphers by, Handa et. al., (ii) Jailbreaking Leading Safety-Aligned LLMs with Simple Adaptive Attacks, by Andriushchenko et.al. . Therefore, the novelty in the approach is not apparent. Authors should highlight what makes these attacks unique compared to the other template based or word substitution attacks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Which part is the main contribution of this paper? If the introduction of nesting objective generation, see weakness 1. If the method and cross model attack, see weakness 3.\n\n2. See weakness 4. What is the evaluation method used in your experiment?\n\n3. See weakness 5."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The introduction of \"nesting objective generation\" is interesting.\n\n2. The paper is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new jailbreak method **AIR**, which decomposes the malicious objectives into nested harmless objectives and uses implicit references to cause LLMs to generate malicious content without triggering existing safety mechanisms. In the first stage, AIR bypasses the model's rejection mechanism by breaking down malicious into nested benign objectives. In the second stage, AIR sends a follow-up rewrite request that includes implicit references to the content generated for the second objective in the previous stage\nwhile excluding any malicious keywords. Experiments have shown the effectiveness of AIR and some other insights."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While interesting, as the motivation and basis of your method, Section 2.3 \"Nesting Objective Generation\" needs more detailed analysis. You could add more analysis about the connection between the attention mechanisms and your proposed concept (nesting objective generation and implicit inferences). For example, you could analyze how the attention mechanism affects the nesting objectives. The existing version makes Section 2.3 more like a guess.\n\n2. This paper needs to be polished in its writing, for example, the full name and abbreviation of LLM are mixed. This reviewer suggests using the full name \"Large Language Model\" on the first mention, followed by \"LLM(s)\" thereafter.\n\n3. The technical contribution is limited, especially \"Cross-Model Attack\". As you cite as the baseline, PAIR and its following version TAP both employ another LLM as an attacker or the red-teaming assistant to attack the target model. You should explicitly highlight the difference between your paper with other related papers and are recommended you provide a comparative analysis to demonstrate your unique contributions.\n\n4. What is the evaluation method used in your experiment? You mentioned in \"Evaluation Metrics\" part that this paper employed three complementary evaluation methods. However, you should provide a clear breakdown of which evaluation methods were used of each experiment. If all of the three metrics are used, please give more details about how these three are balanced.\n\n5. Section 5 is more like experiment part than analysis. In this section, you give some \"insights\" such as the impact of LLMs' size and the number of objectives. However, this section lacks of detailed disccusion or further analysis behind this phenomenon."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The notation and definition in Sec. 2.3 is confusing. What is the support set of objectives, in text/tokens/embeddings space? What is the sum of objectives? How is $\\alpha$ defined, and in the LLM backbone, there are various attention layers and heads, how do these count together?\n- The conclusion in Sec 5.1 is interesting but under-explored. In particular, as the model size get larger, the general ability including rewriting is increasing as well as the safety awareness, the trade-off is more interesting to me. I would expect to see a study on a more fine-grained family (e.g., QWen 2.5 with 0.5B, 1.5B, 3B, 7B, 14B, 32B, and 72B variants), to see if there is any turn-over point along the evaluation. Also, I would expect an experiment to use the same model for the first stage, e.g., gpt-4o, then use each of the victim models for the second stage. Then the relationship is more straightforward.\n- Overall, I did not see the distinguishment between the proposed attack and some related work, especially contextual attack and multi-turn attack, and therefore, the novelty of so-called implicit reference design is unclear."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The motivation and attack design is intuitive and straightforward.\n- The experiment setup covers different sets of LLMs, including both open-sourced and closed-sourced models and in different size.\n- The evaluation results shows the proposed method is relatively efficient compared to baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work propose a new jailbreak attack against Large Language Models (LLMs), namely Attack via Implicit Reference (AIR). The design is to decompose a malicious objective into permissible objectives and links them through implicit references within the context. The experiment shows the effectiveness of the proposed attack. Also the evaluation on some defenses shows the robustness of AIR attack."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The evaluated samples is relatively small (i.e., 100), for some studies shows only marginal difference, e.g., Table 4 and 5, confidance interval is necessary to make the conclusion.\n- Attack baseline setup needs justification. Specifically, the proposed attack shared some insight from contextual attack, and multi-turn attack, while the compared baselines may not strictly fall into the same category. Some more related work, such as COU[1], COA[2] and ArtPrompt[3], is expected. \n- Defense baseline setup needs justification. Specifically, the evaluated defense baselines are primarily designed for attack that generated adversary tokens, which is quite away from the proposed attack. Though it is ok to include these defense, but it is necessary to cover more defense that may be more suitable for contextual and multiturn attack, such as moderation-based defense. \n\n[1] Bhardwaj, Rishabh, and Soujanya Poria. \"Red-teaming large language models using chain of utterances for safety-alignment.\" arXiv preprint arXiv:2308.09662 (2023).\n\n[2] Yang, Xikang, et al. \"Chain of Attack: a Semantic-Driven Contextual Multi-Turn attacker for LLM.\" arXiv preprint arXiv:2405.05610 (2024).\n\n[3] Jiang, Fengqing, et al. \"Artprompt: Ascii art-based jailbreak attacks against aligned llms.\" arXiv preprint arXiv:2402.11753 (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024you,\ntitle={You Know What I'm Saying: Jailbreak Attack via Implicit Reference},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yVVzaRE8Pi},\nnote={under review}\n}"
},
"abstract": {
"value": "While recent advancements in large language model (LLM) alignment have enabled the effective identification of malicious objectives involving scene nesting and keyword rewriting, our study reveals that these methods remain inadequate at detecting malicious objectives expressed through context within nested harmless objectives.\nThis study identifies a previously overlooked vulnerability, which we term $\\textbf{A}$ttack via $\\textbf{I}$mplicit $\\textbf{R}$eference ($\\textbf{AIR}$). AIR decomposes a malicious objective into permissible objectives and links them through implicit references within the context. This method employs multiple related harmless objectives to generate malicious content without triggering refusal responses, thereby effectively bypassing existing detection techniques.\nOur experiments demonstrate AIR's effectiveness across state-of-the-art LLMs, achieving an attack success rate (ASR) exceeding $\\textbf{90}$% on most models, including GPT-4o, Claude-3.5-Sonnet, and Qwen-2-72B. Notably, we observe an inverse scaling phenomenon, where larger models are more vulnerable to this attack method. These findings underscore the urgent need for defense mechanisms capable of understanding and preventing contextual attacks. Furthermore, we introduce a cross-model attack strategy that leverages less secure models to generate malicious contexts, thereby further increasing the ASR when targeting other models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Adversarial attacks",
"Jailbreak",
"Security",
"Black box",
"LLM",
"Alignment",
"Cross-Modality alignment",
"in context learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/cdfdf0893b19cfe58e804bd80df6b59fb16e4ee5.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "You Know What I'm Saying: Jailbreak Attack via Implicit Reference"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yVeNBxwL5W | MRS: A Fast Sampler for Mean Reverting Diffusion based on ODE and SDE Solvers | main | Active | Fast Sampler;Mean Reverting Diffusion | generative models | 5;6;8;8 | 3;4;5;3 | 3;4;3;3 | 3;3;3;3 | 3;4;4;3 | 6.75 | 3.75 | 3.25 | 3 | 3.5 | 0.406181 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1). The paper is very well written, and easy to understand especially very coherent even with the math part.\n\n2). The core idea of the paper is very interesting. Especially how the authors have formulated the core idea.\n\n3). The shown results are very impressive and competitive.\n\n4). Even though the concept behind the \"sampling trajectory\" part is simple, the visualizations obtained through it really helps to understand the core concept and contrast with existing methods. \n\n5). The MRS is plug and play."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Standard MR Diffusion models typically require a high number of function evaluations to obtain high-quality samples. To address this limitation, MR Sampler introduces a novel approach that combines an analytical function with an integral parameterized by a neural network. The proposed method demonstrates a 10 to 20-fold speedup across various image restoration tasks, obtaining efficient sampling without compromising quality."
},
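For readers unfamiliar with the setting, the mean-reverting SDE that MR Diffusion builds on (following the mean-reverting SDE formulation of Luo et al., 2023, cited in the questions of a later review) can be written roughly as in the sketch below. The notation and the schedules are assumptions for illustration and are not copied from the paper under review.

```latex
% Illustrative forward SDE of a mean-reverting (Ornstein-Uhlenbeck-type) diffusion.
% Here \mu is the state the process reverts to (e.g., the degraded image),
% w is a standard Wiener process, and \theta_t, \sigma_t are placeholder schedules.
\begin{equation}
  \mathrm{d}\mathbf{x} = \theta_t \, (\boldsymbol{\mu} - \mathbf{x}) \, \mathrm{d}t
                       + \sigma_t \, \mathrm{d}\mathbf{w} .
\end{equation}
% Sampling integrates the corresponding reverse-time SDE or probability-flow ODE;
% the reviews describe the MR Sampler's speedup as coming from handling part of this
% integration with a semi-analytical solution rather than many small Euler steps.
```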
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I believe this is a great paper with solid reasoning and a well-thought-out approach. However, I have some questions regarding the numerical implementation and the comparison methods used.\n\n1). Can the authors elaborate on why only the backward difference method is used? Are there specific benefits to this approach over others? \n\n2). The proposed MR Sampler is only compared with posterior sampling, and Euler-Maruyama discretization. What are the current SOTA methods? How does the proposed approach compare with these, beyond posterior sampling and Euler-Maruyama?\n\n3). I would like to understand why the authors say \"frequently fall outside\" in this part (line304)\n \"Although the standard deviation of this Gaussian noise is set to 1, the values of samples can frequently fall outside the range of [-1,1]\"\n\n4). Could the authors specify the neural network architecture they used? \n\nPlease address these, and I'm leaning towards acceptance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Why wasn't an ablation study done on n, k?\n- Why wasn't wall clock time reported? Please provide.\n- Why weren't there comparison to standard (non-MR) SDE fast samplers? This comparison should include both reconstruction quality for inverse problems (since this is given as the main motivation for MR diffusion) and computational time.\n- How was guidance incorporated for image restoration? Does the particular method for incorporating the degraded observation make a difference for the performance of MR sampler?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Addresses a gap in fast sampling for mean-reverting diffusion\n- Provides two alternatives focusing on noise/data prediction\n- Relevant ablation studies for some parameter choices\n- Evaluates performance in image restoration tasks"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a fast sampling algorithm for mean-reverting diffusion. This addresses a gap for mean-reverting diffusion SDE solvers, as current fast samplers for SDEs do not readily apply to mean-reverting SDEs. Two flavors of solvers are proposed, one based on noise prediction, and the other on data prediction. Results show both perform similarly for larger NFEs, but the latter outperforms the former for fewer NFEs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- No ablation study on n, k\n- Wall clock time not reported, only NFE improvement is discussed\n- No comparison to standard SDE fast samplers \n- The whole description is for unconditional sampling, but all results are for image restoration. How is the guidance incorporated?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How did you determine the optimal number of sampling steps? The choice of steps varies across different experiments, but the selection criteria are not immediately clear. Could you provide more explanation on how to select the number of steps to balance efficiency and performance?\n- What was the rationale behind choosing a low-light and motion-blurry dataset for the visualizations in Figure 2? \n- In reference to the related paper [1], it is mentioned that solving the SDE typically requires only 22 steps, although they still use 100 steps. Since you opted to use 100 steps aswell, I am wondering why the performance with 20 steps is still so substantially different.\n\n[1] Luo, Ziwei, et al. \"Image restoration with mean-reverting stochastic differential equations.\" arXiv preprint arXiv:2301.11699 (2023)."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors' proposed method demonstrates robust performance across a variety of experiments, consistently showing strong results in both quality and efficiency. Notably, it outperforms posterior-based and Euler-based samplers when using a high number of sampling steps. The performance advantage becomes even more pronounced as the number of steps is reduced, underscoring the method's effectiveness with low-step counts.\n\n- It is particularly encouraging to see that the proposed method performs best across almost all of the 16 tested image perturbations. This breadth of performance suggests that the approach is not only effective but also adaptable to different types of image perturbations.\n\n- The paper is generally well-organized and written. Key concepts and technical choices are clearly explained.\n\n- Overall, this is a well-rounded paper that offers convincing results and provides a method that should be straightforward to integrate with existing frameworks. I also appreciate the authors’ decision to release the code for easy reproducibility."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a novel algorithm, named MRS, designed as a sampler to reduce the number of sampling steps required for Mean Reverting Diffusion. To achieve this, they solve the reverse-time stochastic differential equation (SDE) alongside the probability flow ordinary differential equation, resulting in a reduction of required steps to between 5 and 15 while maintaining high-quality outcomes. The method is compatible with mainstream parameterizations, making it broadly applicable. The authors conduct an extensive evaluation across 16 different image perturbations, demonstrating that their proposed sampler generally outperforms others in both efficiency and speed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The choice of the number of sampling steps is not entirely clear. While it seems that setting this value to 20 yields acceptable results, this may not fully leverage the acceleration benefits of the proposed method. Further guidance on selecting an optimal number of steps or a discussion on its trade-offs would make this aspect more transparent.\n- There is some ambiguity regarding the number of function evaluations (NFEs) required for MR Diffusion. The authors mention that MR Diffusion typically requires hundreds of NFEs, but in the current paper, this value is set to 100, consistent with the original paper. This raises questions about whether the maximum performance of the Posterior and Euler-based sampling methods could be higher if a larger number of steps were used. Given the performance gains depicted in Figure 2, it would be helpful to clarify if the efficiency improvements observed are partly due to these optimized sampling steps.\n- Table 15 in the appendix contains incorrect highlight"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. How does reducing NFEs affect sampling quality? Could you compare this with speed-quality trade-off methods like \"Come-Closer-Diffuse-Faster\"?\n\n2. Do different image restoration tasks benefit differently from the MR Sampler in terms of speed or quality?\n\n3. Why is Mean-Reverting SDE better for posterior sampling compared to other methods like Optimal Transport or Schrödinger Bridge?\n\n4. Can you provide more details on the stability of data prediction models at low NFEs, and when would noise prediction be preferable?\n\n5. Can MR Sampler be extended to larger or multi-modal tasks, such as video generation or text-to-image models?\n\n6. Why are there no comparisons with SOTA models in terms of both speed and quality? Would this strengthen the paper?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper introduces a novel approach to sampling in Mean Reverting Diffusion models, which itself is a relatively new paradigm compared to more conventional diffusion processes like Variance Preserving and Variance Exploding SDEs. The key originality lies in the way the authors combine semi-analytical solutions derived from ODE and SDE solvers with MR Diffusion, enabling faster sampling. This combination is innovative as it offers a fresh perspective on accelerating diffusion models by altering the underlying stochastic differential equation structure rather than focusing solely on the score function as in prior work.\n\nAdditionally, the Mean Reverting SDE framework offers a natural integration of image conditions into the generation process, making it more applicable to tasks that require controllable generation, such as image restoration and inpainting. This is a novel contribution compared to prior work that has typically used Diffusion Schrödinger Bridge or Optimal Transport methods for similar purposes. The MR Diffusion model’s ability to address multiple tasks beyond denoising is a creative and valuable extension of existing frameworks.\n\nThe technical depth of the paper is commendable. The authors offer a rigorous derivation of the semi-analytical solution and provide substantial theoretical grounding for their approach. The use of probability flow ODEs (PF-ODEs) and reverse-time SDEs in the MR Diffusion context is clearly explained and well-justified. The semi-analytical solution, which combines an analytical function and a neural network-based integral, reduces computational complexity without compromising sampling quality.\n\nThe experiments are comprehensive and cover ten different image restoration tasks, providing strong empirical support for the proposed method. The use of multiple performance metrics (e.g., FID, LPIPS, PSNR, SSIM) demonstrates that the authors took a thorough approach to evaluating both the quality and speed of the generated samples. The speedups of 10-20x, without a significant drop in sample quality, highlight the robustness and practical value of the proposed method.\n\nThe paper is well-structured, with clear sections on the methodology, theoretical contributions, and experimental validation. The technical content, while complex, is made accessible through the use of visual aids (e.g., Figure 1 comparing qualitative sampling results, and charts showing performance metrics) and clear explanations of the mathematical formulations. The distinction between noise prediction and data prediction models, and the impact of these choices on sampling quality and stability, is clearly delineated and contributes to a deeper understanding of the technique.\n\nThe authors also provide appendices with detailed proofs and further experimental results, ensuring that the methodology is reproducible and the claims are verifiable. Overall, the clarity in presenting a technically complex subject is a strong point of the paper.\n\nThe proposed MR Sampler addresses a significant challenge in the field of diffusion models: accelerating the sampling process without sacrificing quality. The speedups achieved in this work, which range from 10x to 20x across multiple tasks, are substantial and have clear practical implications, particularly in real-time applications such as image restoration. 
This makes the method highly relevant for use cases that demand controllable and fast generation, such as medical imaging, video processing, and computational photography.\n\nThe method’s plug-and-play nature is another strength. It is adaptable to a variety of existing diffusion models and does not require retraining, making it easy to integrate into different applications. The broad applicability to various image restoration tasks (e.g., dehazing, inpainting, motion-blur reduction) enhances the significance of the work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new perspective on improving the efficiency of sampling in Mean Reverting Diffusion models. This approach introduces MR Sampler, which leverages both ordinary differential equations (ODE) and stochastic differential equations (SDE) to enhance the speed of the sampling process. While MR Diffusion provides a fresh perspective by modifying the structure of the SDE to make controllable image generation simpler and more natural, the main challenge it addresses is the inefficiency of sampling, which typically requires hundreds of function evaluations (NFEs) to generate high-quality outputs.\n\nThe authors build on prior work that primarily focused on denoising tasks using ODE solvers, and they extend this framework to a broader set of tasks without requiring additional training. The key technical contribution is the semi-analytical solution they derive, which reduces the number of NFEs significantly while maintaining competitive sampling quality. In fact, MR Sampler shows a speedup of 10 to 20 times across ten different image restoration tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "A notable limitation is that the paper places more emphasis on the speed of sampling rather than the quality. While the authors claim comparable quality in terms of metrics such as FID and LPIPS, they do not provide an in-depth analysis of the trade-off between sampling speed and output quality. This is a gap, as accelerating sampling without sacrificing quality is one of the central challenges in diffusion models. The authors could have benefited from comparing their approach with other speed-quality trade-off techniques, such as those mentioned in prior work (e.g., the 'Come-Closer-Diffuse-Faster' approach). Although these methods might be older, they are relevant in establishing a clear benchmark and providing a deeper understanding of the trade-offs at play. (like https://openaccess.thecvf.com/content/CVPR2022/papers/Chung_Come-Closer-Diffuse-Faster_Accelerating_Conditional_Diffusion_Models_for_Inverse_Problems_Through_Stochastic_CVPR_2022_paper.pdf and https://arxiv.org/abs/2108.01073 )\n\nThe experiments demonstrate a clear focus on speedups, and while they are rigorous and cover a variety of tasks (e.g., image dehazing, inpainting), the lack of comparison with more state-of-the-art methods suggests that the method may not yet be positioned as a new state of the art but rather as a faster alternative with comparable performance under specific conditions.\n\nOverall, the introduction of the Mean Reverting SDE into the posterior sampling stage with ODE solvers is a fresh and promising perspective. However, the paper would benefit from addressing the broader implications of their method, particularly how the mean-reverting approach impacts the balance between speed and quality. Including more comprehensive comparisons with established trade-off techniques would further strengthen the contribution but overall this is a good paper, potentially worth discussing at ICLR."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a fast sampler for Mean Reverting Diffusion based on both ODE and SDE solvers."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024mrs,\ntitle={{MRS}: A Fast Sampler for Mean Reverting Diffusion based on {ODE} and {SDE} Solvers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yVeNBxwL5W},\nnote={under review}\n}"
},
"abstract": {
"value": "In applications of diffusion models, controllable generation is of practical significance, but is also challenging. Current methods for controllable generation primarily focus on modifying the score function of diffusion models, while Mean Reverting (MR) Diffusion directly modifies the structure of the stochastic differential equation (SDE), making the incorporation of image conditions simpler and more natural. However, current training-free fast samplers are not directly applicable to MR Diffusion. And thus MR Diffusion requires hundreds of NFEs (number of function evaluations) to obtain high-quality samples. In this paper, we propose a new algorithm named MRS (MR Sampler) to reduce the sampling NFEs of MR Diffusion. We solve the reverse-time SDE and the probability flow ordinary differential equation (PF-ODE) associated with MR Diffusion, and derive semi-analytical solutions. The solutions consist of an analytical function and an integral parameterized by a neural network. Based on this solution, we can generate high-quality samples in fewer steps. Our approach does not require training and supports all mainstream parameterizations, including noise prediction, data prediction and velocity prediction. Extensive experiments demonstrate that MR Sampler maintains high sampling quality with a speedup of 10 to 20 times across ten different image restoration tasks. Our algorithm accelerates the sampling procedure of MR Diffusion, making it more practical in controllable generation."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Fast Sampler",
"Mean Reverting Diffusion"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/47e20748abfa76f60f816e9e44971cd6a89a6798.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MRS: A Fast Sampler for Mean Reverting Diffusion based on ODE and SDE Solvers"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yWoV4Ca6ji | Towards Understanding the Universality of Transformers for Next-Token Prediction | main | Active | Transformers;In-Context Learning;Deep Learning Theory | learning theory | 3;3;5;6;6 | 3;4;3;2;4 | 3;3;3;3;3 | 3;2;3;3;3 | 2;3;4;3;3 | 4.6 | 3.2 | 3 | 2.8 | 3 | -0.315244 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Just to make sure I understand, is the main technical piece here handling the softmax activation? If the goal was to extend linear activation to the kernelized setting, would that just be adding the \"kernel trick\" to Sander et al (2024)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper furthers our understanding of why transformers, like the amazing LLMs, are able to learn in context. It extends to learning linear functions with softmax attention. It provides nontrivial, rigorous guarantees as well as experimental evidence corroborating the theory."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper extends a line of work showing what transformers are capable of learning. In particular, it shows that causal transformers are capable of learning kernelized linear functions, in context, which means that each token is the same linear function of the previous token. Previous work had focused on linear attention whereas this work extends to softmax attention. It also handles period sequences."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This is not a weakness of the work as much as a fit for the conference. This work seems important but also could arguably be more appropriate for a specialized theoretical audience like various theory conferences (COLT/ALT/STOC/FOCS). Previous works have covered linear functions and the extension to kernels and softmax attention is undoubtedly important for our understanding, but the question is how many of the conference attendees will appreciate these contributions. Of course, theory is a topic on the CFP and transformers are a key interest for ICLR so it's not a bad fit."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The questions and suggestions are given by the weakness section. The reviewer believes that this work requires significant improvement to reach his/her expectation and doesn't seem to be possibly addressed within the time frame of ICLR 2025 rebuttal phase.\n\nHowever, if the author is able to resolve at least (1) or (2) in the question section, the reviewer might be able to improve his/her opinion."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The strength comes mainly from the problem setup of studying next-token prediction, which seems to be quite an important task. The proof looks sound. The presentation of the work is clear. The authors perform empirical evaluation for their theoretical results. The discussion of theoretical results are provided in the work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers the capacity of causal Transformers to the prediction of next token $x_{t+1}$ given autoregressive sequence $(x_1,\\ldots,x_{t})$ as a prompt where $x_{t+1}=f(x_t)$. They explicitly construct a Transformer that learns the mapping $f$ in-context through a causal kernel descent method (which connects to the Kaczmarz algorithm) and prove long scope guarantees."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The major concern over this work is listed as follows.\n\n(1) The problem setup is not realistic enough in the sequential relationship. In particular, the reviewer does not believe either the language or the time series admit simple relationship of $x_{t+1}=f(x_{t})$. Hence, the reviewer cannot understand how this work contributes to the understanding of Transformers on these modern ML tasks, which is believed to be very crucial to the ML community. Moreover, the reviewer believes that RNN might be able to do the same task as the results claimed in this work for Transformers. If this is true, the results might universally hold for many sequential models. Then, the unique advantage of Transformer is not highlighted.\n\n(2) The problem setup is not realistic enough in the deterministic assumptions. The reviewer believes that either Markov chains or some random autoregressive models are necessary for the purpose of studying the universality of next token prediction of Transformers. At the current stage, this fully deterministic dynamic system seems rather naive.\n\n(3) The limitations of this work is not properly discussed at the conclusion part."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors clarify how their results using particular kernel choices and corresponding attention mechanisms contribute to understanding the universality of transformers in learning sequence-to-sequence functions and what they mean by universality in this context?\n3. In Definition 2 could the authors clarify how is the periodicity enforced in instance (3). \n4. In Proposition 1, it’s unclear what $n$ refers to, as elsewhere it denotes the number of layers, but here it relates only to the first layer that computes augmented tokens. In Appendix A.1, $n$ seems tied to the specific positional encoding. Could the authors clarify this?\n5. Is it correct that in the presented model, the first layer computing the augmented tokens always uses softmax attention and does not have a skip connection, whereas the identical layers can employ different normalizations (softmax, linear, exponential) and include skip connections?\n6. In the paragraph: Augmented tokens, could the authors clarify why $e_1^0 := (0_d,1,x_t,1,0_d,0_d)$ rather than $(0_d,1,0_d,1,x_t,0_d)$ based on the feed-forward map in appendix A.1.\n7. (Minor) In Remark 1, there may be a typo in the definition of $\\epsilon_1$.\n8. In Equation 8 could the authors provide more insights regarding the interpretation of $\\mu$ and its relation with the data-generating process?\n9. For instance (4) described in section 5, did the authors try to train a model, and if so, what were the results?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-written. \n- The problem is conceptually well-motivated.\n- The proposed method and analysis are clearly explained with all necessary details.\n- The manuscript includes both rigorous proofs and empirical experiments that validate the findings.\n- The proof technique and its connection to the Kaczmarz algorithm in Theorem 3 are insightful."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies the approximation abilities of causal transformers when predicting the next token in-context for autoregressive sequences. In particular, it focuses on specific instances where the context-dependent map $f$ determining the next token $x_{t+1} = f(x_t)$ in the sequence belongs to the RKHS of a given kernel $k$ and is either linear or the sequence is periodic. The authors introduce the causal kernel descent method and theoretically analyze its convergence properties. They provide a construction of the transformers layers with linear, exponential and softamx attention that can implement this method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The choice of normalization in the attention appears to be specifically tailored to match the kernel used in the data-generating process (see Definition 2 and Theorem 1). This design decision raises questions about the generalizability of the findings, making it unclear how these results might extend to broader settings.\n2. Since the analysis is confined to the class of functions determined by the chosen kernel, it is not clear how the results illustrated in the paper contribute towards understanding the universality of transformer models.\n4. The theoretical results presented in the paper rely on taking the limits as the context length $t$ and the number of layers $n$ approach infinity. However, the empirical experiments reported in Figure 4, demonstrate that Transformers with a finite number of layers, trained using the Adam optimizer, can achieve better performance than the proposed infinite-layer model. This discrepancy suggests that finite-sized trained Transformers may implement a different algorithm than the causal kernel descent method.\n5. Exponential convergence for the case of exponential kernel and exponential or softamax attention requires periodic sequences."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. 250 typo $\\epsilon_1(n,t) = \\mathcal{M}^n(x_{1:t}) - \\mathcal{M}(x:1,t)$?\n\n2. In the experiment section, how did you compare fine-tuning $\\mathcal{M}^n$ with infinitely deep model? Did you just take $\\mathcal{M}$ to be the ground truth? (Figure 5 is missing)"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Very mathematically sound and interesting. The universality of this next-token predictability is a strong result. \n\n2. Causal kernel descent is a very intriguing framework to analyze sequence models in context prediction, taking the important aspect of masking out the future into account."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper theoretically studies the universality of Transformers for next-token prediction. The authors specifically consider the sequences of the form $x_{t+1} = f(x_t)$. In particular, they theoretically analyze two types of instances: for linear $f$ and periodic sequences (exponential kernel). They construct augmented tokens and show that an explicitly constructed Transformer can learn in context to accurately predict the next token asymptotically, through a causal kernel descent method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The theoretical setting is limited to when $f$ is linear and when the sequence is periodic, with each point in the context being on the unit sphere. Neither cases seem too complicated in the first place, and so the \"universality\" does not seem well justified in the context of Theorem 1. Perhaps the authors can justify the reduction to these sub-classes of functions more explicitly.\n\n2. Both the process of generating the augmented token $e_t^0$ from $x_{0:t}$, as well as the causal kernel descent iteration theoretically involves an infinitely deep Transformer as $n\\to\\infty$. It might be more informative to discuss the theoretical implications of finite depth. \n\n3. The universality of next token predictability result is asymptotic in the context length $t$. Although the convergence is fast enough (exponential), most empirical observations are in the non-asymptotic regime in terms of context length. It might be good to explicitly provide analysis on this."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- L178: why is your setup better reflective of how LLMs are trained? Please elaborate.\n- L194: Why does the unit-norm of $x$ imply that the kernel values for $k(x_{i},x_{i})$ are same for all $i$?\n- Eq (3): why do you need to model the projection (in this exact way of picking the last $d$ coordinates)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "**Despite not being very familiar with kernel methods and learning theory (but somewhat familiar with Transformers and in-context learning), I believe that the authors' contributions are relevant and interesting, though I cannot assess their (learning theoretical) details.**\n\nParticular, I find the paper is strong in the following aspects:\n- crisp and clear setup in the abstract\n- intuitive Fig 1 (for the setting, not for the results)\n- very well summarized contributions list\n- the results seem to be strong (I am uncertain about how realistic the setting is)\n\nNonetheless, as I will detail below, **I think the presentation needs to be significantly improved**."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper aims to advance our understanding of causal Transformers for next-token prediction in autoregressive settings by showing how to construct networks that can solve the next-token prediction in-context learning task."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### Major points\n- the paper is very dense, even though the authors could have used an additional 10th page. I'd suggest more explanations for the result, and less formulas in the main text\n- the intuition seems to be missing (for me) as to why you need the augmented tokens. What do they mean?\n- Figure 2 needs to be explained; the one-sentence reference in L342 does not explain (for me, at least) what all the quantities in the figure are and how I should think about them).\n- **It is unclear to me how a Transformer can implement Eq (7) if one problem is with the non-causal descent.** You still need to modify the training method, right? Please elaborate how this works/correct me if I am wrong (I see how you can construct the Transformer, but I do not see how the Transformer itself can account for the change in the descent method). If this point is about the \"meta-optimization\" during solving the in-context learning task, then please say so explicitly\n- What is the rationale behind the construction of $f$ in the _\"More Complex Iterations\"_ paragraph?\n\n### Minor points\n- please use equation numbers for the main equations \n- please define all quantities before you use them\n- Definition 1: what is $S^{d-1}$?\n- Definition 2: what is $O(\\cdot)$?\n- Definitions 1 and 2 would be better suited as assumptions\n- L206: please explain what $e$ is, what the indices stand for, and why you add multiple beginning-of-sequence tokens and tokens of \"1\".\n- Proposition 1: what is $\\mathcal{N}?$\n- L246: what makes these constructions explicit? As far as I understand, you specify the dimensions, but that leaves many degrees of freedom (ie, all the elements can be of any value)?\n\t- From Prop. 5 and checking A.8, I get a sense what these matrices would be. Thus, as you have a contruction, please refer to this fact to the reader, otherwise, calling a matrix explicit without saying that you can calculate all the values can be misleading.\n- L286: do you mean the update of $u_{t}$ depends on $x_{1:t}?$"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We investigate how causal Transformers can predict the next token in a sequence by constructing models that use a causal kernel descent method to learn context-dependent functions."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Understanding the Universality of Transformers for Next-Token Prediction},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yWoV4Ca6ji},\nnote={under review}\n}"
},
"abstract": {
"value": "Causal Transformers are trained to predict the next token for a given context. While it is widely accepted that self-attention is crucial for encoding the causal structure of sequences, the precise underlying mechanism behind this in-context autoregressive learning ability remains unclear. In this paper, we take a step towards understanding this phenomenon by studying the approximation ability of Transformers for next-token prediction. Specifically, we explore the capacity of causal Transformers to predict the next token $x_{t+1}$ given an autoregressive sequence $(x_1, \\dots, x_t)$ as a prompt, where $ x_{t+1} = f(x_t) $, and $ f $ is a context-dependent function that varies with each sequence.\nOn the theoretical side, we focus on specific instances, namely when $ f $ is linear or when $ (x_t)$ is periodic. We explicitly construct a Transformer (with linear, exponential, or softmax attention) that learns the mapping $f$ in-context through a causal kernel descent method. The causal kernel descent method we propose provably estimates $x_{t+1} $ based solely on past and current observations $ (x_1, \\dots, x_t) $, with connections to the Kaczmarz algorithm in Hilbert spaces. We present experimental results that validate our theoretical findings and suggest their applicability to more general mappings $f$."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Transformers",
"In-Context Learning",
"Deep Learning Theory"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/04f933c50a3fe13a42c26fb9a616613af2e158d6.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Understanding the Universality of Transformers for Next-Token Prediction"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yXCTDhZDh6 | Point-SAM: Promptable 3D Segmentation Model for Point Clouds | main | Active | 3D vision;promptable segmentation;point cloud segmentation | applications to computer vision, audio, language, and other modalities | 5;5;5;6;6;6 | 5;4;3;5;4;4 | 2;3;3;3;3;3 | 2;3;2;2;3;3 | 3;3;3;3;2;2 | 5.5 | 4.166667 | 2.833333 | 2.5 | 2.666667 | 0.242536 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Regarding the generation of pseudo labels, lines 265-269 state that the generated 3D proposal, using the 1st view’s random selected 2D prompt from SAM’s proposal, is projected back into the same view. This is then used to sample a 2D prompt (negative prompt) from the error region between the two (2D-3D proposal). Based on Figure 3, this is done in the next view (view 2). The first is used to sample the 2D prompt, the generated 3D proposal is used to prompt SAM with the 2nd view as input, and then the negative prompt is sampled for the generated 2nd 2D proposal. So, which of the two is the correct procedure?\n- In Table 3, the MV-SAM baseline is absent for the ScanObjectNN dataset. Additionally, it's unclear which specific approach the term “InterObject3D++” refers to (lines 391 and 395). Is this the same baseline used in AGILE3D? \n- In Figure 4, what is the difference of blue and red point prompts?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+ The method is a step forward towards 3D foundational models, eliminating the need of using multiple 2D views of the 3D object and 2D-3D lifting of SAM proposals at inference, while it provides the ability of refining the 3D proposals with additional prompts (as seen in supp.).\n+ Unified training strategy on 3D point clouds across several datasets, covering different modalities either at annotation or scale level (part, object masks; single object, entire scenes).\n+ Good performance on zero-shot point-prompted segmentation w.r.t. alternative methods.\n+ Knowledge distillation from 2D foundational models such as SAM during training to further enhance the generalizability of Point-SAM."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of deploying 3D foundational models by introducing a novel 3D promptable segmentation model for point clouds, named Point-SAM. This model extends the Segment Anything Model (SAM) into the 3D domain by developing a promptable 3D architecture that utilizes both point and mask prompts. These prompts, together with the input point cloud, are processed through a transformer-based encoder-decoder architecture to generate a 3D segmentation mask.\n\nThe Point-SAM model first tokenizes the input point cloud using one of two approaches: a single set abstraction layer (based on PointNet++), which creates patch-based features centered at each patch’s centroid encoding local geometry, or a Voronoi partition-based tokenizer, where each Voronoi cell represents the patch’s centroid, and point-wise features are max-pooled within each cell. The Voronoi-based method reduces computational and memory costs by bypassing the need for dense point-wise feature extraction in each centroid's vicinity. Patch features are then extracted through a ViT model, and mask prompts are incorporated from previous decoder iterations. The model upscales these patch embeddings to the original point cloud resolution using 3-nn inverse distance weighting. Following this, a two-way transformer facilitates interaction between point prompts and point cloud embeddings by utilizing cross-attention operations. The final 3D proposal is derived by applying an output token processed through a MLP.\n\nPoint-SAM is trained on a diverse set of datasets that include part-level and object-level annotations across single-object and scene-level modalities. To improve generalizability on out-of-distribution data, the ShapeNet dataset is used to transfer knowledge from SAM. This is achieved by generating 2D pseudo masks that serve as prompts to a pretrained Point-SAM model, iteratively refining its 3D proposals.\n\nPoint-SAM's is evaluated on several benchmarks for the tasks of zero-shot point-prompted segmentation, zero-shot object proposal and few-shot part segmentation, where it achieves performance that is either on par with or surpasses competing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The proposed Voronoi diagram tokenizer while it manages to lower the computational and memory cost of the overall pipeline, in many cases it fails to surpass the performance of the k-nn based tokenizer. \n- Regarding the OOD scenarios and particularly the PartNet-Mobility, the held-out categories (scissors, refrigerators, and doors) are all part of the PartNet, thus Point-SAM has seen these during training. This weakens the zero-shot transferability of the method.\n- The process of generating pseudo labels and transferring knowledge from SAM is somewhat unclear (see Questions)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the \"weaknesses\" section"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper extends a powerful 2D segmentation anything model (SAM) into the 3D point cloud domain\n2. It introduces a novel data engine to generate multi-level pseudo-labels and augment the training data\n3. The interactive segmentation video is impressive, which illustrates its potential real-world application"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper under review presents a 3D promptable segmentation model for point clouds, called Point-SAM. Similar to the architecture of Segment Anything Model(SAM), Point-SAM also contains three parts: a point-cloud encoder, a prompt encoder and a mask decoder. A Voronoi tokenizer is adopted to divide the point cloud into patch tokens. To improve the generalization of the model, multiple existing 3D segmentation datasets are included in training. Additionally, to augment part-level segmentation data, the authors utilize both pre-trained Point-SAM to obtain an initial 3D mask and then leverage SAM to refine the mask using additional views. The method demonstrates good performance on several indoor and outdoor scenes and showcases applications such as interactive 3D annotation, zero-shot 3D instance proposal and few-shot part segmentation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of Visualization Results: The authors provide only a limited number of visualizations in both the main paper and supplementary materials. The qualitative results presented in Figure 4 are inadequate. It is anticipated that more complex examples, such as those depicted in the supplementary videos, could be included. Moreover, several applications discussed in the paper, such as few-shot part segmentation and zero-shot object proposal generation, lack corresponding visualization results.\n\n2. In line 40, the paper asserts that \"multi-view images only capture the surface, making it infeasible to label internal structures.\" However, the paper does not include examples, such as drawers, to demonstrate the superior performance of Point-SAM in these scenarios. The authors are expected incorporate additional experiments or examples to make this claim more convincing.\n\n3. The segmentation quality does not look good: The paper does not directly compare its results with those of other 3D frameworks that merge results across multiple views, despitethe experiments on MVSAM. While it is acknowledged that most of these frameworks require post-processing, which can be time-consuming, their segmentation quality reported in those papers are superior, particularly along boundary regions, such as teh results in SAM3D and more advanced methods like Gaussian Grouping.\n\n4. Prompt Point Selection: Could the authors provide a more detailed explanation of how point prompts are selected in the experiments? In Figure 4, the points are not consistently placed, and some points are marked as negative in one method while none are negative in another. Also. the positions of points are different."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could you provide more visualizations for object-level segmentation in OOD scenarios to demonstrate the generalization of the method?\n\n2. Are there any special design considerations when training with both indoor datasets and object-level datasets together?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper presents a clear writing logic, effectively outlining the challenge to be addressed and the three distinct perspectives of the research.\n2. The 3D segmentation task is a crucial direction in embodied intelligence, as it enables machines to understand and interact with complex environments. Moreover, scaling up to achieve 3D foundation models presents significant value."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the 3D promptable segmentation task by examining it from three key aspects: task, model, and data. The authors propose a unified model named Point-SAM, which utilizes a point cloud representation and introduces a novel tokenizer based on Voronoi diagrams. In terms of data, they explore the generation of pseudo labels using SAM. The experimental results highlight the model's robust zero-shot transferability to unseen point cloud distributions and new tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main consideration is the technical novelty of this paper, which leads me to feel that the overall contribution is somewhat weak.\n\n1. In terms of model design, the method introduces the Voronoi tokenizer, which is innovative. However, compared to KNN, there is no significant performance improvement; the gains are only noticeable when the number of prompt points is low.\n2. Additionally, there are many works utilizing SAM for 3D pseudo-labeling, whether in autonomous driving or in indoor settings, such as SAM3D, which also leverages multi-view consistency in 2D projections to refine results. What distinguishes the approach in this paper from previous works in terms of innovation?\n3. I have some concerns about the practicality of this method. Is it can serve as a more general tool for 3D segmentation labeling? For instance, in autonomous driving, if segmentation requires placing points on every object, it seems inefficient. The challenge with object-level distinction lies in identifying the various parts, making the placement of prompt points crucial.\n\nBesides, I would like to share some suggestions or open problems:\n\n1. I think semantic information is crucial for 3D foundation models, as segmentation can vary in granularity, and semantics often dictate the level of segmentation.\n2. Additionally, part segmentation without the need for interaction points is also valuable, such as distinguishing different parts in robotic grasping tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors develop a data engine to generate pseudo labels, providing sufficient diverse masks for datasets lacking ground truth. This allows Point-SAM to be trained with more heterogeneous datasets.\n- Following the design philosophy of SAM, the implementation on 3D point clouds is shown through experiments to endow Point-SAM with zero-shot capabilities.\n- The Voronoi tokenizer achieves comparable performance to the KNN tokenizer, while showing superior efficiency in KITTI360 dataset."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper adheres to the philosophy of the Segment Anything Model by proposing Point-SAM, a 3D promptable segmentation model specifically designed for point clouds. It primarily makes two contributions: (1) a scalable and efficient transformer-based architecture that facilitates 3D promptable segmentation; and (2) a data engine that distills knowledge from the Segment Anything Model to generate pseudo labels. Experiments conducted on various indoor and outdoor datasets have demonstrated its zero-shot transferability and significant application potential."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The challenges presented in the introduction are not well sovled by the proposed method. For instance, the paper mentions \"There is no unified representation for 3D shapes,\" yet does not provide a unified representation and only designs for point cloud representation. \n- While the authors have made efforts to train Point-SAM on a diverse set of datasets, the reliance on synthetic datasets and the dominance of indoor scenes may limit the model's generalizability to outdoor and more varied environments. The paper would benefit from an evaluation on a broader range of datasets, particularly those capturing diverse outdoor scenes such as Waymo Open Dataset [1] and nuScenes [2].\n- Although the paper claims efficiency improvements with the Voronoi tokenizer and tests it on KITTI360 with 10 prompt points, a comprehensive analysis of the computational costs is lacking. The authors should provide more detailed benchmarks comparing Point-SAM with other methods in terms of both speed and memory usage.\n\n[1] Sun, Pei, et al. \"Scalability in perception for autonomous driving: Waymo open dataset.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2020.\n\n[2] Caesar, Holger, et al. \"nuscenes: A multimodal dataset for autonomous driving.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2020."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could you clarify the difference between the proposed pseudo label generation method and \"SERF: Fine-Grained Interactive 3D Segmentation and Editing with Radiance Fields\"?\n\nAdditionally, the proposed Voronoi approach shows only minimal improvement compared to KNN. Could you clarify the efficiency comparison between these two?\n\nBut overall, it is a good work."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Propose a better tokenizer and show reasonable performance improvement compared with baseline."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a method for achieving generalizable 3D point segmentation by leveraging prior knowledge from the 2D Segment Anything framework."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Missing references:\n\n\"Segment Anything 3D\"\n\"SANeRF-HQ: Segment Anything for NeRF in High Quality\"\n\"SERF: Fine-Grained Interactive 3D Segmentation and Editing with Radiance Fields\"\n\"Segment Anything in 3D with Radiance Fields\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Besides the questions in weakness, I also have some other questions:\n1. The authors listed Replica as an evaluation datasets in Table 2, but only compared with OpenMask3D in appendix but not with other baselines. Why do you make this choice? Combined with what mentioned in Weakness 1 and Table 6, is this due to your model is very sensitive and not so well-defined for large scale data?\n2. According to Figure 4, for KITTI360 and S3DIS datasets, are the red and blue balls the visualization of prompt points? If so, why the prompts to Point-SAM and AGILE3D are not consistent? Do you use the same prompt in your main experiments?\nI am open to raising my score if the authors can thoroughly address the weaknesses and questions."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Unlike many 2D-to-3D segmentation lifting work, this paper chooses to distill knowledge from SAM in training phase instead of aggregating in very evaluation, making it more efficient and not requiring running SAM for several times during inference. The shows great performance on choosing datasets, showing great performance on zero-show segmentation task due to distilling and better model design."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is mainly built to tackle 3D promptable segmentation task. The authors propose a segmentation model called Point-SAM, which can take 3D point prompts and mask prompts as input prompt, alongside a Voronoi tokenizer is used to fuse dense prompts. This model is trained on two phases: 1. existing well-labeled datasets; 2. unlabeled datasets, whose pseudo labels are generated by pre-trained Point-SAM from phase 1 and SAM, in order to distilling knowledge from SAM. They compare their method with baselines on zero-shot segmentation task, and showing much better performances."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main contribution of this paper can be divided into two folds: better model design and novel data engine to distill SAM instead of aggregating. So the paper should demonstrate the effectivity for both these two parts. However, the experiments are not strong enough:\n1. This model choose to compare with AGILE3D, a straightforward baseline. In main experiments, the experiment is not fair, since the model proposed actually trained on much larger datasets than the baseline. In order to have a fair comparison, they also trained the model with the same training set as baseline in Table 6.a. However, this method only show apparent better performance on 2 out of 4 datasets. The authors argue it may due to that the baseline is overfitting to these kinds of data, but it’s assertion is not strong enough, because these two datasets both tend to include scenes with much more points than the other two. At least a cross analysis ablation, i.e. train A test B and train B test A.\n2. The baselines are not thorough and strong enough. It is strange why the baseline is not trained on the same level of data. And the multi-view SAM aggregation baseline is not strong enough. The authors make up one, MV-SAM. However, there are plenty of such multi-view aggregating baselines, such as SAM-guided Graph Cut, SAI3D, Open3DIS, OpenIns3D.\n3. In Table 4, evaluating only on PartNet-Mobility is not sufficient. From the table, we observe that adding ScanNet is less beneficial than adding ShapeNet. Further explanation or additional experiments are needed to clarify this difference."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024pointsam,\ntitle={Point-{SAM}: Promptable 3D Segmentation Model for Point Clouds},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yXCTDhZDh6},\nnote={under review}\n}"
},
"abstract": {
"value": "The development of 2D foundation models for image segmentation has been significantly advanced by the Segment Anything Model (SAM). However, achieving similar success in 3D models remains a challenge due to issues such as non-unified data formats, poor model scalability, and the scarcity of labeled data with diverse masks. To this end, we propose a 3D promptable segmentation model Point-SAM, focusing on point clouds. We employ an efficient transformer-based architecture tailored for point clouds, extending SAM to the 3D domain. We then distill the rich knowledge from 2D SAM for Point-SAM training by introducing a data engine to generate part-level and object-level pseudo-labels at scale from 2D SAM. Our model outperforms state-of-the-art 3D segmentation models on several indoor and outdoor benchmarks and demonstrates a variety of applications, such as interactive 3D annotation and zero-shot 3D instance proposal."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"3D vision",
"promptable segmentation",
"point cloud segmentation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/804584110652cd558e5b0bbde128d7bd2a5aaa31.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/277d4b0f6a2dec95a0dbababfe72d3dd91bb54da.zip"
},
"title": {
"value": "Point-SAM: Promptable 3D Segmentation Model for Point Clouds"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yYQLvofQ1k | Two Heads Are Better Than One: A Multi-Agent System Has the Potential to Improve Scientific Idea Generation | main | Active | Large Language Model;Multi-agent System;Collaboration Strategy;Automatic Scientific Discovery;Science of Science | applications to robotics, autonomy, planning | 3;3;5;5 | 5;3;3;4 | 2;2;3;3 | 2;2;2;3 | 2;3;3;3 | 4 | 3.75 | 2.5 | 2.25 | 2.75 | -0.301511 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The experimental part of the paper cannot illustrate the usability of scientific findings, which is relatively fatal. Can you provide some analysis or examples to prove that the scientific findings generated by VIRSCI can be instructive to us?\n\n2. From the paper, I saw that the multi-agent system team adopted a loop execution strategy during the discussion. Does this part of the paper propose a novel organizational structure and interaction mode? Can sequential execution guarantee the final effect of the experiment?\n\n3. The experiment setting in the ablation study was conducted under a 5-turn discussion. Is it possible that the effect of other turn discussions cannot produce such a conclusion? Experiments conducted under only one setting lack certain persuasiveness to show that these technologies are necessary, and whether it can be strengthened through examples to prove that these technologies do improve the overall performance of the system."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Originality\n\nThis paper is the first to apply the LLM-based multi-agent system to the problem of scientific discovery. It realizes the generation of research ideas in autonomous scientific discovery.\n\n2. Quality\n\nThis paper not only implements a multi-agent system for automatic scientific discovery, but also conducts a large number of experiments to verify the effectiveness of the system, and explores the different effects of system settings on the results. It also conducts a lot of experiments to verify the effectiveness of many methods in the system, which is a high-quality work.\n\n3. clarity\n\nThis paper clearly explains the construction and operation process of the system, and details the implementation of each step of the process. At the same time, the language description of the paper is very clear, and the technical explanation is detailed. The experiment part also clearly describes the experimental settings and experimental process. Many details in the experiment are explained and demonstrated in the appendix, which allows people to clearly understand all the technical details.\n\n4. significance\n\nThis paper is the first attempt to apply the LLM-based multi-agent system to the field of scientific exploration, which allows us to see greater possibilities of AI for science. This shows that in the future, multi-agent technology may really be able to make new and valuable scientific discoveries."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new multi-agent system VIRSCI, designed to mimic the teamwork inherent in scientific research. Multi-agent methods can produce more novel and influential scientific ideas than single agent. This indicates that integrating collaborative agents can lead to more innovative scientific outputs, offering a robust system for autonomous scientific discovery. The contributions of this paper are mainly in the following three aspects:\n\n1. A new multi-agent system VIRSCI was proposed, which constructed the entire pipeline from team organization to final idea formation.\n\n2. The experiment proves that VIRSCI has better performance than single-agent. At the same time, the paper explores the impact of different settings in the VIRSCI system on the final results.\n\n3. The simulation results are consistent with key findings in Science of Science, such as that new teams tend to produce more innovative research, demonstrating VIRSCI's potential as a powerful tool for future research in this field."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This paper not only pioneered the use of multi-agent technology in a completely new field, but also conducted sufficient experiments to verify the effectiveness of this system. The paper has a clear structure and logic, and the language is clear, which allows people to clearly understand the core ideas and innovations of the paper. However, this paper still has the following weaknesses:\n\n1. The indicators of the experimental part of the paper focus more on the novelty and dissimilarity of the ideas generated by VIRSCI, but are there any experiments that can illustrate the usability of scientific discoveries generated by VIRSCI? Is there an analysis of the feasibility of all the abstracts generated by the system, and what proportion of them are likely to provide exploration for our scientific discoveries after preliminary screening?\n\n2. The paper is very clear in its drawings and processes, but the organizational structure and interaction mode of each agent in each process are relatively poorly described. In addition, the differences and characteristics of the other scientist agents, except for the team leader, are not well described.\n\n3. The experimental scenario of the paper is relatively simple, and experiments were only conducted in the field of computer science, which is also mentioned in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could \"usefulness\" and \"practically-feasible\" metrics be added during the novelty assessment to provide a broader evaluation of the generated ideas?\n2. In section 3.1 under adjacency matrix section, why choose a simple increment of 1 in the adjacency matrix? Would a distribution function, like a normal distribution, or an explore-exploit model provide better results?\n3. Given the potential variation in capabilities across LLMs, have you assessed how team size or turn count might need adjusting for different models?\n4. How do you ensure that high-scoring abstracts are practically feasible for real-world scientific research?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The multi-agent approach proposed in this paper has the potential to greatly enhance the quality and breadth of scientific research. The discussion-oriented idea generation closely mirrors real scientific processes.\n2. The 5-step approach proposed in this paper—comprising Collaborator Selection, Topic Selection, Idea Generation, Idea Novelty Assessment, and Abstract Generation—presents a promising and robust framework for idea generation. The evaluation metrics (CD, CI, and HD) are logically sound.\n3. VIRSCI, a multi-agent system for scientific collaboration, shows clear advantages over single-agent methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces VIRSCI, a multi-agent, LLM-based system designed to simulate teamwork-driven scientific research. The system organizes agents to mimic collaborative processes, including selecting collaborators, generating research ideas, assessing novelty, and drafting abstracts."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Generating the abstract is a solid starting point, but conducting at least some preliminary experiments based on the abstract would add greater impact.\n2. There is no metric to assess the practical feasibility of the abstract or idea. While VIRSCI may generate highly unique or novel ideas, these are less valuable if experimental designs cannot support them within practical constraints.\n3. The system’s effectiveness may vary with the underlying LLM’s capabilities."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Limitation of the dataset: I like the idea of the collaborative approach of the agents in generating research ideas, but I am curious about the model’s potential for generalization as the knowledge bank expands to include papers from additional domains and recent years. The current threshold, limited to authors from 2000 to 2014, may overlook recent growth in CS publications. Could you discuss how extending the dataset to include more contemporary research might impact the model’s ability to generalize and its applicability beyond historical data?\n2. Validity of the ideas: the authors propose three metrics for measuring their VIRSCI’s performances compared to AI Scientist, but those mainly focus on the novelty and the potential impact of the idea by comparing embedding distances. However, since this is a research idea generation system and if we potentially want to put it into actual use, we would not only care about the novelty of the idea but also the feasibility and validity of the idea, if the idea makes sense, and how reasonable they are, which is currently missing in the currently proposed metrics. The whole paper puts a lot of emphasis on measuring the novelty of the ideas but does not mention if they manually review if the outputs make sense, there is a chance that the idea is novel but is due to the model's hallucinations. \n3. Practical use of the tool: Given that this framework is proposed as a system for generating research ideas, I am curious about its practical value beyond generating novel concepts. While the focus on novelty is valuable, real-world research applications require ideas to be both feasible and sound. Could you expand on how this system might be used practically and how you see it balancing novelty with the reliability or reasonableness of its outputs? This could be done with human evaluation, or comparing generated ideas with more recent papers and see if there are actual matches, etc."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Novel framework with a new dataset and metric: The authors propose a novel research generation framework with LLMs collaborating on this task through a 'team discussion' mechanism to simulate discussion scenarios in real life. With abstracts as the novel ideas output of the model, the authors also construct a benchmark dataset and evaluation metrics to measure the performances. \n2. Comprehensive experiment design: the authors consider multiple aspects of the framework, for example, the # of team members, components of the system, etc. would make a difference to the result as an ablation study."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces VIRSCI, an LLM-based multi-agent system, a framework specifically designed to model the teamwork collaboration style in scientific research. This is a framework of multiple agents collectively working on research idea generation made up of five phases, similar to how a human would do. To do so, this paper proposes a novel team discussion mechanism based on a paper database as the knowledge bank, leading to generating abstracts and ideas from the LLM agents. To evaluate this framework, the authors also introduce a benchmark focusing on the novelty of the idea to measure performances of their model from three aspects."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited domain and scope in the benchmarking dataset: the authors only include computer science as the scientific research domain in the dataset and with a limited number of years. Given the rapid development in the field, this limited temporal scope may fail to capture the latest developments and trends, especially since the number of papers and ideas is not growing linearly. Expanding the dataset to include a broader range of years or domains would provide a more comprehensive foundation for idea generation and make this paper more sound.\n2. Limited evaluation via a self-defined metric focused on novelty: the paper evaluates the model's performance mainly through a novelty metric derived from embedding distances and based on citation counts. While novelty is important, it should not be the only focus of research idea generation. Additionally, semantic comparisons are made with papers from only 2000-2010 and 2010-2014, which may not fully represent this field’s progress. To better validate their metric, the authors could supply human evaluation or case studies to validate that semantic distance accurately reflects the quality of this metric."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "**Reliability and Misrepresentation of Scientific Findings**:\n\n**Lack of Human Oversight**: The absence of human expert evaluation in validating the generated ideas and their novelty may lead to the propagation of inaccurate or misleading scientific concepts. This could have negative implications if such ideas are considered without critical scrutiny.\n\n**Ethical Responsibility in Scientific Research**: The automation of scientific idea generation should be approached with caution to prevent the dissemination of unvetted or erroneous information, which could affect the scientific community's trust and the progress of research."
},
"flag_for_ethics_review": {
"value": [
"Yes, Potentially harmful insights, methodologies and applications"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Suggestions for Improvement:\n\n**Address Data Leakage Concerns**: To mitigate the impact of data leakage, the authors should consider using more recent or entirely new datasets that the LLMs have not been exposed to during training. Alternatively, they could implement techniques to control for data overlap and assess how much the models' prior knowledge influences the results.\n\n**Include Human Evaluation**: Incorporating human annotators or expert reviewers in the evaluation process would enhance the validity of the findings. Human judgments could corroborate the LLM-based assessments and provide nuanced insights into the quality and novelty of the ideas.\n\n**Highlight Methodological Innovations**: The authors should emphasize any unique aspects of their framework that advance multi-agent systems for scientific research. Detailed explanations of innovative strategies or mechanisms would strengthen the contribution."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "**Novel Exploration of Team Size and Idea Novelty**: The paper provides an insightful exploration into how team size affects the novelty of generated ideas. It demonstrates that there is an optimal team size (e.g., eight members) that maximizes creativity without overwhelming the collaborative process. This finding is interesting and contributes to the understanding of team dynamics in scientific idea generation.\n\n**Rich Experiments and Ablation Studies**: The authors conduct extensive experiments and ablation studies, examining various factors such as team size, team freshness, research diversity, and discussion patterns. This comprehensive approach strengthens the validity of their conclusions and provides valuable insights into key variables affecting multi-agent systems in research.\n\n**Simulation of Collaborative Scientific Processes**: The paper presents a structured framework that simulates the collaborative process of scientific research, from team formation to idea generation and evaluation. This approach aligns closely with real-world scientific practices and showcases how LLM-based multi-agent systems can be applied in this context."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces VIRSCI, a Large Language Model (LLM)-based multi-agent system designed to simulate the collaborative nature of scientific research teams. The system comprises virtual scientists that engage in collaborative discussions to generate, evaluate, and refine research ideas. The authors aim to replicate the teamwork inherent in scientific discovery and explore how multi-agent collaboration can enhance the novelty and impact of generated scientific ideas."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Possible Data Leakage Affecting Novelty Evaluation**: In Section 4.1, the authors utilize a dataset comprising papers published between 2000 and 2014, with a particular emphasis on data from 2011 to 2014. Given that contemporary LLMs may have been trained on data from this period, there is a significant risk of data leakage. This means the models might have already been exposed to the data used in the experiments, potentially generating ideas that are not genuinely novel but rather reproductions of existing concepts. This compromises the ability to accurately assess the novelty of the generated ideas and undermines the validity of the experimental results.\n\n**Lack of Human Evaluation to Validate LLM Metrics**: The paper relies heavily on LLM-based evaluations (e.g., using GPT-4) to assess the quality and novelty of the generated ideas and abstracts. However, there is no involvement of human annotators or experts to validate these metrics. Without human evaluation, it is difficult to ascertain the effectiveness and reliability of the LLM review metrics. The absence of human validation raises concerns about the robustness of the conclusions drawn from these evaluations.\n\n**Limited Innovation in the Framework**: The proposed framework seems to be an application of existing LLM-based multi-agent systems to the specific domain of scientific idea generation. While the application is interesting, the methodological innovation appears limited. The framework primarily extends basic LLM multi-agent methodologies without introducing significant novel approaches or mechanisms specific to the challenges of research idea generation."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a multi-agent system that has the potential to improve scientific idea generation, suggesting promising avenues for exploring collaborative mechanisms in scientific research."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024two,\ntitle={Two Heads Are Better Than One: A Multi-Agent System Has the Potential to Improve Scientific Idea Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yYQLvofQ1k},\nnote={under review}\n}"
},
"abstract": {
"value": "The rapid advancement of scientific progress requires innovative tools that can accelerate discovery. While recent AI methods, particularly large language models (LLMs), have shown promise in tasks such as hypothesis generation and experimental design, they fall short in replicating the collaborative nature of real-world scientific practices, where diverse teams of experts work together to tackle complex problems. To address the limitation, we propose an LLM-based multi-agent system, i.e., Virtual Scientists (VirSci), designed to mimic the teamwork inherent in scientific research. VirSci organizes a team of agents to collaboratively generate, evaluate, and refine research ideas. Through comprehensive experiments, we demonstrate that this multi-agent approach outperforms the state-of-the-art method in producing novel and impactful scientific ideas, showing potential in aligning with key insights in the Science of Science field. Our findings suggest that integrating collaborative agents can lead to more innovative scientific outputs, offering a robust system for autonomous scientific discovery."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Model",
"Multi-agent System",
"Collaboration Strategy",
"Automatic Scientific Discovery",
"Science of Science"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e88cc532513f77de82edc9ae9e346142581f8e9c.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Two Heads Are Better Than One: A Multi-Agent System Has the Potential to Improve Scientific Idea Generation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yYZbZGo4ei | Accelerating Diffusion Transformers with Token-wise Feature Caching | main | Active | Diffusion Models;Image generation;Video generation;Model Acceleration;Feature Cache | generative models | 5;5;6;6 | 4;5;4;4 | 3;3;3;3 | 3;2;2;3 | 3;3;3;3 | 5.5 | 4.25 | 3 | 2.5 | 3 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In table 2, ToCa does not outperform PAB with 1.24x speedup (78.34 vs 78.51). Although ToCA achieves a more significant speed-up, it is crucial to preserve the generation quality. I am wondering if ToCa can have better generation quality using a more conservative setting that has similar speed-up to PAB?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The visualizations of temporal redundancy and error propagation illustrate the motivation of ToCa in an intuitive way, making the design choices relatively well motivated.\n- The four token selection scoring functions and layer-specific cache ratios are natural design choices that enable the caching strategy to be more fine-grained\n- ToCa achieves more than 2x acceleration ratios on both text-to-image and text-to-video tasks, while having better quality than baselines.\n- ToCa’s training-free nature makes it more practical for real-world application."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces ToCa (Token-wise feature Caching), a training-free feature caching method tailored to accelerate diffusion transformers. ToCa allows for adaptive token selection, which aims to cache tokens with minimal caching error based on various criterions. Experiments have been conducted on PixArt-α, OpenSora, and DiT models to show the speedup and quality trade-off of ToCa."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While ToCa achieves reasonable benchmark results, some artifacts remain in the generated images compared to the originals. For instance, in Figure 6, the moon is missing from the \"wolf howling at the full moon\" prompt, and the background forests appear blurred in the \"tranquil beach\" prompt. It is necessary to demonstrate how ToCa performs with high-resolution images (1024x1024) generated by more advanced models like FLUX.1-dev [1].\n- Another important direction in accelerating diffusion models involves reducing the required sampling steps. However, this might compromise ToCa's effectiveness since fewer steps may result in greater feature variations between steps, potentially making them less suitable for caching. Please include experimental results using models with reduced sampling steps, such as FLUX.1-schnell [2] with 4 steps.\n- For advanced model evaluation, FID and CLIP scores may not adequately reflect human preferences. Please provide quantitative results using more recent metrics, such as image reward [3].\n- The method involves numerous hyperparameters, including four token selection scoring metrics ($\\lambda_1$, $\\lambda_2$, $\\lambda_3$, $\\lambda_4$) and caching ratios for different layers and timesteps ($\\lambda_l$, $\\lambda_{type}$, $\\lambda_t$). While A.3 provides a sensitivity analysis, it remains unclear how these parameters should be determined for new models or tasks. The authors should elaborate on hyperparameter selection and impacts, particularly addressing whether an automatic selection method (e.g., a calibration procedure) exists. The complexity of hyperparameters could limit the method's generalizability and practical application.\n- The \"Acceleration of Diffusion Models\" section lacks references to key fundamental works. Specifically, step distillation [4, 5] and consistency models [6] should be cited under \"reducing the number of sampling timesteps,\" and Q-Diffusion [7] should be cited under \"weight quantization.\"\n- Notation inconsistencies: s3 is generally defined as cache frequency but appears as spatial distribution in Table 4, which uses s4 for cache frequency.\n\n[1] https://huggingface.co/black-forest-labs/FLUX.1-dev \n[2] https://huggingface.co/black-forest-labs/FLUX.1-schnell \n[3] Xu et al. ImageReward: Learning and Evaluating Human Preferences for Text-to-image Generation. NeurIPS 2023. \n[4] Salimans et al. Progressive Distillation for Fast Sampling of Diffusion Models. ICLR 2022. \n[5] Meng et al. On Distillation of Guided Diffusion Models. CVPR 2023. \n[6] Song et al. Consistency Models. ICML 2023. \n[7] Li et al. Q-Diffusion: Quantizing Diffusion Models. ICCV 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The introductory paragraph of the methods section should be rephrased to clarify the concept of the \"naive scheme\" for feature caching. This naive scheme is actually no different from existing approaches, such as DeepCache, FORA, and delta-DiT, which should not be \"we propose\". \n\n2. It should be noted that Figure 3(b) does not depict any low-ratio caching scenarios. \n\n3. There is a discrepancy regarding the application of the scores s1, s2, and s3 across the layers. Specifically, s1 is applicable only to self-attention layers, while s2 pertains solely to cross-attention layers. However, the current definition combines these scores, which raises questions about how this aggregation functions across the different types of layers.\n\n4. Clarification is needed regarding the additional time cost associated with computing the selection scores. Given that the scoring relies on rank computation, there remains a need to calculate and distribute the cached tokens among the uncached tokens. This process is somewhat ambiguous, particularly concerning how the acceleration time is achieved. Presenting the actual extra costs incurred for selecting scores alongside the resulting speedup from the caching process would provide valuable insights."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The motivation behind the proposed approach is both technically sound and clearly explained, supported by two informative figures illustrating the varying levels of similarity among different tokens and the accumulation of error across these tokens. These visual aids effectively convey the rationale for the proposed method.\n\n2. The methodology for selecting scores and making decisions for each layer is novel, offering valuable insights into the acceleration of transformer models. This innovative approach not only enhances the efficiency of the models but also contributes to a deeper understanding of feature caching strategies within the context of transformers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Token-wise Caching (ToCa) is introduced as a fine-grained feature caching strategy designed to accelerate diffusion transformers. It uniquely considers error propagation in caching, utilizing four selection scores to determine the best tokens for caching without incurring extra computational costs. ToCa also allows for variable caching ratios across different layers and integrates multiple techniques to enhance caching efficiency. Extensive experiments on models like PixArt-α, OpenSora, and DiT show that ToCa achieves significant speedups while preserving nearly lossless generation quality."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. There appears to be a lack of a complete algorithmic description within the manuscript. Specifically, a detailed explanation is needed for the token selection process at each layer and each timestep, as well as the procedure for redistributing the cached tokens back into the overall framework. This omission could lead to misunderstandings regarding the efficacy and functionality of the proposed method.\n\n2. The manuscript does not adequately illustrate the computation costs associated with the proposed approach, making it challenging to comprehend the additional expenses involved. A clearer breakdown of these costs would enhance the reader's understanding of the method's efficiency.\n\n3. Including a token diagram for specific layers during both early and late timesteps, across various layer types, may provide additional clarity. Such visual representations could effectively illustrate how tokens are managed and utilized within the different stages of the computation, facilitating a better grasp of the overall caching strategy."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Same with my Weakness 1."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-written and easy to understand. \n- The observation that the similarity and propagated error differ for each token in DiT is very insightful.\n- Various metrics were proposed to measure the importance of tokens on reusing. \n- Experiments were conducted on a variety of datasets, including not only text-to-image but also text-to-video."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a method called ToCa, which utilizes token-wise feature caching to accelerate the denoising process of DiT. Unlike existing DiT acceleration methods, ToCa offers a fine-grained, token-wise acceleration approach, demonstrating improved performance compared to previous methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- It's difficult to understand how token-wise caching leads to actual acceleration. Since the attention layer calculates the similarity among all inputs through matrix multiplication, even if one output token is not computed, there won't be a significant difference in the overall computational cost (similar to unstructured pruning). A more detailed explanation of where this acceleration comes from or breakdown is required. \n- Evaluation is performed with just single Latency/FID point. An evaluation of the Pareto curve with latency/FID would provide more insightful information. \n- There seems to be a color error in Fig 3(b). The yellow arrow indicating low cache ratio is missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the questions in the weakness part."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThe author provides two insightful observations that improve my understanding of diffusion transformers. Firstly, different tokens exhibit varying levels of temporal redundancy across different timesteps. Secondly, the authors highlight that due to error propagation in transformer architectures, the same caching error can lead to vastly different impacts on the final generation result. These observations are the basis of the proposed token-wise feature caching method.\n2.\tUnlike previous works that primarily focused on U-Net architectures, the authors shift their attention to the transformer architecture. This is meaningful as existing methods have not fully exploited the unique properties of transformers, which are increasingly popular for visual generation tasks.\n3.\tThe experimental results demonstrate the performance improvements with high inference speedups.\n4.\tThe authors select the most suitable tokens for caching by leveraging both temporal redundancy and error propagation. The proposed method adaptively caches the tokens that minimize the resulting error while maximizing the acceleration ratio."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors aim to accelerate DiT inference by caching features and reusing them in future steps. The authors observe the limitations of existing methods that ignore the sensitivities to feature caching among different tokens, thus leading to potential degradation of generation quality. Based on this observation, the main idea of this work is to selectively cache suitable tokens and adjust the caching ratios by considering layer types and depths. Evaluations based on three typical DiT models show the performance of model accuracy and overhead saving."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tIn Figure 1, the authors use the Frobenius distance to measure the temporal redundancy of tokens across timesteps. However, it is unclear how this distance is exactly calculated. Providing more details on the calculation process would help readers better understand the experiments.\n2.\tI cannot find a clear formulation for the cache score used in token selection. The authors mention the cache score in Figure 4, but no explicit equation is provided. \n3.\tThe authors update the cache at all timesteps to reduce the error introduced by feature reusing. However, it is unclear how this procedure affects the computation complexity, especially about I/O overhead and cache selection efficiency.\n4.\tThe experiments report the inference speedups but lack a comprehensive analysis of the trade-off between speedups and model accuracy. While FID and CLIP scores are provided in Table 1, these metrics do not fully reflect model accuracy. In addition, Table 3 shows that the proposed method has no significant improvements over existing methods in terms of accuracy and speedups."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a training-free acceleration method for diffusion transformers by caching the features of unimportant tokens while still computing the important tokens."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024accelerating,\ntitle={Accelerating Diffusion Transformers with Token-wise Feature Caching},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yYZbZGo4ei},\nnote={under review}\n}"
},
"abstract": {
"value": "Diffusion transformers have shown significant effectiveness in both image and video synthesis at the expense of huge computation costs. To address this problem, feature caching methods have been introduced to accelerate diffusion transformers by caching the features in previous timesteps and reusing them in the following timesteps. However, previous caching methods ignore that different tokens exhibit different sensitivities to feature caching, and feature caching on some tokens may lead to 10X more destruction to the overall generation quality compared with other tokens. In this paper, we introduce token-wise feature caching, allowing us to adaptively select the most suitable tokens for caching, and further enable us to apply different caching ratios to neural layers in different types and depths. Extensive experiments on PixArt-alpha, OpenSora, and DiT demonstrate our effectiveness in both image and video generation with no requirements for training. For instance, 2.36X and 1.93X acceleration are achieved on OpenSora and PixArt-alpha with almost no drop in generation quality. Codes have been released in the supplementary material and will be released in Github."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion Models",
"Image generation",
"Video generation",
"Model Acceleration",
"Feature Cache"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a1712e395a097e0c3edc7cc3e7fe40a60d4ec1ac.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/72e46cfc754587debb47d438719ad6de80815159.zip"
},
"title": {
"value": "Accelerating Diffusion Transformers with Token-wise Feature Caching"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yYxEFC3Ep4 | Where Do Images Come From? Analyzing Captions to Geographically Profile Datasets | main | Active | geographical profiling;dataset auditing | datasets and benchmarks | 3;5;5 | 4;3;3 | 3;3;3 | 2;2;2 | 4;3;3 | 4.333333 | 3.333333 | 3 | 2 | 3.333333 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Due to limitations in analysis accuracy, this study focuses only on English data. However, how do you anticipate the differences in conclusions between a study limited to English and one that includes multiple languages? Also, where do you see the value in a study that targets only English?\n- It is possigle that biases inherent in the LLMs and image classifiers used in GeoProfiler could influence the results of geographical profiling. What are your thoughts on this issue?\n- This analysis is limited to the LAION2B-EN dataset. Why did you choose not to include multiple datasets in your analysis? Additionally, how do you think the conclusions might differ if a multi-dataset approach were adopted?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The proposed method named GeoProfiler leverages the latest large language models to extract geographical information, enabling context-aware profiling. As a tool proposed for analytical purposes, it holds considerable value. \n- The study presented in this paper is significant in that it quantifies the geographical biases present in datasets, aiding in the creation of more fair and balanced datasets. \n- The analysis is backed by statistical validation, which lends a certain degree of reliability to the presented results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a system called \"GeoProfiler,\" which maps image-caption pairs in vision-language datasets to corresponding countries. Specifically, the study analyzes the geographical distribution of 10 entities using the LAION2B-en dataset, which contains images with English captions. The results show that 46.1% of all captions are underspecified, and the geographical distribution of eight entities follows a power law distribution. Additionally, it was found that countries such as the United States, the United Kingdom, and India are frequently represented, while countries in South America, Africa, and Oceania are underrepresented. Furthermore, an investigation into the country-wise diversity of images for each entity revealed that frequency does not correlate with diversity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The investigation is limited to a dataset with English captions, which may result in a biased assessment of geographical representation. \n- While GeoProfiler is an interesting tool, it remains primarily a methodological approach that combines existing technologies. \n- The potential biases inherent in the LLMs and image classification models used are not deeply explored, and the discussion on these limitations is insufficient."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Questions:\n\n(1) Are there cases where the country can only be inferred based on the image instead of the caption? How would GeoFilter handle that case?\n\n(2) Based on the current findings, what would be the suggestions for addressing the potential biases in the vision-language models?\n\nMinor:\n\n(1) The font size of the text in Table 1 can be reduced so that no hyphen will be needed."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "(1) This study proposed a unique perspective for understanding the biases in vision-language models.\n\n(2) Multiple experiments were conducted to analyze the biases in geographic distributions associated with the image-caption pairs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study proposed GeoProfiler which geographically profiles multimodal datasets by mapping image-caption pairs to countries. GeoProfiler was then applied to geographically profile the English captions of the LAION dataset for 10 common entities. The dataset was not considered diversified because some countries were severely under-represented. A high correlation between a country's GDP and frequency was observed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The contributions of the study were not well articulated. As the authors discussed in the related work section, there have been many efforts in geographically profiling datasets. Some of them focus on visual datasets, and some of them focus on textual datasets. However, the authors claimed that the uniqueness of this study lies in the focus on vision-language datasets. It is unclear why the proposed study is different from the existing methods for profiling datasets such as MS-COCO. \n\n(2) The analysis was only conducted for one dataset. The findings of the study may not be general.\n\n(3) GeoProfiler is composed of many modules. The motivation for each module is clear. However, the study could be more robust and the contributions could be clearer if for each process, the authors could compare their design with baselines."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "The paper processes a large-scale dataset (LAION) that is known to suffer from a number of harmful issues. While the authors argue that their sample is small and the probability of including harmful images there does not raise issues, I would expect a more in-depth discussion. Additionally, potential errors in the performance of the GeoProfiler may lead to wrong conclusions regarding the extent/degree of biases in certain datasets."
},
"flag_for_ethics_review": {
"value": [
"Yes, Privacy, security and safety"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- How did the authors decide to select the ten specific entities for their analysis, and do they believe this selection fully captures the diversity in the dataset? Could the limited number of entities impact the generalizability of the findings?\n- How do the authors address the underrepresentation of marginalized or less-documented regions in your analysis? Could this underrepresentation affect the generalizability of the findings?\n- Consider including pre-LLM state-of-the-art methods in the experimental analysis.\n- Consider expanding the multilingual analysis further by addressing the challenges observed with certain languages and exploring more diverse language models or translation techniques.\n- Providing examples or case studies using accessible datasets would enhance the impact and usability of your tool."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The research is methodologically sound and exhibits a good level of rigor. The authors systematically develop GeoProfiler, starting with an analysis of simple baselines like string-matching and Named Entity Recognition (NER), and demonstrating their limitations in this context. By employing the Mixtral-8x7B Instruct model, they achieve a high precision of 0.86 in mapping captions to countries. The application of GeoProfiler to the LAION2B-en dataset is thorough, involving the analysis of 1 million image-caption pairs for each of the 10 selected entities. The statistical analyses, including the correlation with GDP (rho = 0.79) and the power law distribution fitting lead to some insightful findings. The appendix provides extensive methodological details, particularly about the entity-presence filtering process, the creation of the annotation dataset, and the inter-annotator agreements (Appendix A.3).\n- The paper is well-written and clearly structured. Each component of GeoProfiler is explained in detail, and the methodology is presented in detail, facilitating reproducibility. The use of figures and tables, such as the world maps showing the distribution of countries for specific entities and the correlation graphs with GDP, illustrate the key points. Additionally, the authors provide qualitative examples and comprehensive appendices that enhance the reader's understanding of their approach and findings.\n- This work holds significance in the domains of AI bias, data ethics, and responsible AI development. By studying and quantifying the geographical biases present in large-scale datasets like LAION2B-en, the paper provides insights into how such biases can propagate into VLMs and affect their deployment globally. The strong correlation between country representation and GDP indicates socio-economic disparities reflected in AI training data. These findings emphasize the need for more geographically diverse datasets to ensure that AI models are representative of different regions around the world. GeoProfiler could potentially serve as a practical tool for data curators, practitioners, and auditors in their efforts to assess and improve geographical diversity in datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the issue of geographical biases in vision-language models (VLMs) by investigating the geographical origins of images used in training these models. The authors introduce GeoProfiler, a system designed to map image-caption pairs in multimodal datasets to their corresponding countries based on location information extracted from captions. GeoProfiler employs a large language model (Mixtral-8x7B Instruct) to accurately infer countries from captions, achieving a high precision of 0.86 and recall of 0.82.\nThe authors apply GeoProfiler to the LAION2B-en dataset, a large-scale dataset with English captions. They focus on 10 globally relevant entities (e.g., house, flag, car) to analyze the geographical distribution of the data. The key findings echo previous works in this area and include:\n- The geographical distribution of images is highly skewed, following a power law distribution in 8/10 entities. The United States, the United Kingdom, and India are the most represented countries, accounting for 53.7% of the samples. \n- African and South American countries are significantly underrepresented, constituting only 2.0% and 4.3% of the images, respectively\n- There is a strong positive correlation (rho = 0.79) between a country's GDP and its frequency in the dataset, indicating that wealthier countries are more represented.\n- An analysis of the diversity of images from individual countries reveals that a higher number of images does not necessarily imply greater diversity.\n- A 46.1% of captions lack geographical information.\n\nThe paper highlights the limitations of current methods in determining image sources and emphasizes the importance of creating geographically representative datasets. GeoProfiler is a tool aimed to help data curators and practitioners measure and improve the geographical diversity of datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Unfortunately, the authors seem to disregard a large body of pre-LLM work on geotagging multimodal data [1,2,3,4]. Their baseline approaches are quite simplistic (even though generally quite effective), while there are numerous text-based approaches that perform very accurately even at the level of city prediction. As a result, I have strong doubts on the accuracy and efficiency of GeoProfiler compared to pre-LLM state-of-the-art methods [2, 4].\n\n- While the authors have tried to extend their analysis to multilingual captions, the evaluation remains preliminary. The challenges faced suggest that the method may not generalize well to all languages.\n\n- The authors limit their analyses to only 10 entities, which might itself constitute yet another source of bias. It would be interesting to include at least one entity containing humans, which might also offer fruitful ground for more in-depth analysis of representation bias.\n\n- The authors have made some efforts to include and analyze data from underrepresented regions, however, details about these regions remain limited.\n\n- The authors do not delve deeper into socio-economic factors beyond the correlation with GDP.\n\n- No consideration is made of when the images were created/uploaded. The temporal aspect in the analysis could offer additional insights.\n\n[1] Middleton, et al. (2018). Location extraction from social media: Geoparsing, location disambiguation, and geotagging. ACM Transactions on Information Systems (TOIS), 36(4), 1-27.\n\n[2] Kordopatis-Zilos, et al. (2017). Geotagging text content with language models and feature mining. Proceedings of the IEEE, 105(10), 1971-1986.\n\n[3] Luo, et al. (2011). Geotagging in multimedia and computer vision—a survey. Multimedia Tools and Applications, 51, 187-211.\n\n[4] Hu, et al. (2023). Location reference recognition from texts: A survey and comparison. ACM Computing Surveys, 56(5), 1-37."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024where,\ntitle={Where Do Images Come From? Analyzing Captions to Geographically Profile Datasets},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yYxEFC3Ep4},\nnote={under review}\n}"
},
"abstract": {
"value": "Building on studies documenting gender and racial biases in vision-language models, recent works show that such models often fail to generate geographically-representative images that accurately reflect different regions around the world. A common concern is that the data used to train these models is not representative, prompting the question: *which parts of the world do these training examples come from?* To answer this question, we develop a system, *GeoProfiler*, which geographically profiles multimodal datasets by mapping image-caption pairs to countries. Using location information from captions, GeoProfiler maps examples to countries with a high precision ($0.86$). We then apply *GeoProfiler* to geographically profile the English captions of the LAION dataset for $10$ common entities (e.g., house, flag, etc.). We observe the geographical distribution of $8$ entities to obey the power law distribution. The United States, the United Kingdom, and India are most represented, appearing in 53.7% of samples. Problematically, African and South American countries are severely under-represented with only 2.0 % and 4.3 % of images respectively. We also observe a high correlation between a country's GDP and frequency ($\\rho=0.79$). Lastly, we analyze the diversity of images from individual countries, and find that more images does not imply higher diversity."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"geographical profiling",
"dataset auditing"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/848c9645fc4257616bd115120ed351c39014bf04.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Where Do Images Come From? Analyzing Captions to Geographically Profile Datasets"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yZ7sn9pyqb | Generative Monoculture in Large Language Models | main | Active | monoculture;bias;alignment | foundation or frontier models, including LLMs | 1;3;5;6;8 | 5;3;4;4;4 | 2;2;2;3;3 | 4;2;2;3;3 | 3;4;2;3;3 | 4.6 | 4 | 2.4 | 2.8 | 3 | -0.261712 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I would like authors to comment on feedback loop effects. How do they anticipate monoculture evolving as LLMs are increasingly used to generate their own training data in feedback loops ? Do they think this will further exacerbate the monoculture effect. What strategies and/or guardrails do the authors suggest to mitigate this very long term \"model collapse\" situation ?\n\nAuthors could also further comment on composition of training data. Would increased diversity play a role in mitigating feedback loops and how can LLM training incorporate such diversity. \n\nLastly, what role model size plays ? Have authors ablated along model size/complexity dimension ?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper highlights a critical issue in LLMs around narrowing of output diversity compared to the training data. The paper addresses an important problem esp when LLMs are being increasingly applied in diverse fields such as automated product reviews, sentiment analysis, scholarly paper summarization etc. The paper demonstrates the prevalence of narrowing of output diversity, which they refer to as 'generative monoculture'. They consider book reviews and code solutions as two primary use cases to study the narrowing phenomenon. \nThe paper tests various methods to mitigate 'monoculture', including temperature adjustment and prompting strategies. \n\nThe paper provides a well-structured methodology for measuring generative monoculture, including diverse metrics like entropy, mean pairwise similarity and other such metrics. \n\nThe paper discusses the impact of narrowing of output diversity on the societal and code security aspects. These discussions strengthens the paper."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the critical issue of \"generative monoculture\" in large language models (LLMs), where output diversity narrows compared to training data. This is particularly concerning as LLMs are increasingly used in diverse applications like product reviews, sentiment analysis, and scholarly summarization. The study focuses on book reviews and code solutions to demonstrate this phenomenon and tests various mitigation strategies, such as temperature adjustment and prompt engineering, though with limited success."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the paper tests various methods to mitigate 'monoculture', including temperature adjustment and prompting strategies, the attempted countermeasures showed limited efficacy in mitigating narrowing of output diversity. This warrants more experimentation and ideation. I would also think use cases/tasks other than book reviews and code generation should be investigated to test the generalizability of the method. Dialogue / chat bot as an application may be an important area to test these methods on. The inability to redefine the alignment process limits the degrees of freedom with which one could operate. I would have liked to see a bit broader scope that covers alignments and comparing the dispersions to alignment data rather than the training data of CPT. \n\nRich getting richer / Echo Chamber / Feedback loops are commonly studied phenomenon in recommender system literature. Methods like Bandits and Exploration/Exploitation are commonly used to mitigate the homogeneity of recommended items over time. Authors have an opportunity to connect to this rich work and draw inspiration from this field."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What was `{person}` replaced with in the prompt?\n2. I do not understand the method used to generate correct solutions for the coding task. Specifically, I did not understand this sentence (line 309): We instantiated this by generating k samples (100 for GPT-4 and 200 for Claude-3), and verifying that at least 20 of them were correct.\" What did you do if it wasn't the case that 20 were correct? Does your sampling method here ensure then that \"correctness\" will never fall below 20%?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The main idea of this paper is very interesting, and I am glad the authors have done this exploration. The authors have done a good job of discussing nuances around the merits of diversity, and I appreciate their selection of two complementary domains where the value of having diversity is quite different."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The main idea of this paper can be summarized in the following toy example: if 90% of humans say that chocolate is tasty, should language models *always* describe chocolate as tasty, or should they aim to somehow reflect the diversity in human opinions, and occasional refer to chocolate as bad-tasting?\n\nThe authors apply this thought experiment to two domains---book reviews and code implementation---and analyze the various ways in which language models enforce a monoculture by failing to model the diversity present in actual human-written content. Adding more entropy into the decoding pipeline helps a bit, but does not meaningfully bring diversity to human levels.\n\nThe authors include an interesting discussion of the scenarios in which diversity is desirable--for example, generated book reviews ought to reflect the range of opinions real reader mights have on said books---and the scenarious where diversity might be less important---for example, it is more important for generated code to be correct than to be diverse."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "## Primary weakness - incomplete description of methodology\nUnfortunately, it is not possible to assess this paper as it was submitted because crucial information required to understand and reproduce the methodology is purported to be in the appendix, but no appendix was included in the submission. Since the paper is incomplete, there is no choice but to give a score of 1 (strong reject). Despite this, I have tried to leave some constructive feedback below for the authors.\n\n## LLMs for attribute extraction\nLine 163 states that \"care must be taken to use LLMs for attribute extraction, as they are known to be biased towards their own responses.\" It seems like this care was taken for the sentiment classifier (the paper notes the classifier's accuracy on SST-2 and it's widespread usage). However, the same care does not seem to have been taken for the \"efficiency\" attribute, which uses gpt-3.5 to assess runtime efficiency of generated code. It is unclear why a reader should trust that gpt-3.5 is efficient and unbiased at this task.\n\n## Takeaway #2\n\nLine 393 states that humans \"largely prefer text with positive sentiment.\" Is this actually true? In a 5 minute literature review, I found several papers such as the two listed below which suggest a much more complex story. Needless to say, statements such as this one should not be made without citation.\n\nSangwon Park, Juan L. Nicolau. Asymmetric effects of online consumer reviews, ISSN 0160-7383, (https://www.sciencedirect.com/science/article/pii/S0160738314001273).\n\nLotte M. Willemsen, Peter C. Neijens, Fred Bronner, Jan A. de Ridder. “Highly Recommended!” The Content Characteristics and Perceived Usefulness of Online Consumer Reviews, (https://academic.oup.com/jcmc/article/17/1/19/4067647).\n\n## Takeaway #3\n\nTakeaway #3 claims RLHF hurts diversity more than any of the other factors did. This claim would be much stronger if it had been backed by experiments with more than just one pair of models. I would like to see additional experiments with other model pairs (for example instruction-tuned and RLHF'ed OLMO). Otherwise, a caveat should be added to this claim about the limitation of the result.\n\n## Nitpicks\n\nHere are a few nitpicks (which wouldn't much affect my overall assessment).\n1. I am not a fan of the phrase \"human-generated.\" It is extremely atypical to talk about a human generating a book review or generating some code (unless the speaker wants to imply the human is using genAI tools); rather, in common parlance, humans **write** book reviews and **write** code. I suggest replacing instances of \"human-generated\" with \"human-written.\"\n2. I think the paper would be more engaging to read if the description of the attributes of interest was moved before the section on metric calculation (Section 3.3). Section 3.3 felt out of place to read when I didn't yet know what exactly the attributes were for each domain. \n3. Figure 4 has too much information in it, which impeded communication. It would be more effective to break this into multiple figures each with their own caption. In particular, the (a), (b) and (c) in the middle bar graph are especially confusing since (a), (b) and (c) are also used to refer to subfigures. Can you instead put shorthands for the actual names of the models? Also, for the topic model I am confused why the word 'novels' occurs in two groups. \n4. For Figure 5, an easier-to-understand x-axis label for the top-right plot would be \"Self-similarity\" rather than \"Similarity.\"\n5. 
One additional paper citation for the first paragraph of your Related Work section (\"Forcing Diffuse Distributions out of Language Models\" https://arxiv.org/abs/2404.10859).\n6. Many of the references are formatted incorrectly with missing capitalization in the paper titles."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "None beyond what was raised in weaknesses"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper formalizes the idea of “monoculture”. This idea isn’t wildly novel–it’s intuitive and consistent with other similar ideas such as mode collapse–but to my knowledge there isn’t a clean documentation of it and thus the paper has value in being an official cite for this phenomenon\n\nThe authors focus on measuring monoculture using task-specific notions of salient attributes (e.g., sentiment in book reviews, algorithms in code) which differs meaningfully from measures that use e.g., vocabulary of generations. I think this distinction is meaningful as it's a better measure of the type of distribution shift that will matter in practice if GenAI is widely deployed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper defines and documents the problem of “generative monoculture” in LLMs – that is, the situation in which LLMs produce outputs that are significantly less diverse that what was present in their input training data. The authors focus on diversity over task-specific and intuitive metrics – specifically, the sentiment of book reviews or the algorithms employed in generated code. This is in contrast to measuring over some more systematic but arguably less informative metric such as lexical distributions. The authors find that, across models, the problem of generative monoculture is present and argue that it seems to get worse for models which are “aligned” to human preferences via RLHF.\n\nI really like this paper. It's a nice, intuitive idea that deserves to be highlighted. I think the study was executed well for the most part but would have preferred to see some human evaluations, rather than solely automatic ones. But I think despite this, it warrants publication in the current form."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My primary concern is that the evaluation focuses entirely on automatic metrics. Granted, there are many metrics that the authors use, and they are somewhat diverse. Still, many of the metrics rely on using LLMs themselves (mostly GPT 3.5) to evaluate LLM output. There is something circular (though hard to articulate) about doing this especially given the premise of the paper itself. That is: if we assume LLMs are not good at generating diverse outputs, might we also worry that they aren’t good at recognizing such diversity? Or, said differently, why are we confident that the collapse is due to actual differences in what the LLMs produce, and not differences in what the LLM evaluator can detect? \n\nTo address this, I think the paper should include some evaluations which are determined entirely by human judgments. I.e., ask humans to rate the sentiment rather than asking LLMs to do so, ask humans to evaluate the big-O complexity rather than having GPT do it, etc. I would be dramatically more convinced if the conclusions held up under human eval, rather than just automatic eval. But, admittedly, I do expect the result to hold even if all evaluations were switch to human eval."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Can the authors explain what they mean on line 309: “We instantiated this by generating k samples [...], and verifying that at least 20 of them were correct.” Does this mean 20 samples were checked, and 80 were not? \n* Can the authors provide a reference that shows that humans prefer positive reviews, to support take-away 2?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* **Interesting quantification of generic monoculture:** As the authors also mention in their related work section, the idea that current alignment practices hurt diversity is not necessarily new. However, this work presents a new way of measuring this, which gives additional insights in the output of (aligned) LLMs.\n* **Considerations (pros and cons) of approach are clearly presented, and largely make sense:** The authors clearly mention their considerations for adopting their chosen methodology, in a space where they sometimes had to make some shortcuts or assumptions. For example regarding their data selection and their used metrics.\n* **Authors present negative results, that are insightful:** The presented mitigation strategies do not really help, but this is an insightful finding for the community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces the concept of “generative monoculture”, which refers to the phenomena that an LLM's output distribution is less diverse than the original input (data) distribution.\nGenerative monoculture is investigated on two tasks: generating book reviews, and generating coding solutions, across multiple LLMs.\nThe monoculture is measured with a number of metrics (depending on the task): distribution of the mean, entropy and standard deviation, and mean pairwise similarity.\nThe authors find that monoculture exists across the LLMs that are investigated. The authors propose several mitigations, such as changing the sampling parameters, or diversifying the prompts, but this has little impact."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **Investigated datasets are not very large.** This holds especially for the code solutions, where the data is limited to a subset of 100 easy solutions. Although this gives a first impression, I wonder how results hold over larger data samples, and especially when harder problems are included as well.\n* **Coding solutions are checked by GPT-3.5.** The authors give some details in the appendix of the autojudge, but it is not entirely clear to me how quality is ensured. After all, GPT-3.5 needs to check the results of a stronger model (GPT-4) and, as the authors also mention elsewhere in the paper, LLMs tend to be biased towards their own responses."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Q1: Could the authors explain how the plagiarism score is computed in Figure 5 (b)? It seems is not mentioned in paper."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Generative monoculture is a subtle phenomenon that may not be immediately noticeable to users but can have a lasting impact on society, making it an important area of study. The paper is well-written and clearly presented."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates generative monoculture in LLMs, defined as the reduction in diversity from training data to the model’s outputs. It examines this phenomenon in two tasks: sentiment of book reviews and code generation. The paper found consistent monoculture in all experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1: One major risk in this study is that measuring generative monoculture with “a source dataset that is likely to have been used in training the LLM we wish to investigate” (line 128) could introduce substantial inaccuracies. This approach risks significant biases, undermining the validity of the measurements. I will elaborate below and hope the authors could discuss these risks and present evidence to support validity of their approach.\n\nW1-1: First, the selected source data may not actually be part of the LLM’s training data.\n\nW1-2: Even if the selected source dataset is indeed in the training data, it may not represent the full range of relevant training data. For instance, other sources or non-English reviews of the same books could dominate the distribution from training data, meaning that the measured distribution may not reflect the true distribution of the training data at all. This concern is especially relevant since the study uses relatively small datasets (742 books with 10 reviews per book for book reviews and 100 coding questions with 20 correct solutions each for code).\n\nW1-3: Additionally, the filtering of the source dataset introduces biases. The authors mention in Appendix B2 that book reviews are filtered to be between 300-700 words. Filtering by length could affect the sentiment distribution, potentially skewing the results. \n\nW2: Could the authors clarify why they filter out low-quality (generated) book reviews by examining perplexity? While low-quality reviews may seem less helpful to users, excluding them might introduce bias in the sentiment distribution of LLM-generated book reviews. I recommend testing whether this filtering step influences the measured sentiment distribution, although it might be preferable to retain these low-quality reviews to avoid introducing additional bias.\n\nW3: To strengthen the study’s validity, I suggest measuring generative monoculture in a more controlled environment, where the training data is known and the training distribution can be accurately measured. Without certainty that the source dataset is part of the training data, all findings are at risk of being unreliable.\n\nW4: In Line 317, the efficiency of LLM generated code is measured by ``prompt GPT-3.5 to infer the big O time and space complexity’’. Please establish the reliability of this LLM evaluation.\n\nW5: In line 328, Code Summary (categorical) is measured by “prompt GPT-3.5 to assign tags to a code segment by providing it a set of tags to choose from”. Please establish the reliability of this LLM evaluation.\n\nW6: In Figure 4 (d), decay T p = 1.0 seems to have higher entropy, therefore more diverse, than source distribution. Could the authors discuss this, as it disagrees with the statement in line 379 “there exists significant narrowing from the source to generation distribution in all attributes considered for both scenarios”?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024generative,\ntitle={Generative Monoculture in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yZ7sn9pyqb},\nnote={under review}\n}"
},
"abstract": {
"value": "We introduce {\\em generative monoculture}, a behavior observed in large language models (LLMs) characterized by a significant narrowing of model output diversity relative to available training data for a given task: for example, generating only positive book reviews for books with a mixed reception. While in some cases, generative monoculture enhances performance (e.g., LLMs more often produce efficient code), the dangers are exacerbated in others (e.g., LLMs refuse to share diverse opinions). As LLMs are increasingly used in high-impact settings such as education and web search, careful maintenance of LLM output diversity is essential to ensure a variety of facts and perspectives are preserved over time. We experimentally demonstrate the prevalence of generative monoculture through analysis of book review and code generation tasks, and find that simple countermeasures such as altering sampling or prompting strategies are insufficient to mitigate the behavior. Moreover, our results suggest that the root causes of generative monoculture are likely embedded within the LLM's alignment processes, suggesting a need for developing fine-tuning paradigms that preserve or promote diversity."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"monoculture",
"bias",
"alignment"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/33ec33cb5e6eb799fe4c4c20d1264642799fafc0.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/a7af968dcecad1635b672a1d99b18fbb898d13a8.pdf"
},
"title": {
"value": "Generative Monoculture in Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yZdPpKTO9R | Decision-making with speculative opponent model-aided value function factorization | main | Active | Decision making;Cooperative multi-agent reinforcement learning; | reinforcement learning | 3;5;5;5 | 4;3;3;4 | 2;3;3;2 | 1;2;2;2 | 2;3;2;2 | 4.5 | 3.5 | 2.5 | 1.75 | 2.25 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1. Can you please clarify how Theorem 3.1 differs from Theorem 2 in [1]? I do not see how the addition of the speculative opponent models in the individual agents affects the distributional value function factorisation theorem. \n\nQ2. Can you clarify how or if the DMIX baseline used in this work differs from the DMIX in [1]?\n\nQ3. Can you motivate the choice of training separate models for each opponents, inside each of the learning agents? Did you consider the option of training one single joint model of the opponents in each of the learning agents, that can capture a compressed representation of the opponents behaviour? \n\nQ4. What are the computational costs, compared to DMIX?\n\nQ5. Can you expand further on the sampling process for the opponents joint actions? That seems to be an important bottleneck of the approach, especially since it needs to be done multiple times.\n\nQ6. In the algorithm configuration $\\epsilon$ starts at 1. Is this value decayed?\n\nQ7. Do you have any insights if the assumption of only observing information regarding the opponents through the local observations would be enough to still capture informative models in the case in which the opponents would also undergo a learning process?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Using the additional information provided by distributional returns to also train opponent models is an interesting idea and novel contribution. Also, demonstrating that no additional knowledge apart from the local observation is enough for such auxiliary models is a valuable insight for this line of research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes DSOMIX, Distributional Speculative Opponent-aided MIXing, a multi-agent reinforcement learning (MARL) approach that incorporates opponent modelling into the distributional value function factorization (DFAC) framework applied to the QMIX approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I believe the contribution heavily relies and builds on the distributional value function factorization (DFAC) framework [1], both in terms of theory (the mean-shape decomposition, IGM and DIGM equivalence) and algorithmic design (IQN based implementation, the DMIX baseline), but this is not properly acknowledged. Please see questions Q1 and Q2, to address this issue. \n\nAnother concern regards the scalability of the approach, since each learning agent will consider _p_ opponent models. The ablation studies regarding the number of outputs for the opponent models is interesting, since knowing the opponents actions space size is not always possible. I have further questions related to this below, see Q3 - Q5.\n\nFinally, a crucial limiting factor for the contribution of the work is the fact that the modelled opponents have fixed policies. See Q7.\n\n[1] Sun, W. F., Lee, C. K., See, S., & Lee, C. Y. (2023). A unified framework for factorizing distributional value functions for multi-agent reinforcement learning. Journal of Machine Learning Research, 24(220), 1-32.\n\nMinor remarks:\n- I advise to keep the consistency of the colors for the approaches across figures (Fig 2, DSOMIX is either red or green)\n- Fix notation inconsistencies, example critic parameters are denoted as $\\phi$ (line 107) and then referred to as $\\theta$ in lines 110 - 112\n- typos, for example:\n - line 113 having each agent learns -> learn\n - line 130 Distributions RL explicitly model -> models\n - line 323 the objectives of DSOMIX is -> objective is or objectives are\n - line 357 An empty grid permits any agents -> agent\n - line 370 QMIX is align -> aligned\n - Figure 2 DOMAC? I assume it should be DSOMIX"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "(1) What makes SOM+Distributional RL converges faster than OMIX and DMIX? Can you provide a more detailed analysis or ablation study specifically comparing the convergence rate of DSOMIX, OMIX and DMIX? This could help isolate the factors contributing to faster convergence and provide deeper insights into the synergies between SOM and Distributional RL.\n\n(2) How is the opponent model trained? For example, can you provide loss function used for training the opponent model, the specific training procedure, and how the opponent model interacts with the main agent during training. This would provide a more comprehensive understanding of the opponent modeling approach and its integration into the overall framework."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is a novel combination of DFAC(QMIX+distributional RL) and speculative opponent modeling, considering the conditional probability distribution on opponent’s potential actions. Their empirical experiment section studies effect of introduced modules (SOM)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a method called distributional speculative opponent-aided mixing framework(DSOMIX), which is built upon QMIX, distributional Q, and has a speculative opponent modeling module. Their experiments in Pommerman and predator-prey show their methods achieve higher returns and faster convergence speed than the baseline methods. Their ablation study shows the distributional aspect of the value network and speculative opponent models are necessary for the framework."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) The distributional RL + QMIX aspect was introduced in the prior work [1], and Theorem 3.1 appears to be the same as Theorem 2 in [1] but this theorem is introduced in method part, potentially indicating it is an original contribution. Please exiplicitly discuss how Theorem 3.1 is different or related to the Theorem 2 in [1].\n\n[1] Sun, W. F., Lee, C. K., & Lee, C. Y. (2021, July). DFAC framework: Factorizing the value function via quantile mixture for multi-agent distributional Q-learning. In *International Conference on Machine Learning* (pp. 9945-9954). PMLR."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The experimental comparisons seem limited. Given that DSOMIX includes opponent behavior modeling and requires multiple samples per state, it’s unsurprising that it outperforms Qmix when using episodes as the x-axis. Are there additional results using other metrics, such as wall clock time? Also, have comparisons been made with more recent methods?\n2. Since this work builds on [1], is there an ablation study comparing against [1]?\n\n**Typos:** \n\nIn the caption for Fig. 1, \"An illustration of our **DOMAC** network architecture\" mistakenly uses \"DOMAC,\" which is the method name from [1], instead of this paper's \"DSOMIX\".\n\n**Reference**:\n\n[1] Sun, Jing, et al. \"Decision-Making With Speculative Opponent Models.\" *IEEE Transactions on Neural Networks and Learning Systems* (2024)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper builds on previous work by incorporating value decomposition, enhancing the team coordination game-solving capabilities. Experimental results also demonstrate that the method outperforms Qmix."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper builds upon the foundation laid by [1] (though not explicitly cited) to propose DSOMIX, a speculative opponent modeling framework that enables MARL agents to make decisions based on local information alone."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The methodological contribution is relatively incremental, as it primary contribution is to combine value decomposition with prior work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How can you support the claim \"However, existing approaches typically rely on centralized learning with access to opponent data, and the process of extracting decentralized policies becomes impractical with larger teams.\" in the abstract?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "DSOMIX framework does not rely on access to opponent data during training."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces DSOMIX (Distributional Speculative Opponent-aided MIXing), a novel framework for multi-agent reinforcement learning (MARL) that addresses the challenge of opponent-aware decision-making using decentralized policies. By leveraging speculative opponent models, DSOMIX predicts adversarial behaviors based on local observations without requiring access to opponent data during training. Additionally, it incorporates distributional value function factorization to provide more granular return estimates, improving decision quality and convergence speed. The paper is well-supported by theoretical derivations and extensive experiments across benchmarks like MPE and Pommerman, demonstrating superior performance over baselines. While effective, the approach assumes fixed opponent strategies, leaving room for future work on dynamic, non-stationary opponents."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although the proposed approach is fundamentally an opponent modeling method, the paper does not compare its performance with any established opponent modeling baselines. This lack of comparison makes it difficult to assess the true effectiveness of DSOMIX relative to existing opponent modeling techniques. \n2. The paper argues in the introduction that prior opponent modeling methods rely on opponent data, which prevents them from obtaining decentralized policies. However, this claim is inaccurate, as there are opponent modeling approaches that can achieve decentralized policies while still utilizing opponent data during training. This misrepresentation weakens the justification for the novelty of the proposed method.\n3. This paper is poorly written, e.g., a pseudo algorithm may help."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This work proposes a novel value-based speculative opponent modeling algorithm that relies solely on local information."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024decisionmaking,\ntitle={Decision-making with speculative opponent model-aided value function factorization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yZdPpKTO9R},\nnote={under review}\n}"
},
"abstract": {
"value": "In many real-world scenarios, teams of agents must coordinate their actions while competing against opponents. Traditional multi-agent reinforcement learning (MARL) approaches often treat opponents as part of the environment, causing controlled agents to overlook the impact of their adversaries. Opponent modeling can enhance an agent’s decision-making by constructing predictive models of other agents. However, existing approaches typically rely on centralized learning with access to opponent data, and the process of extracting decentralized policies becomes impractical with larger teams. To address this issue, we propose the Distributional Speculative Opponent-aided mixing framework (DSOMIX), a novel value-based speculative opponent modeling algorithm that relies solely on local information—namely the agent's own observations, actions, and rewards. DSOMIX uses speculative beliefs to predict the behaviors of unseen opponents, enabling agents to make decisions based on local observations. Additionally, it incorporates distributional value decomposition models to capture a more granular representation of the agent's return distribution, improving the training process for the speculative opponent models. We formally derive a value-based theorem that underpins the training process. Extensive experiments across four challenging MARL benchmarks, including MPE and Pommerman, demonstrate that DSOMIX outperforms state-of-the-art methods, achieving superior performance and faster convergence."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Decision making",
"Cooperative multi-agent reinforcement learning;"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b2eaf3213069b930844404ac6a564a26d341a29c.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Decision-making with speculative opponent model-aided value function factorization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yaOe2xBcLC | NoVo: Norm Voting off Hallucinations with Attention Heads in Large Language Models | main | Active | Hallucination Mitigation;Large Language Models;TruthfulQA;Representation Editing;Multiple Choice Question Answering;Attention Heads | foundation or frontier models, including LLMs | 3;5;8 | 4;4;3 | 2;3;3 | 2;2;3 | 3;4;4 | 5.333333 | 3.666667 | 2.666667 | 2.333333 | 3.666667 | -0.917663 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Authors claim \"Hallucinations in Large Language Models (LLMs) remain a major obstacle, particularly in high-stakes applications where factual accuracy is critical\". Are the benchmarks used in this study representative of the same?\n\nWe see models being adopted to simple applications that involve RAG, QnA etc. or more complex Agentic use cases that involve abilities like function calling, planning etc.\n\nIt is not very clear how the benchmarks help in high-stakes applications and it i very difficult assess the impact of the work.\n\nAnother question that arises is - how critical is MCQ based tasks? should we even solve it?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- NoVo is designed to be lightweight and does not require specialized tools, in-domain training, or external resources. This simplicity allows it to scale effortlessly across diverse datasets and applications, making it practical for real-world deployment.\n\n- The method achieves remarkable accuracy gains, notably setting a new state-of-the-art on the TruthfulQA MC1 benchmark with a 19-point improvement over existing approaches. This demonstrates the effectiveness of NoVo in addressing the critical issue of hallucinations in LLMs.\n\n- NoVo exhibits exceptional generalizability, achieving significant accuracy improvements on over 90% of the 20 diverse datasets evaluated. This indicates the method’s robustness and versatility in handling a wide range of tasks beyond just one specific dataset or domain."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Norm Voting (NoVo), a lightweight method designed to reduce hallucinations in LLMs using attention head norms. \n\n- Norm Voting automatically selects attention head norms that correlate with truth using a simple, inference-only algorithm that operates efficiently with just 30 random samples.\n- These selected norms are then used in a straightforward voting algorithm, enhancing the model's prediction accuracy by treating head norms as an ensemble of weak learners.\n\nNoVo's approach avoids reliance on specialized tools or in-domain training, making it scalable and generalizable. The method achieves state-of-the-art performance on the TruthfulQA MC1 benchmark, surpassing previous methods by at least 19 accuracy points. Additionally, NoVo demonstrates strong generalization across 20 diverse datasets, significantly outperforming existing representation editing and reading techniques, and showcasing its robustness in improving LLM factual accuracy."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the paper presents detailed analysis and shows impressive gains across benchmarks, the applicability of the solution beyond the MCQ type of problems is not obvious.\n- The motivation to solve MCQ type questions is not clear. Why is it very important to solve?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "* In Table 2, very different results are obtained for CICv1 and CICv2. Is there any explanation for why the results differ so much here, or in general why the results vary so much across datasets? \n* What is the variability of results when using different sets of randomly drawn samples for norm selection?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* Empirical results are strong and consistent across models, with substantial improvements over previous methods\n* The method is simple and cheap, requiring no specialized training or tools.\n* Experimental evaluation is comprehensive, testing across 20 diverse datasets. The authors also evaluate generalizability through finetuning and adversarial robustness tests\n* Good error analysis that helps understand the method's capabilities and limitations, including detailed investigation of how different voter types contribute and where/why the method fails"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "* The authors propose a simple, novel technique to improve LLM factual accuracy. They first find truth-correlated attention head norms, then ensemble these with majority voting at inference time to choose answers to MC questions. \n* The authors conduct comprehensive experiments on a diverse range of datasets and use several different models. On many datasets they find massive effects from their intervention, improving on sota by 20+ percentage points in some cases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* While the method is simple, I find the conceptual motivation unclear. The authors posit that for some heads, the L2 norm is correlated to the truthfulness of a sequence. Why? What makes this a reasonable thing to expect, conceptually? \n* These experiments are done exclusively on multiple-choice QA datasets, and it seems that it would not be possible to use this method to reduce hallucination in open-ended generation. \n* Evaluating on a wider range of models would help provide more confidence in the method, especially testing on currently popular models like Llama 3."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see Weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The authors did a good job of observing the behavior between attention heads and multiple-choice accuracy.\n- The figures are well-designed, and the experiments are comprehensive.\n- The hallucination detection task is highly relevant and important today.\n- NoVo shows impressive results in multiple-choice benchmarks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a novel multiple-choice output strategy named NoVo. NoVo identifies correlations between the norms of attention heads at the choice token position and the the accuracy using a small calibration dataset. They leverage this information to make multiple-choice predictions without relying on log-probabilities. The proposed method outperforms various existing hallucination detection methods across different datasets and models. The authors further analyze the voting heads for deeper insights."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- This approach has very limited applicability, being useful only for multiple-choice tasks. Therefore, it should be noted that this is not a generic hallucination reduction technique like existing methods such as Truthx.\n- It is unclear what makes attention heads special in this context. Similar experiments could potentially be conducted using MLP layers. \n\n- The authors only found a correlation between the norms of the heads and truthfulness. However, it is not explained why such a correlation exists or if there is any causal relationship between the two. \n- The direct relationship between the norm of a head and truthfulness is not well-explained. Why should a correlation be expected in the first place? And why use the L2 norm? \n- Out-of-distribution experiments are missing. The authors should conduct experiments where NoVo is calibrated on one dataset and tested on another. \n- I think the authors should choose between two options. They can either convert their observation about norms and truthfulness into a generic algorithm that works for open-ended generations, or they can deeply explore the reasons behind the observed correlation and write an interpretability paper. Another option might be to reformulate the problem as addressing selection bias or improving multi-choice scenarios, similar to [1].\n\n[1] LARGE LANGUAGE MODELS ARE NOT ROBUST MULTIPLE CHOICE SELECTORS, ICLR 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce NoVo for reducing hallucinations, a novel method that is significantly more accurate and generalisable than previous methods, while being simpler to use."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024novo,\ntitle={NoVo: Norm Voting off Hallucinations with Attention Heads in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yaOe2xBcLC},\nnote={under review}\n}"
},
"abstract": {
"value": "Hallucinations in Large Language Models (LLMs) remain a major obstacle, particularly in high-stakes applications where factual accuracy is critical. While representation editing and reading methods have made strides in reducing hallucinations, their heavy reliance on specialised tools and training on in-domain samples, makes them difficult to scale and prone to overfitting. This limits their accuracy gains and generalizability to diverse datasets. This paper presents a lightweight method, Norm Voting (NoVo), which harnesses the untapped potential of attention head norms to dramatically enhance factual accuracy in zero-shot multiple-choice questions (MCQs). NoVo begins by automatically selecting truth-correlated head norms with an efficient, inference-only algorithm using only 30 random samples, allowing NoVo to effortlessly scale to diverse datasets. Afterwards, selected head norms are employed in a simple voting algorithm, which yields significant gains in prediction accuracy. On TruthfulQA MC1, NoVo surpasses the current state-of-the-art and all previous methods by an astounding margin---at least 19 accuracy points. NoVo demonstrates exceptional generalization to 20 diverse datasets, with significant gains in over 90\\% of them, far exceeding all current representation editing and reading methods. NoVo also reveals promising gains to finetuning strategies and building textual adversarial defence. NoVo's effectiveness with head norms opens new frontiers in LLM interpretability, robustness and reliability."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Hallucination Mitigation",
"Large Language Models",
"TruthfulQA",
"Representation Editing",
"Multiple Choice Question Answering",
"Attention Heads"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9c31232be4158f7c0600d4aa81f6c91aabbb52f0.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/49b31a6745fbcfcbd9a9584d90967c79529b25e7.zip"
},
"title": {
"value": "NoVo: Norm Voting off Hallucinations with Attention Heads in Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yaQbTAD2JJ | Language-Image Models with 3D Understanding | main | Active | Multi-modal Large Language Model with 3D Understanding; 3D Image Grounding from Image | foundation or frontier models, including LLMs | 5;6;6;6 | 4;4;4;4 | 3;3;3;3 | 3;3;3;3 | 3;3;3;3 | 5.75 | 4 | 3 | 3 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the weakness.\n\nA few corrections:\nLine 422: Change \"Table 5\" to \"Figure 5.\"\nFigure 5 (bottom right): Why is the performance bar at 32% different between the two sub-figures?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The overall data pipeline is well-structured, extending LLAVA to a 3D data format with large-scale pretraining. It incorporates standardization of 2D/3D data labels (as shown in Fig. 3), unification of model I/O inputs, and the implementation of visual chain-of-thought (CoT) reasoning for step-by-step analysis.\n\n2. The evaluation is comprehensive, covering various tasks such as 3D grounding (Talk2Car, DriveLM-grounding, Indoor Objectron, ARKitScenes, SUN-RGBD), QA (DriveLM-QA), 3D grounding and captioning (LV3D), and 2D grounding (RefCOCO).\n\n3. A good portion of the visualizations effectively enhances understanding across all tasks.\n\n4. Visual CoT achieves a 3.2-point improvement."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces CUBE-LLM, extending LLAVA to 3D using the new LV3D dataset, which unifies 2D and 3D data in a question-answer format. This allows smooth 2D-to-3D generalization without changing the model’s architecture. Instead of specialized design, CUBE-LLM achieves 3D reasoning through diverse data, using iterative 2D predictions to boost 3D accuracy. It sets new benchmarks in 3D grounding on tasks like Talk2Car and DriveLM while staying competitive on standard 2D tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper lacks an in-depth analysis of joint 2D and 3D training. It closely follows LLAVA1.5, with DINOv2 as the vision model and primarily contributes by consolidating 2D and 3D datasets. While 3D performance improvements could add value, the impact seems insufficient for acceptance. I would like more analysis on the effects of excluding 2D and 3D box pretraining on 3D/2D QA/grounding performance.\n\n2. In my view, the Visual CoT would benefit from zooming in on selected parts (SoM [1]) to enhance model understanding with enlarged object details in a second stage. While the 3.2-point performance improvement (Line 423-424) is reasonable, it comes at the cost of additional tokens and computation, similar to test-time augmentation.\n\n[1] Yang, Jianwei, et al. \"Set-of-mark prompting unleashes extraordinary visual grounding in gpt-4v.\" arXiv preprint arXiv:2310.11441 (2023)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Will the datasets be released to benefit future research?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The authors proposed a new MLLM for 3D understanding, *i.e.*, Cube-LLM. To enable joint learning from both 2D and 3D domain knowledge, the authors proposed datasets with unified 2D and 3D formats, as well as training tasks on different data modalities.\n2. Experiments results show the benefit of the proposed data+task training paradigm, with improved performance on 2D and 3D visual grounding, as well as tasks that require certain reasoning.\n3. The proposed framework will enable future research on 2D and 3D reasoning, with a unified interface of 2D and 3D representations. This can enable models to interact with 2D and 3D data, and to perform explicit reasoning on 3D layouts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work the authors proposed a method to extend vision-language models (LLaVA-v1.5) for 3D understanding. Specifically the authors constructed a large pretraining dataset LV3D (by unifying the data formats of multiple previous datasets), and a new Cube-LLM. With data and task scaling the authors show that Cube-LLM achieve improved performance for 2D and 3D grounding tasks. Results also show that Cube-LLM has some 3D reasoning capabilities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Experiments on DriveLM QA is comparing with weak baselines and the reasoning examples on DriveLM are with finetuning. For instance, how is the reasoning capabilities of the proposed Cube-LLM compared to methods with spatial reasoning training data, such as SpatialRGPT and SpatialVLM. I think this is an interesting topic to study the reasoning capabilities of the proposed method but current results are not very promising.\n2. Table 4 shows that Cube-LLM outperforms previous specialist and generalist models. However, Cube-LLM is trained on RefCOCO annotations as stated in Table 1, which makes the results not a fair comparison?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "It would be better to show performance on more 3D reasoning datasets/tasks."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The exploration of LLM-based 3D reasoning is timely and relevant, given the nascent stage of 3D language-vision models compared to their 2D counterparts. One significant contribution lies in the authors’ pipeline for collecting datasets suited for training multimodal LLMs, which addresses the challenge of limited large-scale datasets for 3D tasks.\n\n2. Experiments on two established 3D reasoning benchmarks, Talk2Car and DriveLM, show that the proposed method achieves superior performance, indicating the model’s strength in handling 3D reasoning tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work presents a vision-language model designed for both 2D and 3D understanding tasks. The authors begin by reformatting existing datasets into a standardized structure, representing objects with 2D/3D bounding boxes and constructing multi-round question-answer pairs to facilitate model training. For evaluation, a chain-of-thought prompting technique is applied at the test stage, enabling the model to perform progressive reasoning on 3D tasks from simpler to more complex questions. Experiments conducted on two 3D reasoning datasets—Talk2Car and DriveLM—demonstrate promising performance for the proposed method. Additionally, the method shows competitive results on 2D vision-language (VL) tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The collected dataset primarily focuses on 3D captioning and grounding tasks, which restricts the breadth of 3D capabilities the model can support. Expanding the dataset to include other essential 3D reasoning capabilities, such as depth ordering, neighborhood relationships, object sizing, and directionality, would better align with real-world 3D applications.\n\n2. While the experiments indicate that the proposed method performs effectively for 3D visual grounding, it remains unclear if the model can fully comprehend and reason within the 3D environment.\n\n3. Although this paper focuses on 3D reasoning with LLMs, it could benefit from a more thorough discussion of related recent works in 3D LLMs, such as 3D-LLM, Scene-LLM, Point-LLM, Uni3DL, and 3D visual grounding efforts, including ScanRefer and ReferIt3D.\n\n4. In Figure 3, the input-output formats do not appear to include point_3d as a possible question or answer type, which seems inconsistent with Equations (6) and (7).\n\n5. A recently introduced work, Cambrian, includes a 3D reasoning subset. Evaluating the proposed model on this subset could provide an insightful comparison and demonstrate the robustness of the model."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the section of weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The concept of a 3D visual chain-of-thought is well-founded and logical.\n2. Ablation studies effectively highlight the benefits of both the proposed training dataset LV3D and the visual chain-of-thought approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper focuses on fine-tuning LLAVA using a specially constructed 3D visual chain-of-thought (CoT) prompting data for 3D grounding tasks. The CoT data is created by arranging multiple questions about the same object in an easy-to-hard order, such as progressing from 2D box center prediction to 2D box coordinate generation, and eventually to 3D box coordinate prediction. The effectiveness of this proposed CoT data is demonstrated by training on a mixture of indoor and outdoor datasets and evaluating the results on Talk2Car and DriveLM datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. For the experiments, the manuscript does not provide enough comparison with recent progress in specialists, such as Grounding DINO (ECCV'24), and its follow-ups.\n2. The manuscript employs a virtual camera for handling camera intrinsics. A key advantage of specialized models is their ability to process images with varying resolutions and aspect ratios once trained. Does this capability extend to virtual camera-based MLLMs? If not, what methods do you use to handle inputs with different resolutions and aspect ratios?\n3. The engineering effort involved in curating data on LV3D is commendable. Do you plan to release the relevant code scripts?\n\nMinor Typos: Inodoor -> Indoor, L432."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We train a multi-modal large language model to reason in 3D from RGB Image"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024languageimage,\ntitle={Language-Image Models with 3D Understanding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yaQbTAD2JJ},\nnote={under review}\n}"
},
"abstract": {
"value": "Multi-modal large language models (MLLMs) have shown incredible capabilities in a variety of 2D vision and language tasks. We extend MLLMs’ perceptual capabilities to ground and reason about images in 3-dimensional space. To that end, we first develop a large-scale pretraining dataset for 2D and 3D called LV3D by combining multiple existing 2D and 3D recognition datasets under a common task formulation: as multi-turn question-answering. Next, we introduce a new MLLM named CUBE-LLM and pre-train it on LV3D. We show that pure data scaling makes a strong 3D perception capability without 3D specific architectural design or training objective. CUBE-LLM exhibits intriguing properties similar to LLMs: (1) CUBE-LLM can apply chain-of-thought prompting to improve 3D understanding from 2D context information. (2) CUBE-LLM can follow complex and diverse instructions and adapt to versatile input and output formats. (3) CUBE-LLM can be visually prompted such as 2D box or a set of candidate 3D boxes from specialists. Our experiments on outdoor benchmarks demonstrate that CUBE-LLM significantly outperforms existing baselines by 21.3 points of AP-BEV on the Talk2Car dataset for 3D grounded reasoning and 17.7 points on the DriveLM dataset for complex reasoning about driving scenarios, respectively. CUBE-LLM also shows competitive results in general MLLM benchmarks such as refCOCO for 2D grounding with (87.0) average score, as well as visual question answering benchmarks such as VQAv2, GQA, SQA, POPE, etc. for complex reasoning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multi-modal Large Language Model with 3D Understanding; 3D Image Grounding from Image"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a75f6de2fd64cb94709bd4b7496769b918358922.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/126bb53628d54c4fa2794897213b624ec75d44d4.zip"
},
"title": {
"value": "Language-Image Models with 3D Understanding"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yaR0hqaGbI | Hierarchical Demonstration Order Optimization for Many-shot In-Context Learning | main | Active | In-context learning;Demonstration Order Optimization | foundation or frontier models, including LLMs | 1;3;5;8 | 3;3;4;2 | 1;2;2;4 | 1;2;2;3 | 1;2;3;3 | 4.25 | 3 | 2.25 | 2 | 2.25 | -0.410152 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* L175: (2) Empirically Effective → (3) Empirically Effective\n\n* L184: Could you clarify “optimal ignorance requirement for a predictive family”?\n\n* L226: $\\Pi_1(D)$ and $\\Pi_1(D)$ → $\\Pi_1(D)$ and $\\Pi_2(D)$\n\n* Could you clarify why Theorem 2 can’t be applied to the entire demonstration set and why it is only limited to demonstrations within a cluster?\n\n* what is the sensitivity of HIDO on choice of embedding?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The lack of a precise, quality-measuring metric for demonstration order has been a major challenge in ICL and many-shot learning research, given the demonstration order instability in LLMs. Many existing works rely on human annotation or heuristic metrics, which are either not scalable or lack accuracy. The authors build on V-usable information to develop a theoretical foundation as an order evaluation metric, effectively quantifying the usable information gain from a specific demonstration order. This metric is not only interpretable (based on information content) but also computationally viable and effective. The authors also propose a model agnostic hierarchical optimization technique to find the optimal demonstrations order based on ICL-DOI metric"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "As large language models (LLMs) grow in scale and capability, in-context learning (ICL) and, in particular, many-shot learning have become predominant approaches for machine learning practitioners. This work addresses a significant challenge currently facing ICL and many-shot learning: the dependency of LLM performance on the order of examples within prompts. Existing literature has shown substantial performance variations when examples (or demonstrations) are reordered within a prompt. The authors first propose the ICD-OVI score as a metric for measuring the impact of example order in ICL, building upon V-usable information. They then introduce a hierarchical framework based on clustering and inter- and intra-cluster ordering to enable scalable refinement in the example order space. The HIDO clustering approach effectively searches the permutation space, making ICL-DOI a feasible optimization problem."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are some weaknesses in the authors' proposed methodology for efficiently finding the “optimal” order of demonstrations in ICL.\n\n1. **Assumption of Probing Set Effectiveness**: The authors assume that “the demonstration order optimized for answer prediction also works well for sample generation.” However, figures in the appendix show that demonstration embeddings and probing set embeddings generated by various LLMs lack a clear decreasing trend, which does not support this assumption.\n\n2. The authors limit their many-shot learning setting to 50 examples, which is considerably smaller than certain state-of-the-art many-shot learning setups that require up to 2,000 shots (see source).\n\n3. **Computation-Performance Trade-offs**: It would be beneficial to see an analysis and experiments exploring the relationship between computational cost (by varying the number of clusters or the number of samples per cluster) and model accuracy.\n\n4. The authors claim that “intra-cluster demonstrations share proximate embeddings, which significantly decreases ICL performance variance when demonstration orders vary.” This claim lacks supporting analysis or experimentation. It is not immediately clear that closer embedding proximity (implying more similar samples) necessarily results in reduced variance in demonstration order."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See weaknesses"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The problem of determining an optimal ordering of exemplars is a crucial problem."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In-context learning performance is heavily dependent on examplar order as demonstrated by previous work. This work proposes a score to rank a particular ordering and a hierarchical optimization framework that does not require evaluating every permutation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "## Weaknesses\n- **[Major]** Why are PDO and GlobalE presented as separate baselines? One minimizes the KL-divergence between the distribution of predicted labels on the probe set and a uniform prior and the other maximizes the entropy of the distribution. These are mathematically identical.\n- **[Major]** What is the point of introducing the framework of V-usable information? It is easy to compute the KL divergence between the distribution of labels in your training set and the distribution of predicted labels directly. The problem stated in Eq. 1 can be solved by simply choosing the ordering that attains the lowest loss. \n- **[Major]** What is the interpretation of $\\log_2 P_{\\text{LLM}}(a \\mid \\Pi(\\mathcal{A}) \\oplus \\emptyset)$? From my understanding, you are simply concatenating a bunch of labels and prompting the LLM to get a probability score for the label of a probing example. However, it seems as though prompting the LLM in such a manner can result in gibberish since this type of prompt clearly is very contrived. Why not directly compute an empirical distribution on the true training examples? \n- **[Major]** The framework proposed in this work fixes the order of the exemplars. However, the order should be **query dependent.** Based on the observation of [1], many ICL/RAG works order the exemplars based on their similarity to the query [2,3,4], but this is not discussed or used as a baseline in this work. Compared to LLM inference with long contexts, a simple similarity based reordering should be very cheap.\n- **[Major]** It seems like there is no statistically significant difference in performance in the vast majority of the experiments...\n- **[Minor]** What is the point of using LLM generate probes? I understand that this saves annotation cost, but in the many shot setting where we are already required to annotate ~150 examples, why not manually annotate a few extra samples to use as a validation set? I am skeptical of whether or not the validation set is reliable when it is purely synthetic.\n- **[Minor]** The writing for Section 3 needs significant improvement. In its current state, one must repeatedly look at prior work in order to get any understanding of what the actual problem set up is. See comments below:\n1. Many terms are used without being defined. For example, \"probing samples\" in Line 125 or \"optimal ignorance requirement\" in Line 184 are not terms that are used in the broader ML community, and should be properly defined.\n2. What is $\\pi(i)$ in line 194? This is never defined.\n3. What is the difference between $\\mathcal{A}$ and $A$ in Equation 4? Is this a typo? \n4. It is not mentioned that the \"probing samples\" are LLM generated until line 199.\n\n\n## References\n[1] Lost in the Middle: How Language Models Use Long Contexts (https://arxiv.org/abs/2307.03172)\n[2] In-Context Learning for Text Classification with Many Labels (https://aclanthology.org/2023.genbench-1.14.pdf)\n[3] What Makes Good In-Context Examples for GPT-3? (https://arxiv.org/pdf/2101.06804)\n[4] Retrieval-Augmented Generation for Large Language Models: A Survey (https://arxiv.org/pdf/2312.10997)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "There is a typo on line 176: it should be “**(3)** Empirically Effective”."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper provides a strong and comprehensive theoretical guarantee for their proposed metric for demonstration example ordering.\n2. The experiments show consistent improvement of their method against all the baselines presented.\n3. The paper is clearly written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of demonstration order instability (DOI) in the in-context learning (ICL) of large language models (LLMs). To study this issue, the authors propose an information theory-based metric called ICD-OVI and introduce a hierarchical demonstration order optimization method named HIDO. They validate their method on five LLMs across nine tasks, comparing it with three baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Missing Baselines:** Only three baselines are included in the experiments, and there are other key baselines that need to be compared. For example:\n - [1] provides an information-gain-based metric to evaluate the effect of demonstrations, which also has a strong theoretical foundation.\n - [2] selects demonstration examples based on the embedding space distance between examples and input queries.\nFurther experiments are crucial for enhancing the rigor of this paper.\n\n2. **Outdated Tasks and Datasets:** All the tasks used are text classification tasks, and the datasets are somewhat outdated. Though not a major issue, experiments on newer tasks and datasets (e.g., MMLU, HellaSwag) are necessary for validating the proposed metrics and methods.\n\n[1] Hongfu Liu and Ye Wang. 2023. Towards Informative Few-Shot Prompt with Maximum Information Gain for In-Context Learning. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 15825–15838, Singapore. Association for Computational Linguistics.\n\n[2] Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. 2022. What Makes Good In-Context Examples for GPT-3?. In Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures, pages 100–114, Dublin, Ireland and Online. Association for Computational Linguistics."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could you please specify the hardware used (e.g., type of processors/GPUs, memory) and the approximate computation time required to run the proposed HIDO method on the datasets in the experiments? Thank You"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. Innovative Method: The paper introduces HIDO, a novel hierarchical optimization approach that effectively addresses demonstration order instability (ICL-DOI) in many-shot in-context learning, meanwhile reducing the computational complexity of order optimization, a persistent challenge in large language models.\n2. Theoretical Rigor: Introducing ICD-OVI, an information-theoretic metric, provides a robust, quantifiable measure of information gain for different demonstration orders, adding a solid theoretical foundation to the method.\n3. Strong Experimental Validation: Extensive experiments on multiple datasets and models demonstrate that HIDO outperforms baseline methods in both predictive accuracy and stability, highlighting its practical effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces HIDO, a hierarchical optimization method, and ICD-OVI, an information-theoretic metric, to address demonstration order instability (ICL-DOI) in large language models. HIDO reduces computational complexity by separately optimizing order within clusters and between clusters. Meanwhile, ICD-OVI measures the information gain from each order to help select the optimal sequence. Experimental results show that HIDO, with dynamic updates to ICD-OVI, outperforms baselines in both predictive accuracy and stability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the method shows strong results on several datasets, additional validation on a broader range of tasks, especially outside of text classification, would strengthen the claim of its general applicability in many-shot in-context learning."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a information-theoretical demonstration order quality metric and a hierachical demonstration order optimization framework."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hierarchical,\ntitle={Hierarchical Demonstration Order Optimization for Many-shot In-Context Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yaR0hqaGbI},\nnote={under review}\n}"
},
"abstract": {
"value": "In-Context Learning (ICL) is a technique where large language models (LLMs) leverage multiple demonstrations (i.e., examples) to perform tasks. With the recent expansion of LLM context windows, many-shot ICL (generally with more than 50 demonstrations) can lead to significant performance improvements on a variety of language tasks such as text classification and question answering. Nevertheless, ICL faces demonstration order instability (ICL-DOI), which means that performance varies significantly depending on the order of demonstrations. Moreover, the ICL-DOI phenomenon persists and can sometimes be more pronounced in many-shot ICL, validated by our thorough experimental investigation. Current strategies handling ICL-DOI, however, are not applicable to many-shot ICL, since they cannot overcome two critical challenges: (1) Most metrics measuring the quality of demonstration order rely on subjective judgment, lacking a theoretical foundation to achieve precise quality characterization. These metrics are thus non-applicable to many-shot situations, where the order quality of different orders is less distinguishable due to the limited ability of LLMs to exploit information in long input contexts. (2) The requirement to examine all orders is computationally infeasible due to the combinatorial complexity of the order space in many-shot ICL. To tackle the first challenge, we design a demonstration order evaluation metric based on information theory for measuring order quality, which effectively quantifies the usable information gain of a given demonstration order. To address the second challenge, we propose a hierarchical demonstration order optimization method named HIDO that enables a more refined exploration of the order space, achieving high ICL performance without the need to evaluate all possible orders. Extensive experiments on multiple LLMs and real-world datasets demonstrate that our HIDO method consistently and efficiently outperforms other baselines. Our code can be found at https://anonymous.4open.science/r/HIDO-B2DE/."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"In-context learning",
"Demonstration Order Optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e8ff4fc84213cb74bc550afbfa495186e4cdd692.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Hierarchical Demonstration Order Optimization for Many-shot In-Context Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yaqPf0KAlN | Omni-MATH: A Universal Olympiad Level Mathematic Benchmark for Large Language Models | main | Active | Mathematical Benchmark;LLM Evaluation;Olympic | datasets and benchmarks | 5;5;8;8 | 4;3;4;3 | 2;3;3;3 | 2;2;3;3 | 3;2;3;3 | 6.5 | 3.5 | 2.75 | 2.5 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- What are the education background of the graduate and doctoral students getting involved?\n- In Section 4.2, what the sampling strategy of the 100 subset, are all the difficulty levels covered in this subset?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- A new benchmark contribution that demonstrate the limitation of current progress of math reasoning in large language models, the detailed difficulty information would benefit the further categorization of this benchmark.\n- Careful study over the reliability of the metrics including data leakage and LLM-based judgement results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work constructs Omni-MATH, a challenging math problem set of 4k over more than 33 domains and with more than 10 difficulty levels with difficulty information comes from either the AoPS website or GPT-4o few-shot result. 15 large language models are evaluated on Omni-MATH and it turns out be challenging even for the strongest OpenAI-o1-preview model. Further analysis shows that all models receives certain data leakage and GPT-4o is a reliable judgement model for determine whether the model-generated solution is consistent with the reference answer."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The explanation of the RM-version of Qwen2.5-MATH do not outperform the vanilla version and Qwen2.5-MATH RM@256 does not out perform Qwen2.5-MATH RM@8 needs further elaboration and investigation to unveil the phenomenon."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Regarding data annotation: Could you clarify what is meant by cross-validation in line 203? Moreover, in L831-834, it states, \"Subsequently, we hired 2 annotators to inspect the entire dataset, following a set of principles.\" What are these principles?\n\n- In Table 3, what do the notations \"Prop.\" and \"Avg.D.\" stand for? It seems these are not explained in the caption or main text. Additionally, how should we interpret the results for Qwen-2.5-MATH-instruct? Is the data leakage problem particularly severe here? Since some training sets for NuminaMath and the Qwen-MATH series are derived from AoPS, is there any overlap between this subset and OmniMath?\n\n- Lines 398-400 omit crucial details. For instance, did the 100 samples originate from the same LLM? What are the inter-agreement scores among the human annotators? How many student judges were involved? Are there specific examples where GPT-4o fails as a judge, and what are the reasons for these failures?\n\n- In Section 4.3, you mention investigating few-shot evaluation. Are you evaluating LLMs on OmniMath in a few-shot manner? If so, where are the prompts used for this evaluation?\n\n\nOthers \n\n- L79: should be \"shown in Table 2\". \n\n- L241-L242: we previously surveyed in Figure (where is Figure 4?)"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The dataset's large size and focus on Olympiad-level problems make it a potential resource for evaluating LLMs' mathematical reasoning abilities.\n\nThe investigation into best-of-N scaling and difficulty consistency represent current areas of interest, and the analysis of these part is interesting.\n\nThe paper is well-organized and in good shape.\n\nThe experiments are comprehensive, and the analysis is thorough."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces OmniMath, a new Olympiad-level mathematical reasoning benchmark. It comprises 4,428 math problems, each with an instance-level difficulty classification. The authors utilize GPT-4o as a judge to evaluate the performance of LLMs. Additionally, they have developed Omni-Judge, an open-source answer evaluation LLM designed to serve as a substitute for GPT-4o. The paper conducts some analysis of experiments involving 15 different LLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The major concern is about the evaluation. While the authors assert that GPT-4o aligns with human annotations in 98% of cases (L398-399), crucial details are omitted (e.g., whether the 100 samples originated from the same LLM? What are the inter-agreement scores among the human annotators? What is the number of student judges involved? Are there specific examples where GPT-4o fails as a judge and why?) undermining the reliability of this claim.\n\nMoreover, there is a notable contradiction with the results attributed to OmniJudge. According to L74-76, \"Our Omni-Judge achieves over 91% consistency with GPT-4o and 86% consistency with human judgments, providing reliable feedback.\" Let us consider this carefully: suppose we have 100 test examples, and GPT-4o achieves alignment with human judges on 98 of them. With OmniJudge, 91 examples align with GPT-4o, implying that at most only 2 of the 91 might be judged incorrectly by OmniJudge, which would result in at least 89 out of 100 examples aligning with human judges. This seems contradictory with the reported 86% consistency with human judgments. If the results reported for OmniJudge are indeed reliable (as they seem to be, given the detailed information provided in the appendix), then GPT-4o aligns with human judgments at most 95%. This discrepancy calls into question the reliability of the results presented in Table 2.\n\nIn addition, using GPT-4o or OmniJudge as evaluators can be time-consuming and computationally (or monetarily) expensive. A more feasible approach might involve standardizing answer formats, as seen in datasets like MATH, and designing detailed, rule-based answer-cleaning and matching protocols. Model-based judging should primarily serve as a supplementary tool (like Olympic Arena) rather than a primary evaluation method.\n\nThere is also the potential for data leakage, as some models, such as NuminaMath and Qwen2Math, use examples from AoPS as part of their supervised fine-tuning datasets."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Omni-judge is trained with GPT-40 evaluation results, but how to verify if the GPT-4o evaluations are reliable? \nCan the solutions for discrete math problems be written in code and executed to achieve accuracy?\nWhat are the best-of-1 results? \nWhat is the exact number used for \"N\" in Best-of-N?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Omni-MATH targets Olympiad-level mathematics, pushing beyond traditional datasets like GSM8K and MATH, which have reached saturation. This focus on advanced problems adds a valuable, high-difficulty benchmark for assessing and improving LLMs' reasoning abilities.\n\nProblems are categorized into sub-domains and assigned difficulty ratings, allowing for fine-grained performance and error analysis. This detailed structure aids in identifying domain-specific strengths and challenges in model reasoning.\n\nThe study reveals limitations in test-time scaling methods (e.g., Best-of-N) and the impact of domain interactions on model performance. Such findings guide future research on improving LLMs’ mathematical reasoning and scaling capabilities."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Omni-MATH, a challenging benchmark designed to assess large language models (LLMs) in mathematical reasoning at the Olympiad level. Omni-MATH contains 4,428 competition-level problems, rigorously annotated across 33 sub-domains and ten difficulty levels. This comprehensive categorization enables a nuanced evaluation of LLM capabilities in complex mathematics.\n\nThe study reveals that existing models, including advanced ones like OpenAI o1-mini, achieve only moderate success on this benchmark, with a maximum accuracy of 60.54%. The findings highlight persistent challenges in areas like discrete mathematics, while models perform relatively better in algebra and calculus. To support this benchmark, the authors introduce Omni-Judge, an open-source evaluator achieving high consistency with human judgment (86%) and GPT-4o (91%)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "More examples of the dataset should be presented in either main paper or appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Overall, I find this to be a strong paper.\n\n- **Clarity**: The writing is clear and easy to follow.\n- **Comprehensive Benchmark**: This paper introduces a comprehensive and in-depth Olympiad-level mathematical benchmark, featuring questions across various domains and difficulty levels.\n- **Model Analysis**: It provides a thorough analysis of current advanced models on this benchmark, offering valuable insights into aspects such as Best-of-N and consistency.\n- **Effective Verifier**: A notable contribution of this paper is the development of an effective verifier, which improves the accuracy of validating predicted answers, addressing the unique challenges of Olympiad-level problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces *Omni-MATH*, a new Olympiad-level mathematical benchmark designed to evaluate the performance of advanced models on competition-level problems. The authors assess various strong models on this benchmark and share insightful findings from their analysis. Additionally, they propose an Olympiad-level verifier to assist in confirming the accuracy of predicted and ground truth answers. Unlike simpler datasets like gsm8k or MATH, Olympiad-level problems require more sophisticated verification techniques beyond rule-based methods due to their complexity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Here are some concerns about this paper:\n\n- **Verifier Training Details**: The details regarding the training of the verifier should be further clarified.\n- **Scaling Test-Time Inference**: In the test-time inference analysis, this paper primarily relies on the best-of-N strategy. While this is generally fine, for more challenging competition-level questions, advanced search algorithms—such as tree search or Monte Carlo Tree Search (MCTS)—may be required. A discussion on these more sophisticated techniques for tackling harder questions would enhance the paper's impact."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a comprehensive and challenging benchmark specifically designed to assess LLMs' mathematical reasoning at the Olympiad level."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024omnimath,\ntitle={Omni-{MATH}: A Universal Olympiad Level Mathematic Benchmark for Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yaqPf0KAlN},\nnote={under review}\n}"
},
"abstract": {
"value": "Recent advancements in large language models (LLMs) have led to significant breakthroughs in mathematical reasoning capabilities. \nHowever, existing benchmarks like GSM8K or MATH are now being solved with high accuracy (e.g., OpenAI o1 achieves 94.8% on MATH dataset), indicating their inadequacy for truly challenging these models. To bridge this gap, we propose a comprehensive and challenging benchmark specifically designed to assess LLMs' mathematical reasoning at the Olympiad level. Unlike existing Olympiad-related benchmarks, our dataset focuses exclusively on mathematics and comprises a vast collection of 4428 competition-level problems with rigorous human annotation. These problems are meticulously categorized into over 33 sub-domains and span more than 10 distinct difficulty levels, enabling a holistic assessment of model performance in Olympiad-mathematical reasoning. Furthermore, we conducted an in-depth analysis based on this benchmark. Our experimental results show that even the most advanced models, OpenAI o1-mini and OpenAI o1-preview, struggle with highly challenging Olympiad-level problems, with 60.54% and 52.55% accuracy, highlighting significant challenges in Olympiad-level mathematical reasoning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Mathematical Benchmark",
"LLM Evaluation",
"Olympic"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8e54b52dc1edb916645cfe3d87560f08b9310b9f.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/0e7e70a5a82c37eff9050e081bc47681a31e4a89.zip"
},
"title": {
"value": "Omni-MATH: A Universal Olympiad Level Mathematic Benchmark for Large Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yarlMUJePB | Energy-Based Discrete Mask Approximation for 3D Molecular Graph Explanation | main | Active | 3D Graph Explanation;3D Molecular Graphs;Energy-Based Models;Discrete Masks | applications to physical sciences (physics, chemistry, biology, etc.) | 3;3;3;3;5 | 4;3;4;3;4 | 1;1;3;3;3 | 2;2;2;2;2 | 2;2;3;2;3 | 3.4 | 3.6 | 2.2 | 2 | 2.4 | 0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Additionally, there are some questions regarding the method in the paper:\n\n1) Why was only the QM9 dataset used in the experimental section, and not the GEOM-Drugs dataset? Typically, studies on molecules utilize both of these datasets. \n\n2) How does the soft mask extract the optimal subgraph? Is it determined by setting a threshold manually?\n\n3) There are some issues with Equation 7. What does ‘r’ represent in the equation? Also, for the energy function term, isn't it necessary to sum over all nodes? Additionally, Equation 7 does not specify the parameters to be optimized. Is the EBM function the only part that requires optimization?\n\n4) I suggest adding a flow figure to illustrate the method and training process.\n\n5) In Section 4.3, you presented visualization results based on DimeNet++ backbone. If different backbone, such as SchNet, is used, would the visualization results be the same? Does Graph Explanation only depend on the molecule itself, or is it influenced by the network architecture as well?\n\n6) Why are both a multigraph (with two edges appearing between the black and white atoms in the leftmost image) and an adjacency matrix shown in Figure 4(b)(c)? How can the adjacency matrix represent the adjacency relationships in a multigraph?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This model adds a regularization constraint to the soft mask, encouraging the mask values to be closer to 0 or 1, thereby reducing the uncertainty in the output of effective subgraphs. This sound good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the problem of graph explanation for 3D molecular graphs, specifically predicting which structures within the molecular graph contribute most to the property prediction task. In particular, the graph explanation problem in this paper is equivalent to a mask optimization task. By applying the optimized mask to obscure certain nodes and edges in the graph, the masked graph is then input into the property prediction method. The masked graph that achieves the closest performance to the full graph input represents the most contributive structure. The authors' contribution lies in proposing a new mask optimization method, which uses regularization techniques to make the mask elements closer to 0 or 1, making it easier to identify which parts of the graph should be masked."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main drawbacks of the paper are three: the motivation is not very clear, the contribution is insufficient, and the experimental section is insufficient. The specific reasons for each are as follows:\n\n1)The motivation is not very clear. First, regarding the motivation of the paper, the authors try to address the Graph Explanation task for 3D molecular graphs. However, both 3D and 2D molecular graphs are essentially graphs; 3D molecular graphs may contain more edges, making the explanation slightly more challenging. Therefore, could the authors provide specific molecular examples or quantitative evidence to demonstrate how existing 2D interpretation methods are inadequate when applied to 3D graphs? Besides, based on the visualization results in Figure 4, the authors are still focused on actual bonds and atoms, which can also be achieved with 2D graph explanation. Interpreting extra edges in a 3D molecular graph defined by a cutoff radius (those not representing actual chemical bonds) is also not particularly meaningful. Therefore, could the authors explain how the proposed 3D molecular explanation method handles extra edges that do not represent actual chemical bonds, and why it is necessary to interpret them?\n\n2)The contribution is not enough. First, using regularization to enhance algorithm performance is a very common approach [1,2]. Furthermore, the authors do not explain the advantages of using the EBM function to design the regularization term. For example, directly applying L1 regularization to the mask in Equation 7 could also make the mask values closer to 0 and 1. Could the authors compare their proposed method with directly applying L1 regularization to the mask and clarify the advantages of using the EBM function for the regularization term? Secondly, the purpose of explanation should be to leverage atom contribution to improve property prediction performance, rather than predicting properties using a smaller subgraph. In the experimental section, the MAE for property prediction using subgraphs is often higher than for the original graph. If you cannot achieve better results than the original graph, then what is the purpose of predicting properties using the optimal subgraph? Thus, the suitability of applying Equations 5 and 1 to molecular property prediction is worth considering. If the goal is simply to explain which parts of the molecule contribute more, the hard mask, as defined in traditional Graph Explanation, may not serve this purpose effectively.\n\n3)Finally, the experiment is insufficient. Molecular research experiments typically involve at least two datasets (such as QM9 and GEOM-Drugs [3]) or more, yet this paper only uses the QM9 dataset, making the experimentation inadequate. Therefore, I suggest that the authors include experiments on the GEOM-Drugs dataset. Additionally, the baselines are outdated, with the latest comparison algorithm from 2022. In the experimental section, the authors need to demonstrate that their 3D interpretation method outperforms 2D interpretation methods from 2024, such as [2,4-9]; otherwise, the proposed 3D interpretation method lacks justification. Therefore, I suggest that the authors include at least one recent graph explanation methods like [2,4,5,7,9] as baselines.\n\n[1] Shan C, Shen Y, Zhang Y, et al. Reinforcement learning enhanced explainer for graph neural networks[J]. Advances in Neural Information Processing Systems, 2021, 34: 22523-22533. \n[2] Zhang W, Li X, Nejdl W. 
Adversarial Mask Explainer for Graph Neural Networks[C]//Proceedings of the ACM on Web Conference 2024. 2024: 861-869. \n[3] Axelrod S, Gomez-Bombarelli R. GEOM, energy-annotated molecular conformations for property prediction and molecular generation[J]. Scientific Data, 2022, 9(1): 185. \n[4] Huang R, Shirani F, Luo D. Factorized explainer for graph neural networks[C]//Proceedings of the AAAI conference on artificial intelligence. 2024, 38(11): 12626-12634. \n[5] Chen T, Qiu D, Wu Y, et al. View-based explanations for graph neural networks[J]. Proceedings of the ACM on Management of Data, 2024, 2(1): 1-27. \n[6] Bui N, Nguyen H T, Nguyen V A, et al. Explaining Graph Neural Networks via Structure-aware Interaction Index[J]. arXiv preprint arXiv:2405.14352, 2024. \n[7] Liu X, Ma Y, Chen D, et al. Towards Embedding Ambiguity-Sensitive Graph Neural Network Explainability[J]. IEEE Transactions on Fuzzy Systems, 2024. \n[8] Homberg S K R, Modlich M L, Menke J, et al. Interpreting Graph Neural Networks with Myerson Values for Cheminformatics Approaches[J]. 2024. \n[9] Chen Y, Bian Y, Han B, et al. Interpretable and Generalizable Graph Learning via Subgraph Multilinear Extension[C]//ICLR 2024 Workshop on Machine Learning for Genomics Explorations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Line 269: last equation: indices do not match and \\mathbb{m}_i is not defined before (only m_i\\in [0,1] is).\n- The author mention in several places that the hyperparameters help “managing the budget” K. How is this done in practice? I would expect that K is dynamically chosen by the method, as it is unclear from the beginning how much of a subgraph is required to explain the prediction. But if this is the case, how do we avoid the trivial solution where the whole graph is retained?\n- What is the variable \\mathbb{r} in equation 7?\n- While focused on 3d graphs, the method applies to graph in any dimensions. If the method is indeed superior to others, it should perform at least on par with the SOTA on 2d graphs as well. Further experiments on graph in other dimensions would benefit the paper.\n- Figure 4, while very explanatory, it is clearly cherry-picked. It would be helpful also to include further examples, also in the appendix, where the method does not perfectly predict the true chemical explanation."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well-written, and the authors effectively introduce and motivate their topic and work. The notation is clear and (mostly) correct, while the figures are well-designed and help clarify the content.\n\nThe experiments include both qualitative and quantitative results, as well as an ablation study to assess whether their proposed extra loss term truly affects the method's performance.\n\nThe topic is relevant, as explainability remains a significant challenge, especially in domains like chemistry where even domain experts often don't know the ground truth.\n\nThe paper offers some novelty, although in my opinion it's rather incremental."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose an improvement to instance-level explanation methods for graph neural networks, specifically for 3D graphs. Their approach involves identifying a subgraph—ideally the smallest possible—that produces a prediction similar to the original graph. While minimizing a loss dependent on a pure mask is challenging (partly due to its combinatorial complexity), previous literature has addressed this by relaxing the mask selecting the subgraph to a soft mask. Once the logits are learned, the subgraph can be obtained by taking max(logits) or selecting values above a certain threshold. \n\nTo enhance these methods, the authors introduce an additional loss that pushes the soft masks closer to discrete ones. They achieve this by predicting logits and minimizing a hard concrete-type distribution (eq. 6). This approach encourages the explanation network to predict more \"discrete-type\" values. The authors also provide an energy-based interpretation of their method.\n\nThe authors design experiments to demonstrate that their method outperforms other baselines both quantitatively and qualitatively on the qm9 dataset."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My main concern is the novelty of the approach (more specific questions about details can be found below in the question section). As I understand it, the authors add a loss term regulating the \"discreteness\" of the masks. These types of losses are ubiquitous in deep learning; for instance, it's common when learning a permutation matrix to learn a soft-relaxed one and enforce bistochasticity during training. \n\nFurthermore, the interpretation of their method as energy-based, while legitimate, seems rather superficial (every exponential distribution can be viewed as an energy function from a statistical mechanics perspective) and appears aimed at aligning the paper with the current popularity of (generative) energy-based models.\n\nI also find that restricting the evaluation to QM9 is somewhat limited nowadays, as larger datasets like Geom-Drugs and QMugs are becoming standard for assessing new methods. This limitation is particularly notable since the authors claim their method is especially useful for scaling to larger graphs, and QM9 contains only small compounds."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The second component of Eq. 5 (discrepancy between soft and discrete masks) is not always overlooked, but PGExplainer has already tried to tackle it, which also gives discrete masks (although edge-level masks, but easily changeable to node-level masks).\n1. Eq. 7 what is $\\bf r$? Why is the specific node i chosen to be included in the loss but not other nodes or the sum of all nodes?\n1. Eq. 7, I ignore $\\bf r$ and suppose that the second term is the sum of all nodes for now. I don't understand why minimizing the loss in Eq. 7 automatically minimizes the second loss in Eq. 5 (discrepancy between soft and discrete masks). The second term in Eq. 7 means the sum of absolute values of the probabilities of each node being included in the important subgraph. However, minimizing this sum of probabilities doesn't require the probabilities to be discrete, i.e., 0 or 1.\n3. It's unclear what is optimized for the loss in Eq. 7 because the optimization problem not explicitly defined. I guess $\\zeta, \\gamma$? And it's not explained how they are optimized.\n2. Eq. 5 is incorrect because left side is graph and right side is loss. You should change to $\\min L \\le L(...) + L(...)$\n4. In row 309 what is $\\phi(\\cdot)$? It is written that \"it extracts logits for class $c$,\" but where the logit here comes from?\n5. Fig. 3 is confusing to me. I guess the line for \"pushing energy\" means that one choose a small $T$ such that the probability becomes close to either 0 or 1? What does the line of budget mean? I guess it means the user wants to get a subgraph with 2 nodes? Anyways, the figure needs better explanation.\n6. Sec 4.4 should be better if the authors can study the method's performance by changing $T$. It seems that the difference between EDMA and EDMA-soft is only the value of $T$. Besides, the values of $T$ for both cases are not given."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The author identified a key challenge in explaining 3D GNNs: the discrepancy between soft and discrete masks. To address this, the proposed method introduces a novel approach using an energy-based function to represent the importance probability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors proposed a novel method for explaining GNN predictions on 3D graphs, such as molecular structures. They introduced a parametrized energy-based function to estimate the probability that a node is important. By adjusting the function’s temperature parameter, they discretize the output probability to either 0 or 1. The method minimizes both the explanation loss and the sum of importance probabilities to identify the most significant subgraph. Experimental results on two prediction tasks from the QM9 dataset, using two GNN models, demonstrate that the proposed method outperforms other baseline approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, the proposed method is poorly presented, lacking clear definitions and containing errors in the equations. More detailed explanations and proofs are needed to demonstrate how the two identified problems are effectively addressed. While the use of an energy-based function is new, the method’s novelty is limited, as it primarily introduces a parameterized explainer without significant advancements beyond that."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What is the difference between the 2D graph explanation results and the 3D graph explanation results?\n\n2. In Figure 4, there are chemical explanations that can be used as ground truth. Can we use the metric to compare with the ground truth?\n\n3. What is the performance of the model to be explained?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. This paper uses the energy-based model to provide a constraint rather than the loss function used in GNNExplainer\n\n2. This paper is well-organized, which makes it easy to follow the main idea."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper discusses the challenges and advancements in explaining the decisions of 3D Graph Neural Networks (GNNs), such as in the molecular data. The authors claim that existing explanation methods work well for 2D GNNs but struggle with 3D due to the vast number of edges. The authors propose a new approach to improve explanation accuracy by using energy values to represent an atom's importance in the prediction. By minimizing the error between the predicted and actual subgraphs, their method identifies stable subgraphs with high explanation fidelity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some core ideas are confusing. For example, the difference between 2D GNNs and 3D GNNs is not clear, even though this paper addresses explanations for 3D GNNs. In Section 3.1, Challenge 1 claims that the random graph is not suitable for 3D GNNs, but it is unclear what kind of assumption should be used. Challenge 2, the dense adjacency matrix, is not a problem in 2D graphs with size regularization.\n\n2. Some expressions are confusing. For example, in Figure 1, row b, the meaning of the red dashed lines and why they are needed is not clear.\n\n3. The metric differs from the mainstream definition, according to [1,2]. The experiments are limited to older methods. It would be better to compare it with more recent methods.\n\n\nReference:\n\n[1] Explainability in graph neural networks: A taxonomic survey, TPAMI, 2022\n\n[2] Towards Robust Fidelity for Evaluating Explainability of Graph Neural Networks, ICLR, 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. I recommend the authors provide a more solid argument justifying why 3D explanations are radically different from 2D ones (see weaknesses).\n\n2. I suggest verifying that the commonly used Gumbel activation is less effective than the proposed EDMA.\n\n3. Where does the expert knowledge depicted in the last two columns of Figure 4 come from? I suggest adding proper contextualization.\n\n4. Can the authors comment on the differences between GNNExplainer and the proposed solution? To my understanding, the only two differences are: (i) using node scores rather than edges scores; (ii) using two complementary energy scores to promote explanation discreteness, instead of the Gubmel activation or an element-wise entropy as suggested in [4]. \n\n[4] Gnnexplainer: Generating explanations for graph neural networks. Ying et al. 2019. NeurIPS"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- 3D graphs are likely to have unique features that need to be addressed with property techniques, also from an explainability perspective\n- The presentation and the writing are good\n- **Touches several overlooked aspects in many explainers**, such as the discreteness of the mask, and the difference between edge vs node masks\n- The paper focuses on regression explanation, which **is often overlooked in the XAI literature**."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the problem of devising ad-hoc graph explainers for 3D molecular representation, as prior work focuses on 2D graphs. Along the way, authors propose to use two relevance scores, instead of just one as typically done. This allows them to provide more confident explanations, with explanation scores better aligning to a discrete mask, which is typically desirable."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The differences outlined in Section 3.1 **do not seem to be convincing enough to justify ad-hoc approaches for 3D graphs**. Specifically, *Point 1* hinges on the unsuitability of the edge independence assumption in 2D explanations. While I agree that edge independence is oftentimes a very limiting assumption, I acknowledge that it is unsuitable for many 2D tasks as well (consider for example the case of social interactions, 2D molecules, or transportation networks), therefore not constituting a major difference between 2D and 3D graphs. *Point 2*, instead, highlights that 2D graphs are typically sparse. This is however not always the case, as there might be many scenarios in which also 2D graphs are dense (for example in social interactions, transaction records, or image regions in images). \n\n- Similar to above, lines 253-256 already hold for many 2D settings, making this argument not solid enough to motivate the need for 3D-specific methods.\n\n- The authors claim that previous methods do not account for the discreteness of the explanation mask. This is however oftentimes addressed by the common design choice of using a Gumbel distribution over the explanation scores (PGExplainer, LRI), which naturally pushes activations close to either 0 or 1. Therefore, **the authors should at least show that their proposed strategy to stimulate discreteness is more effective than using a Gumbel activation**.\n\n- **Only Fidelity- is used to evaluate explanation quality**, whereas other metrics are also available. The authors should at least provide both Fidelity- and Fidelity+, as they are complementary [1,2,3]. \n\n\n[1] Explaining the Explainers in Graph Neural Networks: a Comparative Study. Longa et al. 2024. ACM Comput. Surv.\n\n[2] GraphFramEx: Towards Systematic Evaluation of Explainability Methods for Graph Neural Networks. Amara et al. 2022. LoG\n\n[3] Explainability in Graph Neural Networks: A Taxonomic Survey. Yuan et al. 2023. IEEE"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024energybased,\ntitle={Energy-Based Discrete Mask Approximation for 3D Molecular Graph Explanation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yarlMUJePB},\nnote={under review}\n}"
},
"abstract": {
"value": "In recent years, Graph Neural Networks (GNNs) have become a powerful tool for modeling molecular data. To enhance their reliability and interpretability, various explanation methods have been developed to identify key molecular substructures, specifically a set of edges, in the decision-making process. Early work with 2D GNNs represented molecules as graphs with atoms as nodes and bonds as edges, neglecting 3D geometric configurations. While existing explanation methods perform well on 2D GNNs, there is a pressing need for 3D explanation methods tailored for 3D GNNs, which outperform 2D GNNs in many tasks. Current explanation methods struggle with 3D GNNs due to the construction of edges based on cut-off distances in 3D GNNs, resulting in an exponentially large number of edges. We identify the sources of errors in explanations and decompose them into two components based on a derived upper bound between the optimized masks and the actual explanatory subgraph. This gap can be significant, especially for 3D GNNs because of the large number of edges. To achieve optimal explanation fidelity, our method aims to bridge this gap by assigning two energy values to each atom based on its contribution to the prediction: one energy reflects the scenario where this node is important in making the decision, while the other represents the scenario where it is unimportant. In analogy to physics, lower energy values indicate greater stability in the prediction, and thus, we are more confident about the scenario with which it is associated. Our approach strives to push up and down the energies, respectively, to distinguish these two scenarios to simultaneously minimize both components of the derived upper bound of error, enabling us to identify a stable subgraph that maintains high explanation fidelity. Experiments conducted on backbone networks and the QM9 dataset demonstrate the effectiveness of our method in providing accurate and reliable explanations for 3D graphs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"3D Graph Explanation",
"3D Molecular Graphs",
"Energy-Based Models",
"Discrete Masks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/008598a3a439d306622852cdd19c43f6159f066f.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Energy-Based Discrete Mask Approximation for 3D Molecular Graph Explanation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yatNm6A6sR | OSM+: Cloud-native Open Street Map Data System for City-wide Experiments | main | Active | Global dataset;Traffic prediction;Traffic policy control | datasets and benchmarks | 3;3;3;5 | 3;4;5;4 | 2;2;3;2 | 1;2;2;2 | 2;3;3;2 | 3.5 | 4 | 2.25 | 1.75 | 2.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Additional comparative results with other large-scale traffic datasets, such as [1], would be beneficial. For example, how does OSM+ offer new insights compared to LargeST in the context of traffic prediction tasks?\n\n[1] Liu, Xu, et al. \"Largest: A Benchmark Dataset for Large-Scale Traffic Forecasting.\" Advances in Neural Information Processing Systems 36 (2024).\n\n2. More detailed methods and comprehensive results on the last two use cases. For instance, could the authors provide results that demonstrate the types of analyses or experiments made possible by OSM+?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. OSM+ provides open-source, large-scale road network data, offering an easy-to-use resource for developing various algorithms and advancing research within the related community.\n2. The diverse use cases highlight the usability of OSM+, with particular effectiveness demonstrated in traffic prediction tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a framework for processing large-scale road network data from OpenStreetMap, called OSM+. The framework consists of three components: a road network graph database, computing APIs, and a suite of data converters. Utilizing the processed road network data from OSM+, the authors present three illustrative use cases—traffic prediction, city boundary detection, and traffic policy control—to showcase the framework's usability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The technical contribution of the paper is unclear. From the current description of the graph database and query APIs, the advancements introduced by OSM+ appear limited. Although some improvements are mentioned in Section 3, what specific technical methods does OSM+ propose to achieve these advancements?\n\n2. The first use case on traffic prediction is well-explained; however, more comparative results with other large-scale traffic datasets, such as [1], should be provided to better demonstrate the value of OSM+ for traffic prediction tasks.\n\n[1] Liu, Xu, et al. \"Largest: A Benchmark Dataset for Large-Scale Traffic Forecasting.\" Advances in Neural Information Processing Systems 36 (2024).\n\n3. The results for the other two use cases are overly simplistic. More detailed methods and comprehensive results are needed to demonstrate the effectiveness of OSM+ in these applications. Additionally, it is unclear how OSM+ contributes to research in these two areas. For instance, could the authors provide results that demonstrate the types of analyses or experiments made possible by OSM+?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "The data in this paper is basically derived from OpenStreetMap's database, but the authors did not follow the organization's rules on data copyright.Please see https://www.openstreetmap.org/copyright"
},
"flag_for_ethics_review": {
"value": [
"Yes, Legal compliance (e.g., GDPR, copyright, terms of use)"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Existing OSMs are able to provide corresponding query api, which may vary due to different network conditions. However, this paper does not fully explain the advantages and performance improvement of OSM+ over existing open source or commercial map data tools (e.g., OSMnx, etc.), which makes it difficult for readers to understand its relative innovations and improvements clearly."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper reconstructs a worldwide structured road network database based on OpenStreetMap, which is a valuable resource for urban research and can significantly improve the ease and efficiency of related research.\n2. The OSM+ proposed in this paper is able to support a variety of urban research tasks, including traffic prediction, traffic control, and urban boundary detection, which provides opportunities for exploration and experimentation in related areas of research.\n3. The databases and tools provided in this paper help to enable efficient querying and data processing, support large-scale urban study, and reduce the time and computational resource overhead for road network data processing.\n4. This paper supports the use of map data for the fusion of multimodal spatio-temporal data, suitable for modern training of more complex large models and realizing innovative scientific discoveries."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces OSM+, a global road network map database designed to support city-wide research.OSM+ solves the problems of computational resources and format uniformity required for processing large-scale road data in urban research by building a structured database from OpenStreetMap data.OSM+ contains three main components: a global-scale road network map database, a series of automated parallel computing APIs that support efficient node and distance queries, and a data transformation tool for urban research. The paper also describes three application scenarios: a traffic prediction task, a transportation policy control task, and a city boundary detection task. The system reduces data preprocessing time, facilitates the fusion of multimodal spatial-temporal data, supports large model training, and accelerates the process of scientific discovery."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although the paper mentions a series of data processing steps, the lack of a detailed description of the data cleaning and transformation process puts the credibility of the data quality into question, and furthermore, the data for the paper appears to be directly imported and then converted to its corresponding data directly through osm2pqsql. https://planet.openstreetmap.org/ .\n2. Although OSM+ provides standardized global road network data, this is based on existing OpenStreetMap data and fails to clearly articulate its distinctive and innovative contribution\n3. While this paper cites applications such as traffic forecasting, transportation policy control, and urban boundary detection, the examples are not sufficiently in-depth to adequately demonstrate the specific ways in which OSM+ significantly outperforms other methods in these applications.\n4. This paper does not provide quantitative comparisons to demonstrate the performance benefits of OSM+ over other existing tools and formats."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What are the differences between the terms \"system\", \"framework\", and \"engine\" as used in this paper?\n2. Is \"window check\" a technical contribution of this paper? From my experience, similar techniques like spatial indexing exist.\n3. Could the authors provide more details on the contribution to storing road networks as graph-structured data? My understanding is that such graph data could be obtained relatively easily using the OSM API and packages like NetworkX. Are there any specific procedures designed for processing this graph data? For example, how many subgraphs are there? Did the authors merge different subgraphs or make other efforts to refine this data?\n4. Could the authors clarify the traffic policy control experiment?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This research showcases a large-scale system that effectively integrates data from multiple sources, demonstrating substantial engineering efforts. I recognize the authors' efforts to build a foundation for urban and AI research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This research presents OSM+, a large-scale road network computing engine built on OpenStreetMap to support urban research applications. OSM+ includes efficient APIs for spatial queries and data converters, facilitating tasks such as traffic prediction, policy control, and boundary detection. The paper demonstrates significant engineering and data processing efforts. However, the contributions and validation in this version of the paper are not clearly articulated, and revisions may be needed in the areas mentioned in the weaknesses and questions sections.\n\nBelow are my initial comments, and I will consider revising my scores if the authors provide additional clarifications and address the identified weaknesses during the discussion period."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper uses varying terms like system, framework, and engine to describe the proposed work, which makes it confusing for me to identify differences between them.\n\n2. The frequent use of the term \"efficient\" throughout the text lacks evidence (i.e., experiments) to support it.\n\n3. Figure 4 is not referenced in the main text.\n\n4. Some logical flow issues arise, such as in Section 4.1: \"However, in recent years, it has been noticed that newly proposed methods can hardly exhibit significant improvements over existing ones. Thus, we are here to propose 31 new city-level datasets.\" Introducing new datasets does not necessarily address marginal improvements in existing methods. Additionally, the main motivation for proposing a large number of datasets seems to be to highlight their dynamic and sparse characteristics. However, there is no statistical evidence demonstrating how sparsity varies between cities compared to classic datasets like PEMS and METR.\n\n5. The spatial and temporal spans of the 31 traffic flow datasets are not clearly outlined, nor is the process for data collection (e.g., traffic flow data) explained.\n\n6. The traffic policy control experiment lacks clarity, and no results for this experiment are presented in the main text.\n\n7. Overall, the experiments section offers limited insights that might benefit future research, aside from the city boundary detection task. The findings in the city boundary detection task seem only loosely related to the system’s contributions. It's unclear how this system differs from existing ones in identifying correlations between road networks and socio-economic indicators.\n\n8. Each contribution mentioned by the authors is not sufficiently addressed:\n- Data-wise: How is this data different from typical OSM data? If efficiency is a key feature of the proposed system, efficiency experiments should be included in the main experiments.\n- System-wise: The system’s uniqueness compared to other systems is not clearly articulated.\n- Application-wise: The definition and scope of applications, such as traffic signal control, are not clearly explained.\n- Benchmark-wise: The motivation behind the benchmark and the relevant dataset statistics are missing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Explain the question in weakness about sensors and the sparsity challenge.\n2. Figure 6 in the paper does not seem to be referenced or explained.What does Figure 6 aim to convey?\n3. Explain the question in weakness about experiment of traffic prediction."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper develops a road network graph database using OpenStreetMap, aiming to address the issues that processed data is often not well-defined or uniformly structured and is typically only used for one-time applications.\n2. Based on the proposed road database, a series of basic computing APIs are introduced to allow efficient node query and distance query.\n3. Customize road network data for typical urban research questions to facilitate experiments on datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper organizes OpenStreetMap and presents a structured global network graph database containing a billion nodes, for better accessibility and availability. The usefulness is demonstrated in three cases, traffic prediction task, city boundary detection task and traffic policy control task. For the extensively studied traffic forecasting task, it has introduced a new benchmark featuring 31 datasets. Overall, the core could be useful for spatial databasing and OSM downstream analysis, but it may be out of the scope of ICLR."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Better data organization could be useful, but the paper lacks novelty, and does not fit ICLR.\n2. The paper mentions the sensors and the sparsity challenge. However, the paper does not provide information on the source of the sensor data, and it is not reflected in the three tables of the databases discussed. Further, the paper lacks a sufficient explanation of how the Sparsity Challenge relates to the advantages of the database .\n3. Figure 6 in the paper does not seem to be referenced or explained. \n4. In the experiment of traffic prediction, the OSM database uses the UTD19 dataset and is compared with the PEMS dataset. But it appears that the advantages of the OSM database are not evident from this comparison. The variance problem probably comes more from UTD19 than from the OSM database."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024osm,\ntitle={{OSM}+: Cloud-native Open Street Map Data System for City-wide Experiments},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yatNm6A6sR},\nnote={under review}\n}"
},
"abstract": {
"value": "Road network data can provide rich information about cities and thus become the base for various urban research. However, processing large-volume world-wide road network data requires intensive computing resources and the processed results might be different to be unified for benchmark downstream tasks. Therefore, in this paper, we process the OpenStreetMap data and release a structured world-wide 1-billion-node road network graph database with high accessibility and usability. We have presented three illustrative use cases, traffic prediction task, city boundary detection task and traffic policy control task. Moreover, for the well-investigated traffic prediction task, we release a new benchmark with 31 datasets, which is much more comprehensive than the previously frequently-used datasets. While for the relatively novel traffic policy control task, we release a new 6 city datasets with much larger scale than the previous datasets. Along with the OSM+ dataset, the release of data converters facilitates the integration of multimodal spatial-temporal data based on map information for large model training, thereby expediting the process of uncovering compelling scientific insights."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Global dataset",
"Traffic prediction",
"Traffic policy control"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c1fe3e0374fb5de3864deb288a29f1de4f2d4de0.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "OSM+: Cloud-native Open Street Map Data System for City-wide Experiments"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yb4QE6b22f | Scaling Wearable Foundation Models | main | Active | Health;Foundation Model;Scaling | foundation or frontier models, including LLMs | 5;5;6;8;8 | 4;5;4;5;3 | 3;2;3;4;3 | 2;2;3;4;4 | 3;3;4;4;3 | 6.4 | 4.2 | 3 | 3 | 3.4 | -0.275839 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "For potential data release, the projects may go through responsible research practice checks."
},
"flag_for_ethics_review": {
"value": [
"Yes, Responsible research practice (e.g., human subjects, data release)"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses for detailed description of each points.\n\n1. How the model would generalize to additional wearable activity tracking tasks? or event wearable health tasks?\n2. Would the model and the representation generalize to unknown classes?\n3. Can the authors provide more comparisons with time series models?\n4. Describe the significance of the key take away of the paper. How it benefits the scalability of the model/approach in different settings.\n5. Can the model be use for more diverse down stream tasks?\n6. Clarify if you would open source the data and model?\n7. Polish the write up and compress the text a little bit."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Strengths:\n1. The experiments are conducted on a large scale, and modeling a variety of signals including activities of heart, skin, and motion.\n2. This work empirically shows that the scaling law of modeling can also be applied to the modality of wearable signals, in terms of scaling up computability, size of dataset, and size of model. \n3. The work includes reasonable baseline comparison with vision-based models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposed a foundation model aiming to serve as a general data encoder for wearable time series data. The author leverages signals capturing a variety of physiological activities for pre-training including activities of heart, skin, and motion. The model is evaluated on the tasks including time series imputation, forecasting, and human activity recognition. The author also shows that the scaling law also applied on large models for the modality of wearable signals."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses:\n1. The model has fixed shape input, which raises concern about generalization. For example, it is very common that different devices and scenarios have different sets of sensors with different sampling rate. Some wearables have PPG + inertial sensor data only. Some have other more sophisticated sensors like Galvanic Skin Response and Electrocardiogram. It’s unclear exactly how such a model would work with different sets of input modalities with diverse sampling and data resolution settings. \n\n2. It is not clear why modeling the time series wearable signal as an image only as opposed to other alternatives like spectral representations (fourier transformation, wavelet transformation). Wearable signals are essentially time series, and are more similar to audio modality. What is the intuition behind this design choice? Also, no comparison has been provided.\n[I] Huang, Po-Yao, et al. \"Masked autoencoders that listen.\" Advances in Neural Information Processing Systems 35 (2022): 28708-28720.\n[II] Ansari, Abdul Fatir, et al. \"Chronos: Learning the language of time series.\" arXiv preprint arXiv:2403.07815 (2024).\n\n3. Insufficient description of modeling strategy and motivation with lack of model description. The paper can benefit from adding a more comprehensive comparison with general time series models like Chronos [2].\n\n4. The key take away “the larger the model the better the performance” “the larger the dataset the better the performance” are fairly standard statements in the AI/ML area at this point. The paper can be improved by bringing in more insights with respect to the model size, scalability in different conditions (e.g., resource constraint/edge computing setting for example, or a distributed setting). Also, large offline dataset during training is hard to come by and often comes with significant privacy concerns. So when it comes to scalability, the computational resource and privacy are the main bottleneck. I think a bit more discussion around the key takeaways and the model’s scalability contributions would be important. \n\n5. Only a single real world downstream task of activity recognition has been explored in the current version of the work. While I highly appreciate the large-scale data collection efforts (that enabled the work) and the large computational model, only a single downstream task fails to highlight its capability. The paper can benefit from incorporating other real world mobile health applications such as stress/fatigue modeling with similar wearable data or may be other physiological conditions modeling.\n\n6. Activity recognition is a well studied area. One additional task that could be interesting is whether such a model generalizes across unseen classes (e.g., dancing, cooking, and other activities for daily living). This could further highlight the capabilities of the model.\n\n7. One of the core strengths of the paper is its large dataset. Making it available to the broader community could help research and development in this area. I could not find any mention about open sourcing the dataset. Also, no mention about open sourcing the model either. I understand that often there are IRB restrictions but I think making the model and data available could be a game changer in this area.\n\n8. Relatively minor comment but there are some repetitive statements. The paper can be shrunk a bit and can be made it a bit more compact."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "The paper claims new data of 165k individuals, which should be verified to be approved by an ethical review board.\nThe data anonymization as well should be checked.\nDistribution of the population should report the limited applicability to what type of individuals - inclusion/exclusion criteria in this protocol."
},
"flag_for_ethics_review": {
"value": [
"Yes, Discrimination / bias / fairness concerns",
"Yes, Privacy, security and safety",
"Yes, Legal compliance (e.g., GDPR, copyright, terms of use)",
"Yes, Responsible research practice (e.g., human subjects, data release)"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How was decided the feature space?\n2. How many individual training samples per person were used? \n3. Did you use user-stratified splits? Or what is the data split method?\n4. Why are the baseline methods for interpolation and imputation only Mean, NN and Linear?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Several tasks have been defined for multimodal model evaluation: imputation, interpolation\n2. A robust set of activities and extensive dataset for training 165k people.\n3. Comparative analysis across model parameters and data size through an ablation study.\n4. Analysis of multiple tasks: Classification, interpolation, reputation and extrapolation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a foundational model training for daily activity recognition from wrist worn portable devices. It uses a multimodal approach including 26 features out of four modalities (Acc, PPG, skin Temp and conductance).\nThe model presents an improved performance compared to other methods in classification tasks of activities after user-based self-labelled classes (mostly on sports activities), as well as in interpolation and imputation tasks. \nThe paper analyzes the model efficiency, and complexity needed by comparing on number of subjects, and samples needed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. 16 features extracted from PPG, Acceleration, Skin temperature and conductance, and altimetry - seems a very reduced space for learning. \n2. Temporal interpolation at the scale of 1 minute is not a very accurate task for wearable human data, as it only holds under strong assumptions of human behaviour. E.g. continuous activity, unchanged environment, no external inputs, among others.\n3. It needed to be clarified the number of individuals used for training and the methods for data labelling. \n4. Baseline method comparison for imputation, interpolation and extrapolation was rather simple and did not included other advance methods, see 1.\n[1] Maksims Kazijevs, Manar D. Samad. Deep imputation of missing values in time series health data: A review with benchmarking. (2023) Journal of Biomedical Informatics\n5. The title is too broad and uninformative --> this model is trained for sports activity and proof given in that space only of the wrist wearable data."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "As mentioned above."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The data processing and model training steps are clearly outlined, and the study systematically scales the model across key factors such as data volume, computational resources, and model size.\n- The paper includes a variety of tasks that effectively demonstrate the model’s utility across different contexts, enhancing the applicability of the LSM model in both generative and classification domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study scales a multimodal foundation model, LSM, across compute, data, and model size. Using a dataset of 40 million hours from 165,000 individuals, covering heart rate, heart rate variability, electrodermal activity, accelerometer, skin temperature, and altimeter data. The paper built a sensor foundation model and investigated strategies for training such models with considering data size, computation cost and model size.\nThe data processing steps are clearly explained and the the whole framework is tested on a variaty of tasks including generative and classification types of tasks.\nThe authors’ consideration of data processing and task diversity is a valuable contribution to the wearable and ubiquitous computing community, positioning this work as a significant step forward in multimodal sensor modelling. However, there are important areas for improvement, particularly regarding challenges specific to wearable computing and comparisons with state-of-the-art (SOTA) techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Areas for Improvement**\n\n- Background and Related Work: The paper is thin in covering prior works in multimodal wearable models. The chosen model (ViT) and baselines focus on non-wearable modalities, overlooking well-established approaches tailored to multimodal sensor data such as (but not limited to):\n\n> [1] Saeed, A., Ungureanu, V. and Gfeller, B., 2021. Sense and learn: Self-supervision for omnipresent sensors. Machine Learning with Applications, 6, p.100152.\n\n> [2] Deldari, S., Spathis, D., Malekzadeh, M., Kawsar, F., Salim, F.D. and Mathur, A., 2024, March. Crossl: Cross-modal self-supervised learning for time-series through latent masking. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining.\n\n> [3] Haresamudram, H., Essa, I. and Plötz, T., 2021. Contrastive predictive coding for human activity recognition. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, 5(2), pp.1-26.\n\n\n- Dataset Clarity and Statistical Insights: The paper does not clearly specify the dataset utilized. The statement, “In this paper, we present the results of our scaling experiments on the largest and the most diverse wearable dataset published to date…” implies that the dataset is previously published, yet it lacks explicit details. The authors should clarify the dataset's origin and provide comprehensive information regarding its attributes. Clarification on the dataset’s availability and publication status is necessary.\nDetailed statistical insights, including class distributions and sampling strategies, would enhance understanding of the dataset’s structure. This information would be particularly useful for evaluating how well the model might generalize across various activities and sensor modalities.\n\n- Data Sampling and Task Appropriateness: The paper lacks clarity regarding the time window chosen for training and inference (e.g., It is infered that 5-hour windows is used for both generative and classification tasks). This is a critical point, as activities typically span only a few minutes, making it unclear whether such extended windows are suitable for classification. Details on data sampling strategies and the selection of window sizes should be provided to better assess model performance across tasks.\n\n- Specific Challenges in Ubiquitous Computing: As a foundation model for wearable sensing, LSM should ideally address unique challenges in ubiquitous computing. For example:\n - the authors could discuss handling missing data at various stages (pre-training, fine-tuning, and inference), as highlighted by [2]. This would enhance the model’s robustness for real-world applications.\n - To demonstrate the model’s generalizability, it would be beneficial to extend experiments to include additional datasets covering diverse tasks, devices, and activities to analyse adaptability to a wider range of scenarios in wearable sensing."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "There is a lot of work that directly deal with accelerometer data in scale and recently also PPG data. The claim that these streams are hard to interpret and therefore not in scope - doesn’t seem a valid explanation. I do understand practical constraints in collecting these streams at a high enough sampling rate and so should suffice to explain the use of intermediates such as steps or heart rate.\n\nThe graphs with the scaling laws with the marker size and color simultaneously varying and with different y-axis scales is confusing. In fact, some marker size differences are small enough to not even stand out while accounting for a significant scale increase. Request to use different marker types instead of just size and make it more obvious, and also maintain y axis scale to compare more effectively."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "An original research work focused on a multimodal model with true large scale of data and model size that is comparable to language foundation models. The scaling laws validate proof of principle for this area. Sample applications chosen have practical significance.\n\nThe article is demonstrating high level of original research due to the scaling law contributions. They also gain additional validation by doing so with multi modal sensor streams. There have been other notable work showing the power of pre training on single or multiple sensor streams in the past at sufficient scale but nothing addressing the scaling question.\n\nThe results and analysis were presented with sufficient clarity. Some room for improvement has been suggested in the following section."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors have introduced a multimodal foundation model for sensor data. The input is mix of raw sensor samples and some preprocessed discrete derived samples due to some understandable practical constraints. They have set up a frame work to prove generative and inferential capabilities allowing the model to be regarded as foundational. With a good representative choice of model architecture, scales of data and a diverse set of scaling experiments the authors have derived a first proof of power scaling laws showing validation of feasibility for the concept of sensor foundation models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the work is generally comprehensive, some areas of improvement remain. \n\nThe limited choice of downstream tasks raises questions on the true foundational nature of the model and its few shot capability. It seems important to understand the ability to generalize to tasks beyond classification eg. regression tasks or system identification tasks.\n\nThe choice of generative evaluation goals while interesting fails to address the practical utility of the performance shown, especially with large errors in imputation. Also, there exist sophisticated imputation and interpolation schemes that are more performant comparators eg. MICE which might be the real performance benchmarks for the application. \n\nThe definition of some of the features and preprocessing is incomplete and not in the language of mathematics. Some descriptions are highly redundant eg “Kurtosis is kurtosis”. Recommend use of the language of accurate mathematical description to define the features clearly."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In section 3.1 (data preprocessing) provide an explicit statement of which physiological metrics (\"features\") are calculated on-device vs, which are calculated on the stored centralized data. This section mentions broadly that the raw sensor signals are not stored centrally or on-device (which seems to imply that all of the features are calculated on-device), but whether this is the case should be stated clearly. \n2. Toward the end of section 3.1 (data preprocessing) the authors state “Within each 300-minute window, missing data between valid data points was linearly interpolated, and leading missing minutes were backfilled.” Was the **test set** that was used for evaluating generative tasks free of these missing values that had been infilled by linear interpolation/backfilling? If so, these time points should be excluded from the calculation of generative task performance (MAE/MSE) because they do not represent real measurements (but rather synthetic values created in data preparation using a human-chosen method). Please comment on how this issue was handled. \n3. Typo(?) on line 202. “Kurtosis is the kurtosis of the BFP signal”. Should (I think) be BPF.\n4. Why was the data input length of 5h chosen? What other lengths were considered, and how were these compared to ultimately land on the choice of 5h."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "## This paper represents strong contributions in the following areas: \n\n### Originality: This paper is the first to evaluate self-supervised learning methods on a multi-modal physiological data set at this scale. The authors describe a novel modeling approach as well as a comprehensive description of their ablation experiments. \n\n### Quality: The authors present rigorous, systematic and thorough evaluation of performance on the generative tasks as a function of pre-training data volume, and model size. \n\n### Significance: The analysis of generative tasks is comprehensive and may be of interest both within the problem domain (models for multi-modal data from wearable devices) as well as more broadly. However, the lack of compelling evidence for model generality across a range of downstream tasks may limit the broad interest. \n\n### Clarity: The paper is fairly clear regarding the data source (including featurization), modeling approach, performance analysis and related ablation experiments. Some details (discussed in the section below) are missing, but this is not likely to impact the general conclusions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper describes self-supervision training methodology (infilling of randomly-masked patches) applied to a very large data set of multi-modal time series data collected by wearable devices under natural usage. The authors describe a comprehensive analysis of the model performance as a function of training data volume, compute resources and model complexity across a set of generative tasks (imputation, extrapolation/forecasting and interpolation). Additionally they also evaluate performance of the resulting pre-trained encoder (and compare this with a supervised learning baseline) on a pair of discrimination tasks: binary exercise/non-exercise classification and multi-class activity classification."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "# Brief Summary #\nThis paper had some important weaknesses that are listed briefly below, with more detailed comments and some suggested steps to address these weaknesses following the list:\n\n1. [Most concerning] Lack of variety in downstream task evaluation. The paper evaluates only two closely-related downstream tasks (binary and multi-class activity classification) that utilize the learned representations. Given that one of the main characteristics of ‘Foundation Models’ is that they are readily adaptable to a wide variety of downstream tasks, it is a stretch to consider the LSM model to be a foundation model rather than a tool for time series interpolation/extrapolation. \n2. No analysis of model performance in a manner that is agnostic to the downstream task (e.g. using measures of unsupervised embedding quality)\n3. Incomplete comparison of model performance on downstream tasks against baselines— LSM performance was compared with SL-trained model having the same ViT architecture, but not against other classifier types.\n4. Absence of some details and explanation on the problem setup for downstream classification tasks.\n5. The authors are missing discussion/citation of some past publications that represent closely-related research (learning general-purpose representations from multi-modal physiological time series captured by wearable devices) \n\n\n# Additional Discussion Details #\n\n## Issue 1: [Most concerning] Lack of variety in downstream task evaluation.\nOther than an extensive analysis of the interpolation/imputation/extrapolation tasks (which are useful proxies), the paper evaluates performance on only two closely-related downstream tasks utilizing the LSM embeddings: binary exercise/non-exercise classification, and multi-class activity classification. This makes it hard for the reader to have confidence that LSM can truly be considered a “foundation model”. Per the proposed terminology set forth in Bommasani, et al., ‘On the Opportunities and Risks of Foundation Models’, in addition to being trained via self-supervision on diverse large-scale data, a FM “can be adapted to a wide range of downstream tasks”. Moreover, there would be particular value and impact in demonstrating that the LSM embeddings are useful for ***physiologically-relevant*** downstream tasks. \n\n### Suggestions to address Issue 1:\nThe data source described by the authors contains some additional metadata/labels that could be used as targets for downstream tasks, building evidence for the general-purpose utility of the LSM embeddings. The metadata includes subject-reported age, sex, and weight (possibly BMI, which is listed in table 2a) that can be used as regression and/or classification targets. \n\nOther targets, particularly those with some direct physiological relevance, would also provide additional evidence for general-purpose utility of the model. I recall that Fitbit generates a ‘Cardio Fitness Score' that represents an estimate of VO2max. If VO2max is available it could be utilized as either a regression target or a classification target (simple binning into high/low or tertiles/quartiles). This would be a good choice of task, since there is a first-principles argument that the temporal relationship between physical effort and heart rate response *should* contain information about VO2max. 
\n\n## Issue 2: No analysis of model performance in a manner that is agnostic to the downstream task\nIn a complement to suggestions for issue 1 above (more downstream task variety) providing some task-agnostic analysis of embedding ‘quality’ would also help the argument that LSM is in fact a general-purpose foundation model. The authors provide some t-SNE visualization of embedding distribution in Figures 8 and 9, but no accompanying quantitative analysis. \n\n### Suggestions to address Issue 2:\nInclude some quantitative measure of how well-distributed the model embeddings are, and/or how well 5h data segments generated by the same individual can be distinguished from segments generated by other individuals. \n\nMultiple measures of embedding “quality” have been reported that are useful for evaluating and comparing models in a manner that is agnostic to the downstream task (for a recent review/comparison, see Tsitsulin, *et al.* ‘Unsupervised Embedding Quality Evaluation’ https://arxiv.org/abs/2305.16562). \n\n## Issue 3: Incomplete comparison of model performance on downstream tasks against baseline.\nThe authors compare the performance of their pre-trained model against a supervised learning baseline for the two downstream classification tasks. However, this baseline (ViT with 100M parameters) may not be optimal, particularly for the relatively simple task of exercise vs. non-exercise binary classification. It’s worth noting that based on the test set summary in Table 2b simply guessing the majority class (non-exercise) every time would produce ~65% accuracy, but the 100M-parameter ViT only achieves 70.9% with supervised learning (Table 3b). \n\n### Suggestions to address Issue 3:\nCompare performance on both downstream classification tasks using an alternate modeling approach such as random forest or regularized logistic regression (or other boilerplate ML classifiers). Given the problem statement (classify a period of time as exercise vs. non-exercise, or by exercise type, using a time series of 27 features), it is possible that RF or LR could outperform the SL ViT. Even if they don’t, it could give an indication of the performance gap between these ‘non-deep’ supervised ML methods and the ViT SL baseline. \n \n## Issue 4: Absence of some details and explanation on the problem setup for downstream classification tasks.\nSection 4.2 on discriminative tasks is very light on details, to the extent that it would be difficult or impossible for other researchers to reproduce the findings (using their own data and models). The data segments are 5 hours long (so presumably only a subset of that time contains the activity/exercise of interest) but it is not clear how the activity period within the segment is labeled or how the classification is evaluated. Tables 3b and 3c report mAP as a performance metric, which makes me think that there is some time segmentation or drawing of boundary-boxes involved in the downstream task, but this is not explained in the text. In contrast, Appendix section B.3 states “This is likely as there are significant periods of walking in the 5-hour inputs, even if the activity is labeled otherwise”, which implies that the entire 5h inputs are assigned a single class label. This is confusing (or at least unclear) to the reader. 
\n\n### Suggestions to address Issue 4:\nProvide more detail on the downstream classification problem setup, accompanied by a diagram or illustration (at least in the Appendix) that illustrates visually how the model outputs are evaluated against the ground truth labels. \n\n## Issue 5: The authors are missing discussion/citation of some past publications that represent closely-related research. \n\nSome relevant published work that is fairly similar in scope (learning general-purpose representations from multi-modal physiological time series captured by wearable devices) exists, but these are not cited or discussed. One clear example is D. Spathis *et al.*, Self-supervised transfer learning of physiological representations from free-living wearable data (2020, https://arxiv.org/abs/2011.12121) which utilized pretext tasks to pre-train models for multi-modal inputs from wearables (heart rate and raw IMU), then evaluated the resulting embeddings on a variety of downstream tasks. \n\n### Suggestions to address Issue 5:\nAt least include the Spathis *et al.* paper (listed above) among the citations. Given the close match in scope, this paper should probably also be included in Table 1 despite not referring to its work as a “foundation model” (being from 2020, it predates the popularization of the “foundation model”). It may also be worth doing an additional literature review specifically looking for pre-2021 papers that do not use the term “foundation model” but are clearly pursuing the same objective."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present the results of scaling experiments on the largest wearable dataset published to date, with experiments on generative and discriminative tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024scaling,\ntitle={Scaling Wearable Foundation Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yb4QE6b22f},\nnote={under review}\n}"
},
"abstract": {
"value": "Wearable sensors have become ubiquitous thanks to a variety of health tracking features. The resulting continuous and longitudinal measurements from everyday life generate large volumes of data; however, making sense of these observations for scientific and actionable insights is non-trivial. Inspired by the empirical success of generative modeling, where large neural networks learn powerful representations from vast amounts of text, image, video, or audio data, we investigate the scaling properties of sensor foundation models across compute, data, and model size. Using a dataset of up to 40 million hours of in-situ heart rate, heart rate variability, electrodermal activity, accelerometer, skin temperature, and altimeter per-minute data from over 165,000 people, we create LSM, a multimodal foundation model built on the largest wearable-signals dataset with the most extensive range of sensor modalities to date. Our results establish the scaling laws of LSMs for tasks such as imputation, interpolation and extrapolation, both across time and sensor modalities. Moreover, we highlight how LSMs enables sample-efficient downstream learning for tasks like exercise and activity recognition."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Health",
"Foundation Model",
"Scaling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5e7566b18a3df6182f6e74e81862f06d5b92465f.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Scaling Wearable Foundation Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ybFRoGxZjs | ThermalGaussian: Thermal 3D Gaussian Splatting | main | Active | 3D reconstruction; Thermal fild reconstruction; 3D Computer Vision; Machine learning approaches; | applications to computer vision, audio, language, and other modalities | 3;5;6;8;8 | 4;4;4;3;4 | 3;3;3;3;3 | 2;2;2;3;4 | 3;2;3;3;3 | 6 | 3.8 | 3 | 2.6 | 2.8 | -0.527046 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No ethical concerns identified."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Can the authors clarify the choice of hyperparameters for multimodal regularization? Are there general guidelines for adjusting them?\n\n- How does the model handle extreme cases, such as highly reflective surfaces or objects at temperatures that differ significantly from ambient conditions?\n\n- Could the RGBT-Scenes dataset be expanded to include data for real-time applications, like moving objects or dynamic scenes?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed approach is novel, especially in integrating thermal and RGB images in 3D reconstruction via Gaussian splatting. And the experimental results are clear, and include diverse evalutaion metrics and ablation studies to validate performance of this methods.\n\nAlso, introducing the RGBT-Scenes dataset is a valuable contribution that can serve as a benchmark for future work in thermal 3D reconstruction."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduce ThermalGaussian, a multimodal Gaussian technique that renders high-quality RGB & thermal images from new views.\nAlso they introduce a new real-world dataset named RGBT-Scenes, to address the issue of existing problems.\nAuthors propose multimodal regularization to prevent overfitting and show that ThermalGaussian outperforms previous methods by enhancing rendering quality, reducing storage requirements, and improving efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The process of multimodal initialization and the specific configurations for the thermal Gaussian construction are intricate, making the approach challenging to implement without in-depth knowledge. And, although the RGBT-Scenes dataset is comprehensive, details regarding environmental diversity and lighting conditions could strengthen its applicability across broader contexts."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- What does „Mix50“ in Figure 2 stand for? Is Eq. (5) applied with $\\beta=0.5$? If so, please leave a note in the caption of Figure 2.\n\n- How sensitive is $\\lambda_{smooth}$? Would it be possible to provide a small ablation study?\n\n- How was $\\lambda$ in Eq. (6) and Eq. (10) set? Also, are these two lambdas the same?\n\n- As far as I understand, MSMG uses the cost function described in Eq. (7), i.e., without multimodal regularization. So, whenever \"Ours_MSMG\" is used in Tables 2 and 3, it refers to MSMG method trained without multimodal regularization. This is in accordance with Table 4, where authors add a dedicated row for \"Ours_MSMG+MR\" which would not be necessary if MSMG uses MR per default (in this case, I would rather expect a row with \"Ours_MSMG w\\o MR\" in Table 4). As this would mean that MR has never been used except in the ablation study, I would guess that per default, Eq. (12) has been used to train MSMG. Please make this more clear (e.g., by replacing Eq. (7) with Eq. (12) and referring to Section 3.4. on how to choose $\\gamma$).\n\n- What cost function is used to train OMMG? Is it Eq. (7)? If so, please move it to the respective paragraph (starting L296).\n\n- In Figures 5 and 6, which strategy does \"Ours\" refer to?\n\n- To be honest, it is quite surprising to me that a conventional chessboard pattern printed on normal paper produces such highly contrasting thermal images. There are numerous works (see, e.g., [6] and references therein) that try to design special calibration objects for thermal cameras. I wonder why all these papers exist when a simple chessboard pattern printed on paper will also work. What are the physical principles behind why a simple chessboard pattern printed on paper works, and how does it compare to more complex calibration objects in terms of accuracy and ease of use?\n\nReferences:\n\n[6] Issac N. Swamidoss et al. Systematic approach for thermal imaging camera calibration for machine vision applications. Optik 247, 2021."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The topic is interesting and of importance to the community, as demonstrated by the large body of recent work [1-5].\n\n- The paper is well-written and easy to follow. Experiments are well described and cared out thoroughly. The ablation study nicely supports all design decisions. All in all, I don’t see any obvious flaws.\n\n- This is (to the best of my knowledge) the first paper to use some kind of scheduling to dynamically adjust the weight that balances the RGB and thermal loss in Eq. (12). The proposed strategy seems plausible to me, and, according to the experimental evaluation, significantly reduces the memory footprint while only slightly decreasing image quality. Due to the reduced number of Gaussians, this strategy also increases rendering speed.\n\n- Compared to prior work [1,3] (but similar to [5]), the method presented in this work is actually capable of *improving* the RGB rendering quality, especially in low-light conditions.\n\n- The paper proposes and evaluates not only a single method but three different strategies to incorporate thermal images into 3DGS, thus can be seen as the 3DGS-based pendant to [1].\n\n- I appreciate the newly collected dataset. Although small-scaled (like all other publicly available RGB+thermal datasets), it is diverse, contains forward-facing and 360-degree scenes, and encompasses indoor and outdoor environments. This is very helpful to the community.\n\nReferences:\n\n[1] Mert Özer et al. Exploring Multi-modal Neural Scene Representations With Applications on Thermal Imaging. ECCVW, 2024.\n\n[2] Yvette Y. Lin et al. ThermalNeRF: Thermal Radiance Fields. ICCP, 2024.\n\n[3] Miriam Hassan et al. ThermoNeRF: Multimodal Neural Radiance Fields for Thermal Novel View Synthesis. arXiv, 2024.\n\n[4] Tianxiang Ye et al. Thermal-NeRF: Neural Radiance Fields from an Infrared Camera. arXiv, 2024.\n\n[5] Jiacong Xu et al. Leveraging Thermal Modality to Enhance Reconstruction in Low-Light Conditions. ECCV, 2024."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method based on 3DGS that jointly learns multi-modal scene representations of RGB and thermal images. In fact, the authors present and evaluate three different strategies to incorporate thermal images into 3DGS: MFTG (based on fine-tuning), MSMG (leverages multi-task learning), and OMMG—the latter yields a single Gaussian, which is achieved by extending 3DGS with spherical harmonics to represent thermal data. Moreover, a regularization term is proposed that accounts for the smooth nature of thermal images, and a new weight scheduling strategy prevents the proposed MSMG from overfitting to a single modality. Based on a newly collected RGB+thermal dataset consisting of ten scenes, the authors could successfully show that their method outperforms 3DGS baselines and a recent NeRF-based method in terms of RGB and thermal rendering quality while requiring less memory."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I am missing citations (and discussions) of recent related works [1,2,5] in the introduction and related work sections.\n\n- I would love to see empirical evidence for the statement in L337: \"we observe that the weight coefficients of a modality align linearly with the Gaussian it ultimately generates\". This is a quite interesting finding that would be much better supported with data.\n\n- Authors claim multiple times (for example in L028, L104, and L536) that they are the first to use 3DGS to model RGB and thermal images. This statement is actually not true and should be revised. The authors of [5] already presented a 3DGS-based method that incorporates RGB and thermal data (see Section 4.5 of the main paper and Section 4 of supplementary).\n\n- In Table 1, the following open-source multi-view RGB+thermal datasets are missing: ThermalMix proposed in [1], the datasets presented in [2], and MVTV proposed in [5]. Why is the proposed dataset better than these datasets? Also, it would be good to provide specific criteria for how multi-view consistency and content richness is evaluated across datasets. This would help clarify the comparison and justify any claims about the proposed dataset's advantages.\n\n- With the high number of recently published RGB+thermal datasets, I think it might make sense to evaluate the introduced method on at least one of these datasets, also to mitigate potential dataset bias. \n\n- In addition, the proposed method could be compared to some more of the recently published NeRF-based methods. There is code available for [2].\n\n- In L097 authors mention that ThermoNeRF [3] reduces RGB rendering quality, while the proposed method improves it (according to Table 3, an average increase of almost 5% in PSNR). I think that is a huge plus for the proposed method; however, this argument would be much stronger if authors would add comparisons with NeRF-based methods to Table 3.\n\n- I would love to see a side-by-side comparison of thermal renderings with and without the smoothness term.\n\n- Although the proposed weight scheduling used in MSMG yields lower storage requirements, the average rendering quality for RGB and thermal data is worse than for OMMG (see Tables 2 and 3; difference higher for thermal renderings, drop of about 1% in PSNR). So, if one wants to maximize rendering quality and therefore uses OMMG, the benefit of having lower memory requirements is gone (see Table 4; OMMG requires about 60% of the storage space of 3DGS in contrast to 8% for MSMG).\n\n- In Section 3.2, three different strategies to obtain camera poses for thermal images are mentioned. What strategy is actually used for the experiments? Also, what strategy performs best? A quantitative comparison would be really helpful, also because MSX seems to perform best (just by looking at Figure 2). The problem I see is that MSX is patented by FLIR, as such it is not available to other cameras. Hence, the practical use of the third strategy is highly limited, and it would be good to know which strategy could be used instead. Do the other strategies in any form make use of MSX images as well?\n\nReferences:\n\nSee above."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "It is necessary to ensure that the dataset was acquired properly."
},
"flag_for_ethics_review": {
"value": [
"Yes, Privacy, security and safety"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Applicability to a Broader Range of Hyperspectral Imaging\n\nWhile the RGBT images presented involve the addition of a single channel, they can broadly be considered a form of hyperspectral imaging, as they incorporate the infrared spectrum. Could the authors discuss the potential for their multimodal approach to be applied to a broader range of hyperspectral imaging? Additionally, I am interested in the authors' views on whether this approach could be extended to studies involving multi-wavelength, multichannel data, and if it could serve as a basis for hyperspectral data analysis.\n\n2. Requirement for Well-Aligned RGB and Thermal Data and Practical Field Applicability \n\nThe proposed method relies on the availability of well-aligned RGB and thermal data, which may limit practical field applications. In cases where precise data alignment is challenging or unfeasible in real-world scenarios, are there any methods the authors could suggest to mitigate this limitation?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Effective Multimodal Learning and Loss Function Design\n\nThe paper introduces an initialization strategy for aligning RGB and thermal data, along with a loss function tailored to thermal characteristics, allowing these modalities to learn complementarily. This approach enables each modality to mitigate the limitations of the other, achieving optimal reconstruction performance.\n\n2. Model Efficiency and Storage Optimization\n\nBy applying multimodal regularization to prevent overfitting and reduce redundant Gaussians, the model reduces storage requirements by approximately 90% compared to conventional methods. This enables high-quality 3D reconstruction while also enhancing storage efficiency and rendering speed.\n\n2. Contribution of the RGBT-Scenes Dataset to the Research Community\n\nThe RGBT-Scenes dataset introduced in this paper provides paired RGB and thermal images across diverse indoor and outdoor scenes, offering a valuable resource for thermal-based 3D reconstruction research. This dataset provides a foundation for researchers to test and expand upon the proposed model, thereby promoting advancements in the field."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an enhanced method that extends the 3D Gaussian Splatting (3DGS) technique through the ThermalGaussian model, allowing for joint learning and rendering of RGB and thermal data. By learning thermal images together with RGB data in a multimodal framework, the approach aims to improve 3D scene reconstruction capabilities across fields where thermal imaging is valuable, such as in military, industrial, and medical applications. The paper presents initialization strategies for aligning and merging RGB and thermal data, a loss function tailored to the characteristics of thermal imaging, and multimodal regularization techniques to prevent overfitting and optimize storage. Additionally, a new dataset, RGBT-Scenes, is introduced, providing research data that includes both RGB and thermal images."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of Thermal Data Description\n\nWhile the provision of a dataset is one of the significant contributions of this paper, it lacks sufficient information for readers to understand the characteristics of thermal data. Including details about general characteristics of infrared imaging, as well as optical or photographic settings used during data collection (e.g., imaging wavelength range, whether exposure values were fixed), would help readers better understand the dataset's properties and enhance reproducibility.\n\n2. Lack of Discussion on Limitations\n\nThe paper focuses on the model's performance but lacks an explicit discussion of its limitations. For example, it would benefit from exploring conditions under which the proposed multimodal regularization and initialization strategies may not perform well. Including such a discussion would allow for a more realistic assessment of the model's applicability and provide important insights for researchers who may wish to build upon this work.\n\n3. Lack of Discussion on Applicability and Future Work\nAlthough the multimodal learning approach for RGBT data proposed in this paper is academically valuable, the discussion on future work is somewhat general. The paper mentions super-resolution and dynamic scene reconstruction as directions for further research, but these are relatively standard research topics rather than areas specifically tailored to advancements in thermal 3D reconstruction."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "In the paper, the authors say that thermal imaging is practical task and 3D reconstruction gives significant applications which 2D images cannot provide. The reviewer wonders what is the specific case of that thermal 3D reconstruction is practical than 2D thermal image? There is a logical jump in the connection between the two. This context would be added at the related work 2.1 section.\n\nIn L. 314, the paper denotes thermal equilibrium, and it would be nice to explain what this means.\n\nIn the related work section, both of ThermoNeRF and ThermalNeRF are denoted but why only ThermoNeRF is compared to your method?\n\nThe reviewer suggests that comparison to existing thermal 3D reconstructions and to datasets used in previous studies, such as ThermoNeRF and Thermal-NeRF, would further emphasize the strength of the method itself."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper presents new thermal-RGB datasets which will be helpful in future research.\n\nThe method shows quality improvement not only for thermal reconstruction but also for RGB reconstruction.\n\nThe method that adds thermal reconstruction on 3DGs is effective and straightforward."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "ThermalGaussian is the first work with thermal 3D reconstruction with 3DGS representation.\n\nThe paper achieves thermal and RGB 3D reconstruction simultaneously, and improves the rendering results both of color and thermal images.\n\nFinally, the paper reduces the model storage cost by 90%."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper proposes three multimodal training methods, which are MFTG, MSMG, and OMMG. The reviewer is confused about the pros and cons of each model, and users are confused about which model to choose. In the quantitative evaluation, the best performing models are distributed across each scene, with no model performing overwhelmingly well.\n\nIn the three multimodal training methods, the insight about why these three design are needed is lacked. In L. 283, the authors say “thermal Gaussian training in the second phase may not fully leverage the information from the color modality”, and there is no evidence about this.\n\nThere is no discussion about the limitations.\n\nIn L.448, there is a type. “Ours” → “ours”"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please respond to the points made in the Weaknesses section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper investigates an interesting problem of incorporating thermal information into 3D reconstruction by improving on the prevalent 3D Gaussian Splatting method. The presentation is clear and easy to follow. The results are also interesting in that both RGB and thermal reconstruction have improved."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of scene reconstruction with both RGB and thermal image inputs. Specifically, 3D Gaussian Splatting is adopted as the baseline method as a replacement to NeRF. To equip 3D Gaussian Splatting with the ability to also learn 3D thermal representations, this paper proposes a series of strategies for multimodal Gaussian reconstruction, including multimodal initialization, three different thermal Gaussians, constraints specific to thermal modalities, and multimodal regularization. The paper also introduces a new dataset designed for thermal 3D reconstruction. \n\nExperiments demonstrate the advantage of the proposed method in the quality of reconstruction, in both RGB and thermal modalities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I do not hold critical concerns to this paper. However, there are some issues that I'd be happy to see clarified. \n\nIn the Multiple Single-Modal Gaussians strategy, it is stated that \"Each Gaussian model is influenced\nnot only by its corresponding input modality but also by others.\" But how so? RGB modality and thermal modality seem to be uncorrelated to me since 3D Gaussians are trained separately for each. \n\nRegarding experiments, is there a specific reason that Thermal-NeRF has not been included in the comparison?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "The use of infrared thermal imaging generates new perspective images and realistic 3D reconstructions, improving both thermal and color image quality while significantly reducing memory requirements."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024thermalgaussian,\ntitle={ThermalGaussian: Thermal 3D Gaussian Splatting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ybFRoGxZjs},\nnote={under review}\n}"
},
"abstract": {
"value": "Thermography is especially valuable for the military and other users of surveillance cameras. Some recent methods based on Neural Radiance Fields (NeRF) are proposed to reconstruct the thermal scenes in 3D from a set of thermal and RGB images. However, unlike NeRF, 3D Gaussian splatting (3DGS) prevails due to its rapid training and real-time rendering. In this work, we propose ThermalGaussian, the first thermal 3DGS approach capable of rendering high-quality images in RGB and thermal modalities. We first calibrate the RGB camera and the thermal camera to ensure that both modalities are accurately aligned. Subsequently, we use the registered images to learn the multimodal 3D Gaussians. To prevent the overfitting of any single modality, we introduce several multimodal regularization constraints. We also develop smoothing constraints tailored to the physical characteristics of the thermal modality.\nBesides, we contribute a real-world dataset named RGBT-Scenes, captured by a hand-hold thermal-infrared camera, facilitating future research on thermal scene reconstruction. We conduct comprehensive experiments to show that ThermalGaussian achieves photorealistic rendering of thermal images and improves the rendering quality of RGB images. With the proposed multimodal regularization constraints, we also reduced the model's storage cost by 90\\%. The code and dataset will be released."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"3D reconstruction; Thermal fild reconstruction; 3D Computer Vision; Machine learning approaches;"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7707bcdbdb09bc092ba173c3806f9f1d749e0f1c.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/e1e1a836ff88dc3f24a360b6cc96db5150a73053.zip"
},
"title": {
"value": "ThermalGaussian: Thermal 3D Gaussian Splatting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ybWOYIuFl6 | BNEM: A Boltzmann Sampler Based on Bootstrapped Noised Energy Matching | main | Active | neural sampler;Boltzmann distribution;diffusion model | generative models | 3;3;8;8 | 4;4;4;4 | 3;2;3;4 | 2;2;3;3 | 3;3;3;3 | 5.5 | 4 | 3 | 2.5 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Why is the distribution of interatomic distances for NEM and BNEM less sharply peaked than the ground truth for LJ-55?\n\nHave the method been tried on any larger, more complicated systems? Even if the results are not impressive, it would be useful for the field to know when this method breaks down."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The paper describes the method very clearly. In particular, figure 1 is quite concise. Moreover, it shows proof that the improvements suggested are indeed improvements. Section 3 is well written, and the score vs. energy section contribution is valuable.\n\nThe results are strong and show improvement over other methods for the toy problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a neural sampling method for known energy functions. The importance of this problem is well-established, and the distinction between this and methods learned from sample data is sound. The NEM method improves on previous neural samplers by using an energy-based training objective, using a lower noise target, and introducing a bootstrapping method. The results show improvements over other listed neural samplers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the paper clearly shows improvement over previous neural sampling methods, it still makes little or no progress on the fundamental challenge of scaling to larger systems. The authors acknowledge this, but it ultimately make this an incremental improvement, not a transformational one."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See \"Weaknesses\" above."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The MS attacks an important problem and improves upon the state of the art. The solution is original (to the best of this reviewer's knowledge) and provides theoretical as well as empirical reasons to prefer their proposal over alternatives."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors consider a setting in which we want to draw samples from a Boltzmann distribution, for which we have the ground truth energy function, but cannot normalize into a probability distribution (for the usual reasons). A very recent proposal (Akhound-Sadegh et al., 2024, \"iDEM\") for this problem is to model the \"score function\" (in Hyvarinen's sense) with a neural network, as in a diffusion model; but, lacking data samples, to target not the empirical score (via Tweedie's formula) but rather a Monte Carlo estimate written in terms of the energy function. The MC samples are drawn in an outer loop by simulating the reverse diffusion process, using the current score-estimating network.\n\nThe present MS proposes to alter iDEM slightly, by modeling the *energy* rather than the score--again by targeting a MC estimate, in this case of the energy itself rather than its gradient. They prove that (under certain conditions--I did not read the proof in detail) the error of iDEM's score estimator is strictly larger than the error of their own energy estimator; and (empirically) than the error in the score function that is produced by differentiating their energy estimator (and which matters because it will be used for sampling). To reduce variance in this estimator further still, the authors further propose a \"bootstrapping\" technique in which the target energy is not constructed from the known data energy, but instead estimated from a model energy at a smaller noise level.\n\nEmpirically, the authors show that their approach yields superior samples to iDEM as well as other recent proposals, particularly with the \"bootstrap\" improvement."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) On the GMM-40 task, the manuscript's version of iDEM (Fig. 2) is visibly inferior to the one in the original paper (Akhound-Sadegh et al, Fig. 3), which is much closer to ground truth. Something similar is true of the energy histogram for LJ-55 (Fig. 3 in this MS, Fig. 4 in op. cit.). Can the authors explain these discrepancies?\n\n(2) For DW-4, LJ-13, and LJ-55, the improvement in Wasserstein-2 distance provided by NEM and BNEM over iDEM appears marginal (Table 1). Considering the additional computational cost of evaluating the energy gradient, this may not justify the proposed approach.\n\n(3) The reported W2-E for iDEM is high, potentially due to outlier samples as pointed out in the manuscript. It would be nice to re-evaluate W2-E removing outliers for comparison with NEM and BNEM. \n\n(4) Learning energy instead of score may improve estimation, but sampling requires costly repeated gradient evaluations as steps increase. A table comparing sampling times for score-based (iDEM) and energy-based (NEM) training would be helpful.\n\n(5) The descriptions of the problem setting and proposed solution could be much improved; this reviewer, at least, found the exposition in Akhound-Sadegh et al. (2024) much clearer.\n\n(6) The Fisher divergence (Eq. 8) is a well understood quantity, with connections to KL divergence---e.g., in the context of diffusion models, it arises naturally under the standard ELBO loss. Can the authors provide any similar theoretical appeal for the squared error in energy, Eq. 10?\n\n\n\nMINOR \nDiffusion models were introduced by Sohl-Dickstein et al. in \"Deep unsupervised learning using nonequilibrium thermodynamics\" (ICML, 2015) and this paper should be referenced when they are introduced in the MS."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is generally well-written and easy to follow. The authors provide enough background to make the paper easy to understand. The topic of diffusion-based samplers is also an important and interesting topic with applications in many domains. Additionally, improving the MC estimator of the score when we have access to the energy and not data is, in my opinion, very well-motivated."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, the authors propose Noised Energy Matching (NEM) and Bootstrap NEM (BNEM), a diffusion-based neural sampler based on previous related work (iDEM). In particular, the authors propose to parametrise the energy with a network, instead of the score, to reduce the variance of the estimator. They also propose to bootstrap the energy estimator at high noise levels from learned estimators at lower noise levels to further reduce the variance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Novelty and Motivation**: \nAlthough the NEM algorithm is a simple reparametrization of the estimator in iDEM, the paper analyzes the implications of an energy or score parametrization and shows that this simple change can lead to a better estimator. The more novel algorithm is BNEM, however, the paper does not provide enough experimental results to support the benefit of this method (see my notes under Experiments).\n\n- Theoretical Limitations: The work makes several claims regarding the variance of the estimators which aren’t precise. For example, the claim that “the variances of EK and SK explode at high noise levels as a result of the VE noising.” This isn’t correct as the variance is a function of multiple factors, including the importance sampling proposal chosen for the estimator, as well as the energy landscape itself.\nAdditionally, the proof of proposition 2 considers low-energy regions and claims that $exp{(\\mathcal{E}(x_{0|t}^{(i)})} \\geq c$. However, it isn’t clear to me how these regions are defined as the energy is evaluated at the noised samples.\n\n**Experiments**: \n- The work contains several experiments on the same datasets and benchmarks as the previously related paper (iDEM), showing better performance of the method, especially when the number of MC samples and integration steps are limited. However, I find that additional experiments, to show the benefit of lowering the variance at the cost of increasing the bias would strengthen the claims of the paper much more. For example, the performance gain from BNEM, at the cost of increasing the bias isn’t clear in my opinion. Generally, neural networks are fairly good at training in high-variance settings (e.g. diffusion models). On the other hand, for complex energy distributions and in high dimensional settings, the bias could result in significant problems. In general, I think the authors should provide more analysis of the trade-off of bias and variance when introducing the BNEM estimator.\n- The paper also claims that the proposed approach is more robust compared to iDEM as it doesn’t rely on clipping the score. However, for the LJ potentials cubic spline interpolation was used, as the energy is very sharp. It isn’t clear to me how much of an advantage this provides if smoothing is still necessary and how applicable this is to other complex and realistic settings.\n- The results for iDEM on LJ-13 and LJ-55 tasks are significantly different from what iDEM paper reported. The authors also indicate divergent training for DDS, PIS and FAB on those tasks, while iDEM reported competitive values for both FAB and PIS on LJ-13 and for FAB on LJ-55. Could the author elaborate on how and why they were unable to reproduce the results?\n- Some metrics such as NLL and ESS which are commonly reported in relevant works are not reported.\n- In Table 3, the standard deviations over the seeds isn’t reported, and the authors report the best value, making the results misleading. For example, they indicate that the mean E-W2 of LJ-55 over the 3 seeds is in fact even higher than the one they report for iDEM. This in fact confirms my concern that improving the variance of the estimator with bootstrapping at the cost of increasing the performance doesn’t necessarily lead to increased performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. To point in the weaknesses, can you give an estimate of the additional overhead with the change from score to energy parameterization? Specifically, to better understand the practical utility, an empirical comparison of the it the decrease of the variance per compute unit would need to be needed.\n2. The authors speculate on the use of a Metropolis-Hastings type correction in future work. How do they envision doing this within the current BNEM framework?\n3. Is it possible to correct for distributional bias of the neural sampler e.g. through importance sampling? it is with many of the base-lines you compare against. If so, what is the computational cost of this compared to the baselines?\n4. Why did the authors not try a standard molecular benchmark such as ALA2? Is it related to the smoothing that needs to be performed to get the LJ-n systems to train stably? If this is the case, how do you envision moving this methodology beyond toy-systems?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is clearly written and addresses an important problem\n2. The authors are transparent about the limitations of the work and in particular the challenges that remain for with regard to training stability etc."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Neural sampler of Boltzmann distributions. A variation of iDEM with better variance. The central idea of iDEM and this paper is to fit a diffusion model which generates samples which are approximately Boltzmann distributed mu(x) \\propto e^(-E(x)) where E(x) is the energy function. In iDEM model is learned by matching a score network to the gradient of a monte-carlo estimator of the noised energy at a given time, t. This estimator is simulation-free but very high variance, and a bi-level training scheme is introduced to stabilize training.\n\nThe key innovation in this paper is to match the energy instead of the gradient of the energy (force/score), which they show in Propositions 1 and 2, has smaller error and variance than training against the score. A second contribution in this paper using a bootstrapping scheme to further reduce variance, at the cost of introducing some bias."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Apart from the memory overhead highlighted by the authors in their limitations section, the new methods also introduce significant computational overhead, by having to use autograd to compute the derivative of the energy during sampling. \n2. Experiments are performed on toy systems and not systems which reflect the actual use cases of methods like this, e.g. molecules. A simple case should be included, such as ALA2.\n3. Missing comparisons to standard sampling baselines for non-normalized densities such as parallel tempering. In particular, considering the number of evaluations of E, and overall compute cost."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose an energy based model to target energies of noised data which can enable independent samples generation through a denoising diffusion approach, with state-of-the-art performance."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024bnem,\ntitle={{BNEM}: A Boltzmann Sampler Based on Bootstrapped Noised Energy Matching},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ybWOYIuFl6},\nnote={under review}\n}"
},
"abstract": {
"value": "Generating independent samples from a Boltzmann distribution is a highly relevant problem in scientific research, e.g. in molecular dynamics, where one has initial access to the underlying energy function but not to samples from the Boltzmann distribution. We address this problem by learning the energies of the convolution of the Boltzmann distribution with Gaussian noise. These energies are then used to generate independent samples through a denoising diffusion approach. The resulting method, Noised Energy Matching (NEM), has lower variance and only slightly higher cost than previous related works. We also improve NEM through a novel bootstrapping technique called Bootstrap NEM (BNEM) that further reduces variance while only slightly increasing bias. Experiments on a collection of problems demonstrate that NEM can outperform previous methods while being more robust and that BNEM further improves on NEM."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"neural sampler",
"Boltzmann distribution",
"diffusion model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0e1f9d50b9aee2a7ba5cce42f2af4c5e7aa4ddec.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "BNEM: A Boltzmann Sampler Based on Bootstrapped Noised Energy Matching"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ybfmpJiKXX | AIMS.au: A Dataset for the Analysis of Modern Slavery Countermeasures in Corporate Statements | main | Active | natural language processing;modern slavery;corporate statements;benchmark;text extraction;large language models | datasets and benchmarks | 3;6;6 | 4;4;3 | 2;3;3 | 2;3;3 | 1;3;2 | 5 | 3.666667 | 2.666667 | 2.666667 | 2 | -0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I have outlined some questions in the weaknesses section. Additionally, I am curious about the authors' choice not to use traditional metrics such as Fleiss’ Kappa to evaluate inter-annotator agreement."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Below, I outline the key strengths of the paper: \n\n1. The dataset introduced in this paper could prove invaluable for legal professionals, offering a detailed resource for examining modern slavery statements. Furthermore, the annotation guidelines proposed have the potential to be adapted and applied to similar documents across different countries. \n2. The task of distinguishing between specific countermeasures and vague claims addressed by this paper is particularly noteworthy due to the common human difficulty in making such distinctions. The authors demonstrate that Language Models perform reasonably well in this respect, suggesting that these models can be effectively used to support human decision-making processes."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the use of Large Language Models (LLMs) to identify specific actions companies have taken to combat modern slavery and distinguish these from ambiguous corporate statements. To support this analysis, the paper presents a new dataset comprising around 6,000 modern slavery statements sourced from the Australian Modern Slavery Register, with annotations at the sentence level."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The weaknesses are as follows: \n\n1. I am concerned about the annotation quality of the dataset. The paper notes that 50 statements were annotated by two expert annotators in lines 240-242, but it lacks details on annotator agreement, the total number of sentences annotated, and other relevant metrics on this smaller set. \n2. The paper does not provide a detailed quantitative and qualitative analysis of Large Language Model (LLM) performance. Specific questions remain unaddressed, such as whether certain words or phrases correlate with LLM outputs or how the distribution of relevant sentences identified by LLMs compares to those identified by annotators. Additionally, it would be useful to know how many different prompts were tested on the LLMs to better understand the models’ responsiveness."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "-\tRelated work can be shortened, the paper is already covering both an introduction and background (around half of the paper is just introductory pages). This is especially important given that most related work isn’t specifically targeting the exact problem at hand.\n-\tDid you annotate all statements/docs mentioned starting from line 250? \n-\tI suggest better formatting and maybe a figure to describe the process starting line 319. There are many numbers, sub-steps etc and it is hard to keep track.\n-\tThere are several IAA measure (e.g., Cohen Kappa), why use the percentage of agreement?\n-\tPlease move details on task definition to the main text, this is an important part of the work and should be made more clear.\n-\tLine 431: why limit the prompt design to C2? The results in figure 3 shows numbers for all criteria, making that sentence confusing.\n-\tI suggest adding results for un-finetuned Llama 2. The great effectiveness it is showing compared to GPT-4o is surprising and contradicting with existing literature comparing closed models to Llama 2."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "-\tThe paper is clearly written and well-organized\n-\tThe targeted domain/problem is unique yet very important and the proposed approach can be extended to other similar/relevant use cases\n-\tExperiments cover variety of setups, different types of SLMs and LLMs and with fine-tuning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper describes an approach to construct a manually annotated dataset for modern slavery countermeasures detection and analysis in text based in Australia. It also benchmarks modern language models including SLMs and LLMs in zero-shot and supervised learning settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-\tI think more justifications to decisions made in proposed approach are needed:\n \n\t-\tLine 238 – 242: more discussion and justifications are needed to explain the need for such different subsetting and annotation approaches. \n\t-\tPlease justify/further discuss the quality of the OCR tools used.\n-\tSince the dataset is the main contribution of the work, details on its annotation process should be moved to main text. For example:\n\n\t-\t11 questions are mentioned in paragraph starting from line 285, examples of these questions and potential answers should be added in main text. \n\n\t-\tIn line 305 ++ it isn’t enough to mention a company did the annotation. What are the characteristics of annotators (other than being fluent English speakers)? How was training exactly done? these are core details to establish dataset quality.\n\t-\tHow was the dataset split for train and test?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Typos in line 174: “CCalifornia” --> “California”\n\n2. A figure depicting the annotation process would be helpful and easier to follow and understand.\n\n3. I suggest putting Section3: related works in the later part of the paper so that important sections (Dataset Description and Benchmark Experiments) will come earlier.\n\n4. The temperature of LLMs is not reported. \n\n5. Llama-2's size, which is important, is not revealed in the main paper. \n\n6. The regular expressions to convert text into sentences and extract LLMs’ responses are not revealed."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The coverage of the dataset is extensive in terms of year and corporate entity (covering 7,270 entities’ statements from 2019 to 2023 after the establishment of Australian Modern Slavery Act). \n\n2. The dataset focuses on a specific field that could have significant social impact (but limited to Australia as generalizability is not shown) as it manages to enhance LLMs ability to assess modern slavery countermeasures in corporate statements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces AIMS.AU, a new benchmark containing 5,731 corporate modern slavery statements for LLMs to assess whether they meet the requirements of the Australian Modern Slavery Act. It details the construction and annotation process. The author benchmarks it over both fine-tuned open (BERT, DistilBERT, Llama2) and closed (GPT3.5-turbo, GPT4o) LLMs and finds zero-shot prompting LLM’s results aren’t comparable to fine-tuned LLMs, indicating dataset’s usage to fill the gap in corporate compliance analysis."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The presentation of this paper is subpar. It relies too much on text and does not take advantage of tables or figures. Some important information is either missing or in the Appendix instead of the main paper. See detailed comments in the \"Questions\" section below. \n\n2. Experiments are limited (order in priority). \n - It only includes closed LLM in the setting of zero-shot prompting LLMs. The comparison between zero-shot open LLM and fine-tuned open LLM is interesting to see.\n\n - Few-shot and chain-of-thought prompting are missing. Closed instruction finetuned LLMs takes advantage of prompting techniques. It would be interesting to see how much improvement they will make using these two prompt fashions.\n\n - It only includes Llama-2-7b. Comparison between different size open-weight LLMs before and after fine-tuning is interesting to see.\n\n - Not benchmark the recent open-weight LLMs (like Llama-3)\n\n3. The broad impact of this dataset is limited. The criteria for LLMs to identify is very specific to the Australia Modern Slavery Act. Can the learnings be transferred to \"UK Modern Slavery Act 2015\" which is similar? \n\n4. Benchmark task doesn’t take full advantage of the dataset. The author can formulate a better task to benchmark LLMs or VLMs. There is image information in the dataset which could potentially contribute to classification accuracy."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We provide a new dataset of modern slavery statements annotated at the sentence level to train and evaluate language models for relevant text detection and extraction."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024aimsau,\ntitle={{AIMS}.au: A Dataset for the Analysis of Modern Slavery Countermeasures in Corporate Statements},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ybfmpJiKXX},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite over a decade of legislative efforts to address modern slavery in the supply chains of large corporations, the effectiveness of government oversight remains hampered by the challenge of scrutinizing thousands of statements annually. While Large Language Models (LLMs) can be considered a well established solution for the automatic analysis and summarization of documents, recognizing concrete modern slavery countermeasures taken by companies and differentiating those from vague claims remains a challenging task. To help evaluate and fine-tune LLMs for the assessment of corporate statements, we introduce a dataset composed of 5,731 modern slavery statements taken from the Australian Modern Slavery Register and annotated at the sentence level. This paper details the construction steps for the dataset that include the careful design of annotation specifications, the selection and preprocessing of statements, and the creation of high-quality annotation subsets for effective model evaluations. To demonstrate our dataset's utility, we propose a machine learning methodology for the detection of sentences relevant to mandatory reporting requirements set by the Australian Modern Slavery Act. We then follow this methodology to benchmark modern language models under zero-shot and supervised learning settings."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"natural language processing",
"modern slavery",
"corporate statements",
"benchmark",
"text extraction",
"large language models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/4c097e235185ea2dc0a77f9a3c29782a1c55fc10.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/6502266cebc14e7fd6670ae739ac60aafe00b7d1.pdf"
},
"title": {
"value": "AIMS.au: A Dataset for the Analysis of Modern Slavery Countermeasures in Corporate Statements"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
yc38vnXhTh | Towards Human-like Virtual Beings: Simulating Human Behavior in 3D Scenes | main | Active | Agent AI;3D Humanoid;Large Language Model;Deep Learning | applications to computer vision, audio, language, and other modalities | 3;5;5;5 | 3;3;4;4 | 2;3;2;2 | 1;3;2;2 | 2;2;3;2 | 4.5 | 3.5 | 2.25 | 2 | 2.25 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How do you model the belief in the value function. This part is not well explained in the paper. \n2. How did you select and calibrate the probability values (1.0/0.7/0.3/0.01) for language-based commands?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The teaser figure of this paper clearly illustrates the key differences of this paper compared to the prior art. Generating long-horizon human behaviors from high-level abstract description is promising in many fields. This paper provides a comprehensive evaluation of their proposed method, including both quantitative metrics and human evaluation, supported by thorough ablation studies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents ACTOR, an LLM system for simulating human-like behavior in 3D environments. It also proposes a comprehensive dataset, BehaviorHub, for training and evaluating such systems. ACTOR operates on a perceive-plan-act cycle, using value-based behavioral planning and hierarchical prior knowledge to decompose complex goals into achievable steps while adapting to environmental changes. The BehaviorHub dataset contains 10k human behavior samples in 1.5k 3D scenes, generated by a semi-automated pipeline that uses language models to generate plausible behavior sequences with corresponding motion data. The paper also presents an evaluation framework that measures both behavior planning and motion simulation effectiveness, and demonstrates ACTOR's superior performance compared to existing approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are a few notable issues that concern me.\n\n1. Although the generation of human-like behavior from high-level description is promising, the proposed method does not show sufficient technical contributions to shed light on this problem. The use of LLM for manipulating agents or robots is not new (see [a, b]). Adopting the perception-decision-action loop is straightforward and not new either (see [c, d]). I do not see a clear statement that articulates the core contribution in techniques compared to previous methods.\n\n2. The value functions used seem relatively simplistic, using basic metrics such as shortest path and binary language-based decisions, rather than incorporating more sophisticated physical constraints or human behavior patterns. The probabilistic formulation of the value function used in this paper is somewhat trivial, as it is simply implemented by constructed decrete values, i.e., 1.0/0.7/0.3/0.01. Such a probabilistic model (p_v) does not involve any learning process, how to guarantee the that the used distribution match the actual underlying distribution?\n\n3. In addition, the baselines (LLMaP and HuggingGPT) aren't specifically designed for 3D human behavior simulation, and the human evaluation sample size is small with only five participants. It would be better if some more task-oriented baselines were used in the experiments.\n\n[a] J. Liang, W. Huang, F. Xia, P. Xu, K. Hausman, B. Ichter, P. Florence, and A. Zeng, \"Code as policies: Language model programs for embodied control,\" in International Conference on Robotics and Automation (ICRA), 2023.\n\n[b] I. Singh, V. Blukis, A. Mousavian, A. Goyal, D. Xu, J. Tremblay, D. Fox, J. Thomason, and A. Garg, \"Progprompt: Generating situated robot task plans using large language models,\" in International Conference on Robotics and Automation (ICRA), pp. 11523-11530, IEEE, 2011.\n\n[c] Y. Hu, F. Lin, T. Zhang, L. Yi, and Y. Gao, \"Look before you leap: Unveiling the power of gpt-4v in robotic vision-language planning,\" arXiv preprint arXiv:2311.17842, 2023\n\n[d] M. Skreta, Z. Zhou, J. L. Yuan, K. Darvish, A. Aspuru-Guzik, and A. Garg, \"Replan:"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Clarification from weakness 1 would be helpful."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The idea of grounding the dataset of human behaviors in 3D scenes might be useful for dataset diversity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces ACTOR and BehaviorHub.\n1) ACTOR is an agent architecture to produce human behavior data in 3D environments with ‘values’ similar to those humans set in daily life. The agent uses a perceive-plan-act cycle for scene-aware and goal-oriented planning; it also decomposes goals and decision making into a hierarchical space.\n\n2) BehaviorHub is a human behavior simulation dataset with commonsense knowledge of LLMs and motion resources with 3D scenes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are many design choices in ACTOR that were left unjustified. Additionally, since the evaluation dataset is not explained, it is difficult to gauge the performance of the method. \n\n1) Dataset creation approach based on ‘values similar to those humans set in daily life’. This is difficult to evaluate when the 'ground-truth plans' mentioned in the paper are not explained e.g., in the example below \n*For behavior planning, Sentence-BLEU (Papineni et al., 2002), and BERTScore (Zhang et al., 2019) are used to measure the semantic similarity between the ground-truth plans and predictions.* (5.4)\n\n2) Introducing hierarchical decomposition of goals and decision making. Why should candidates at the same level be restricted to executable actions or high-level semantic units of activities? I could see multiple potential paths where it might be beneficial for some paths to be longer than others. \n\n3) Perception module: Why does the below prompt *ENVIRONMENT: {residential interior}; OBJECTS: {bed, desk, chair, kitchen counter, sink, television,..., sofa}; SURROUNDINGS: {sink: empty, faucet: turned on, toilet: vacant}* help attain ‘deep understanding of the environment’.\nAre there any experiments/ previous works to justify this structure of inputs?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Comprehensive goal achievement: ACTOR can decompose high-level goals into a series of activities and actions, allowing it to accomplish complex tasks similar to humans.\n\nEnvironmental awareness: The agent can adapt its plans based on environmental changes, such as the occupancy of a room or the state of objects.\n\nValue-driven decision-making: ACTOR uses customizable value functions to evaluate and prioritize different action paths, incorporating personal beliefs and preferences.\n\nLarge-scale dataset: BEHAVIORHUB, a dataset of human behavior in 3D scenes, provides a valuable resource for agent development and evaluation, addressing the lack of a comprehensive testbed for this research area."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces ACTOR, a large language model-powered agent designed to simulate high-level, long-horizon, abstract goal-driven human behaviors in 3D scenes. The agent operates in a perceive-plan-act loop and utilizes a value-driven planning mechanism to navigate complex environments and adapt to dynamic situations. The authors compare the proposed MCTS based hierarchical framework with other LLMs to demonstrate its effiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The agent’s interaction with objects is static, and the agent’s actions do not directly impact the state of the environment, which makes it only a benckmark for LLMs and cannot further applied to multi-modal policies.\n\nThe compared baselines are outdated, more existing planning frameworks that utilized CoT, ToT should be introduced.\n\nThe design of the environment state is too simple and does not consider multi-human interaction, which makes the frameworks hard to deploy in real world."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see above Weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Propose a hierarchical structure for generating long-horizon motions. The agent should process perceived information, break down the goal into a series of activities, and create suitable action plans for each activity.\n2. The planning method must operate in a dynamic and open environment. It should be aware of whether the target space is occupied, allowing the agent to make decisions about what to do next rather than simply following a predefined script.\n3. Create the BEHAVIORHUB dataset, which will include a hierarchical decision tree and related motions. This dataset is essential for advancing more embodied tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces ACTOR, an agent designed to achieve high-level, long-horizon abstract goals within 3D household environments, guided by internal values akin to human motivations. ACTOR functions within a perceive-plan-act cycle, enhancing a scene-agnostic, ungrounded LLM controller through systematic goal decomposition and informed decision-making. This is accomplished by actively exploring the behavior space, generating activity options based on a hierarchical framework, and assessing these options through customizable value functions to determine the most appropriate subsequent actions. Additionally, the paper presents the BEHAVIORHUB dataset, which automates the alignment of motion resources with 3D scenes to facilitate informed generation. Comprehensive experiments demonstrate that ACTOR significantly outperforms established baselines, nearly doubling the overall success rate."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. All the subgoals are generated by the LLM, but the results are not always reliable. How do you address any nonsensical outputs? Is there a significant amount of human feedback during the process?\n2. The input to the LLM isn't very clear beyond just the object and action. Should we also consider global position, direction, volume of objects, and so on?\n3. In the real-valued function, what does “distance” refer to?\n4. The ACTOR uses a transformer-based algorithm to blend motions, but Figure 4 only shows different human-object interactions. It does not illustrate the connections and continuity between different actions, which contradicts one of the article’s contributions regarding long-horizon planning."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This study investigates the simulation of high-level, long-horizon, abstract goal-driven human behaviors in 3D scenes."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Human-like Virtual Beings: Simulating Human Behavior in 3D Scenes},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=yc38vnXhTh},\nnote={under review}\n}"
},
"abstract": {
"value": "Building autonomous agents that can replicate human behavior in the realistic 3D world is a key step toward artificial general intelligence. This requires agents to be holistic goal achievers and to naturally adapt to environmental dynamics. In this work, we introduce ACTOR, an agent capable of performing high-level, long-horizon, abstract goals in 3D households, guided by its internal value similar to those of humans. ACTOR operates in a perceive-plan-act cycle, extending the ungrounded, scene-agnostic LLM controller with deliberate goal decomposition and decision-making through actively searching the behavior space, generating activity choices based on a hierarchical prior, and evaluating these choices using customizable value functions to determine the subsequent steps. Furthermore, we introduce BehaviorHub, a large-scale human behavior simulation dataset in scene-aware, complicated tasks. Considering the unaffordable acquisition of human-authored 3D human behavior data, we construct BehaviorHub by exploring the commonsense knowledge of LLMs learned from large corpora, and automatically aligning motion resources with 3D scene for knowledgeable generation. Extensive experiments on our established benchmark demonstrate that the proposed architecture leads to effective behavior planning and simulation. BehaviorHub also proves beneficial for downstream task development. Our code and dataset will be publicly released."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Agent AI",
"3D Humanoid",
"Large Language Model",
"Deep Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8516854a9533be262971bac9bc7d8bc3992e9d02.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Human-like Virtual Beings: Simulating Human Behavior in 3D Scenes"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ydH8nU5csJ | DTVLT: A Multi-modal Diverse Text Benchmark for Visual Language Tracking Based on LLM | main | Withdraw | Visual Language Tracking;Video Understanding;Large Language Model | datasets and benchmarks | Xuchen Li;Shiyu Hu;Xiaokun Feng;Dailing Zhang;Meiqi Wu;Jing Zhang;Kaiqi Huang | ~Xuchen_Li1;~Shiyu_Hu1;~Xiaokun_Feng1;~Dailing_Zhang2;~Meiqi_Wu2;~Jing_Zhang47;~Kaiqi_Huang1 | 3;5;5;5;5 | 5;3;3;4;3 | 3;3;3;3;2 | 3;3;3;3;2 | 3;3;3;3;2 | 4.6 | 3.6 | 2.8 | 2.8 | 2.8 | -0.875 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "(1) What are the specific scientific challenges this dataset aims to address, beyond what existing benchmarks like Dtllm-vlt have already solved?\n(2) What quality control measures were implemented to ensure that LLM-generated texts accurately describe the visual content, and how were issues like semantic bias or redundancy managed?\n(3) For additional questions, please refer to the Weaknesses section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "(1) Enhanced Text Diversity: The benchmark introduces a multi-granularity approach to text descriptions, which offers different semantic densities. This allows for a finer evaluation of VLT algorithms' ability to handle both concise and detailed language input.\n(2) Integration with LLMs: By leveraging LLMs, the paper automates text generation, potentially reducing the need for costly manual annotations and allowing for large-scale text generation across various scenarios.\n(3) Comprehensive Experimental Setup: The paper evaluates multiple VLT models on DTVLT, providing a detailed analysis of how different text granularities impact tracking performance. This analysis can offer insights into the limitations of current VLT models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents DTVLT, a new multi-modal benchmark for Visual Language Tracking (VLT), aiming to enrich existing VLT datasets by introducing diverse textual annotations generated by large language models (LLMs). DTVLT is built upon five existing VLT and SOT benchmarks, and it employs a multi-granularity text generation strategy to provide descriptions of varying lengths and detail levels. The benchmark is designed to support three main tasks—short-term tracking, long-term tracking, and global instance tracking—providing a more comprehensive environment for evaluating VLT algorithms."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) This paper appears more like a pipeline proposal, merely leveraging existing models to enhance an existing dataset, without focusing on the scientific questions that should be addressed when establishing this dataset.\n(2) The motivation is unclear, as Dtllm-vlt[1] seems to have already addressed the short-term tracking, long-term tracking, and global instance tracking challenges mentioned in this paper.\n(3) DTVLT heavily relies on LLM-generated text descriptions but lacks a quality control method for these generated texts. The automatically generated text may contain semantic biases, inconsistencies, overly long or redundant information, and may even be disconnected from the visual content, particularly in multi-modal tasks. Analyzing the quality of generated texts and investigating how to produce better descriptions would be more worthwhile.\n\n[1] DTLLM-VLT: Diverse Text Generation for Visual Language Tracking Based on LLM."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "This manuscript may violate the anonymity policy, as the authors mention in lines 242-243: 'To overcome these challenges, we have developed DTLLM-VLT (Li et al. (2024a)),."
},
"flag_for_ethics_review": {
"value": [
"Yes, Other reasons (please specify below)"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the concerns and issues raised in the \"Weaknesses\"."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The manuscript is well-structured, with clear, fluent writing that makes the content easy to understand.\n\n2. The authors introduce a new benchmark, DTVLT, based on five prominent VLT and SOT benchmarks, addressing three tracking tasks: short-term tracking, long-term tracking, and global instance tracking. DTVLT provides four granularity combinations, capturing the extent and density of semantic information, and incorporates DTLLM-VLT, which uses LLM to generate diverse, high-quality language descriptions.\n\n3. Comprehensive experimental analyses evaluate the effect of diverse textual descriptions on tracking performance, with insights into performance bottlenecks in existing algorithms that may aid future VLT research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a new benchmark, DTVLT, for Visual Language Tracking (VLT) that enhances traditional single-object tracking by integrating more diverse linguistic data through large language models (LLMs). Current VLT benchmarks rely on brief, standardized annotations, which limit the depth of video content understanding, leading algorithms to rely on memorization rather than genuine comprehension. By leveraging LLMs, DTVLT provides varied semantic annotations at multiple levels of detail for existing VLT and single-object tracking datasets, supporting tasks like short-term, long-term, and global instance tracking. This benchmark includes four text granularity levels, enabling richer semantic understanding by capturing broader context and subtle video content dynamics. Experimental analyses show that diverse text annotations improve tracking performance and reveal limitations in current algorithms, encouraging further VLT research."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In Table 3, did the author use the original implementations and pretrained weights for these trackers (MMTrack, JointNLT, and UVLTrack)? If so, it would be helpful for the author to discuss any discrepancies between their results and those reported in the original papers, as the performance results for these trackers in Table 3 do not align with the performance reported in their respective publications.\n\n2. As this paper proposes a benchmark, additional trackers should be tested to fully evaluate it (i.e., VLT (\"Divert More Attention to Vision-Language Tracking\")).\n\n3. I am skeptical about the necessity of the \"Initial Concise\" annotation by the LLM. This annotation seems less effective than the official language, as the performance decreases when directly testing the trackers, as shown in Table 3. Additionally, Figure A7 indicates that even with retraining, performance still declines with the \"Initial Concise\" annotation. Could the author provide a more detailed analysis of why they included the \"Initial Concise\" annotation despite its apparent limitations, and what specific value it adds to the benchmark.\n\n4. When directly testing tracking performance with \"Dense Concise\" or \"Dense Detailed\" annotations—such as for the N-th test frame in a video, which language annotation does the author use? How their choice of annotation for each frame might impact the tracking results, and whether they considered alternative approaches.\n\n5. In the training process using \"Dense Concise\" or \"Dense Detailed' annotation, the model needs sample the N-th training frame from the given video. Which language annotation does the author use in this case. how the choice of annotation during training might affect the model's ability to generalize to different text granularities during testing.\n\n6. In Figure A7, the performance with \"Dense Detailed\" annotation appears worse for the AUC metric. Could the author provide a hypothesis for why the \"Dense Detailed\" annotation leads to worse AUC performance, and to discuss the implications of this finding for the design of VLT systems."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The proposed DTVLT is the first comprehensive VLT benchmark using LLM to provide multi-granularity and diverse semantic information.\n\n- The quantity of the dense texts in DTVLT is 45.9 times larger than the official annotations of previous datasets, which will highly support further research in VLT and video understanding.\n\n- The paper is easy to follow and clearly states its contributions, providing a comprehensive presentation of the content, including bad cases."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new visual language tracking benchmark featuring diverse texts, built upon five prominent VLT and SOT benchmarks. It is the first to leverage the extensive knowledge base of large language models (LLMs) to deliver multi-granularity and diverse semantic information. Experiments initially demonstrate the impact of diverse texts on tracking performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Tracking by natural language specification enhances human-machine interaction and has the potential to improve tracking outcomes. Although this paper introduces four levels of granularity in text annotations within the benchmark, the individual contributions of each text type to model performance remain ambiguous. As shown in Figure 5, models retrained on the DTVLT dataset exhibit improved performance on DTVLT; however, it is difficult to determine whether these models have merely memorized the patterns within DTVLT. Therefore, could you evaluate the models trained on DTVLT on an entirely different VLT dataset to assess model performance improvements? Additionally, could you train separate models on each of the four text granularity datasets and compare their performance to isolate the impact of each type?\n\n- DTVLT provides significantly more diverse semantic information than previous datasets, with texts generated by large language models (LLMs), which differ from human language. Therefore, conducting real-world experiments to compare models trained with and without DTVLT is valuable for evaluating performance and robustness in practical applications. Have you considered conducting user studies or real-world deployment tests in specific domains like autonomous driving or surveillance to compare the performance of models trained with and without DTVLT?\n\n- Considering the word distribution in DTVLT, as shown in Figure 4, terms like 'center', 'positioned', and 'black' constitute a large proportion, which may lead to bias in models trained on this dataset. It would be beneficial to analyze how this distribution impacts model performance. Could you conduct an ablation study to measure how removing or reducing the frequency of common terms like 'center', 'positioned', and 'black' affects model performance? \n\n- In terms of bad cases, complex scenarios with multiple similar objects often pose difficulties. As shown in Figure A9, many objects match the detailed description, such as 'A zebra is standing in the middle of the field', which may make it harder for the model to accurately identify the target. Therefore, filtering out redundant information may enhance the quality of the dataset. Have you considered assessing the frequency of such ambiguous cases in the dataset and implementing a post-processing step to refine the generated descriptions?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Given the paper’s use of LLM-based information, a question arises: why not expand the scope from single-object to multi-object tracking, a more semantically challenging task that could further elevate the benchmark’s utility? Multi-object tracking would offer a richer context for assessing model capabilities in complex, real-world scenarios and create additional opportunities to explore and refine model understanding in multi-modal video tracking."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work presents a highly promising and comprehensive research direction for the community by providing annotations at different granularities across multiple scales, addressing a significant gap in current Visual Language Tracking (VLT) and Single Object Tracking (SOT) benchmarks. The addition of annotations at various levels of detail and frequency enriches the benchmark and enhances its adaptability to different levels of semantic complexity, which is crucial for future VLT research. By covering short-term, long-term, and global instance tracking sub-tasks, this benchmark enables a broader application range and contributes to a nuanced understanding of tracking performance across varying degrees of difficulty, making it a valuable resource for the community.\n2. The paper is very well-written, with a clear and concise structure that effectively conveys the research objectives, methodology, and findings. The coherent organization and compact style allow readers to easily follow the research narrative and understand the underlying significance of the proposed benchmark. The structured approach also highlights the value of including multiple granularity levels in annotations, underscoring the need for such diversity to achieve a deeper understanding of video content.\n3. The authors perform a thorough analysis of the dataset structure, providing the research community with a clear picture of how this dataset challenges existing models. This comprehensive evaluation allows researchers to recognize the distinct aspects of the dataset that may stress-test model capabilities. By offering insights into the specific challenges posed by the dataset’s diverse annotations, the authors present a strong case for the benchmark’s relevance in advancing multi-modal video understanding, especially given its focus on varied text descriptions and granularities.\n 4. The study maximizes the potential of large language models (LLMs) and pre-trained models, creating robust annotations that are contextually rich and diverse. Leveraging these models has allowed the authors to generate a wide range of annotations that are both high-quality and informed by extensive world knowledge. This methodological approach is particularly commendable, as it goes beyond simple label generation and instead creates an annotation set that reflects nuanced information, adding substantial depth to the benchmark."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the limitations of current Visual Language Tracking (VLT) benchmarks, which often rely on simple, human-annotated text descriptions that lack nuance, stylistic variety, and granularity, leading algorithms to adopt a memorization approach rather than achieving genuine video content understanding. To overcome this, the authors leverage large language models (LLMs) to generate diverse semantic annotations for existing VLT and single object tracking (SOT) benchmarks, creating a new benchmark called DTVLT. DTVLT includes varied text descriptions across multiple levels of detail and frequency, supporting three sub-tasks: short-term tracking, long-term tracking, and global instance tracking. Through the method DTLLM-VLT, they produce high-quality, world-knowledge-rich descriptions in four levels of granularity. Experimental analyses on DTVLT reveal the effects of text diversity on tracking performance, aiming to uncover current algorithm limitations and drive advancements in VLT and video understanding research."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the dataset is a valuable addition to the field, offering some level of novelty, it falls short of significant innovation. The work’s main strength lies in diversifying existing benchmark annotations rather than introducing a groundbreaking methodology or framework. Although this level of novelty is acknowledged, the lack of an entirely new approach may limit the perceived impact of the benchmark within the research community. The work could benefit from a clearer distinction between its novel contributions and those of prior benchmarks to emphasize its unique value.\n2. The dense captioning provided by the dataset primarily centers on spatial relationships, which, while useful, presents a relatively narrow challenge to models. By focusing mainly on spatial relations, the annotations risk being overly uniform in terms of complexity, potentially limiting the depth of the benchmark’s impact on model evaluation. A more varied approach, incorporating a broader spectrum of semantic relationships beyond spatial ones, could introduce more diverse challenges for model training and testing, offering a more robust assessment of model capabilities.\n3. The paper does not introduce new baseline models, especially for dense caption-based tasks, to validate the proposed benchmark’s utility. The absence of baseline models that are specifically designed to adapt to the dense captioning structure leaves an open question about whether existing models are sufficient or if structural changes are required to adapt models without compromising previous task integrity. Designing or adapting baseline models could serve as a valuable proof of concept, demonstrating the benchmark’s effectiveness and providing the community with a clear starting point for future research on model adaptations for dense captioning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The presentation is quite clear and the motivation is sound. Figures and tables are well-illustrated.\n2. The introduced DTVLT is a comprehensive benchmark that incorporates diverse text annotations, addressing the limitations of existing VLT benchmarks which often rely on uniform descriptions. \n3. The authors claim that the uniformity of texts may lead to \"memorizing the answers”, and they conduct relevant experiments on the new benchmark to testify that variation in texts has a significant impact on tracking performance, which further bolsters the significance of DTVLT."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes DTVLT, a novel benchmark for Visual Language Tracking that integrates diverse text annotations across several prominent VLT and SOT benchmarks, which is generated via the usage of LLMs. DTVLT includes four levels of text granularity, allowing for a richer representation of video content, which is beneficial for VLT and video understanding research. Authors also conduct well-rounded experiments evaluating the impact of diverse text on tracking performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The data generation pipeline as well as the data collection procedure are lack of novelty, by simply using previous DTLLM-VLT and VLT and SOT benchmarks.\n2. The quality of the benchmark relies heavily on the performance of DTLLM-VLT, and there seems to be no correction/filtering strategy to compensate for this.\n3. The evaluation of VLT methods should be more comprehensive, including more NLT approaches to further validate the effectiveness of DTVLT"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nli2024dtvlt,\ntitle={{DTVLT}: A Multi-modal Diverse Text Benchmark for Visual Language Tracking Based on {LLM}},\nauthor={Xuchen Li and Shiyu Hu and Xiaokun Feng and Dailing Zhang and Meiqi Wu and Jing Zhang and Kaiqi Huang},\nyear={2024},\nurl={https://openreview.net/forum?id=ydH8nU5csJ}\n}"
},
"abstract": {
"value": "Visual language tracking (VLT) has emerged as a cutting-edge research area, harnessing linguistic data to enhance algorithms with multi-modal inputs and broadening the scope of traditional single object tracking (SOT) to encompass video understanding applications. Despite this, most VLT benchmarks still depend on succinct, human-annotated text descriptions for each video. These descriptions often fall short in capturing the nuances of video content dynamics and lack stylistic variety in language, constrained by their uniform level of detail and a fixed annotation frequency. As a result, algorithms tend to default to a “memorize the answer” strategy, diverging from the core objective of achieving a deeper understanding of video content. Fortunately, the emergence of large language models (LLMs) has enabled the generation of diverse text. This work utilizes LLMs to generate varied semantic annotations (in terms of text lengths and granularities) for representative SOT benchmarks, thereby establishing a novel multi-modal benchmark. Specifically, we (1) propose a new visual language tracking benchmark with diverse texts, named DTVLT, based on five prominent VLT and SOT benchmarks, including three sub-tasks: short-term tracking, long-term tracking, and global instance tracking. (2) We offer four granularity texts in our benchmark, considering the extent and density of semantic information. This is achieved through DTLLM-VLT, a method for generating high-quality, diverse text by leveraging the extensive knowledge base of LLMs to produce descriptions rich in world knowledge. We expect this multi-granular generation strategy to foster a favorable environment for VLT and video understanding research. (3) We conduct comprehensive experimental analyses on DTVLT, evaluating the impact of diverse text on tracking performance and hope the identified performance bottlenecks of existing algorithms can support further research in VLT and video understanding."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Xuchen_Li1",
"~Shiyu_Hu1",
"~Xiaokun_Feng1",
"~Dailing_Zhang2",
"~Meiqi_Wu2",
"~Jing_Zhang47",
"~Kaiqi_Huang1"
]
},
"authors": {
"value": [
"Xuchen Li",
"Shiyu Hu",
"Xiaokun Feng",
"Dailing Zhang",
"Meiqi Wu",
"Jing Zhang",
"Kaiqi Huang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Visual Language Tracking",
"Video Understanding",
"Large Language Model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "li|dtvlt_a_multimodal_diverse_text_benchmark_for_visual_language_tracking_based_on_llm"
},
"pdf": {
"value": "/pdf/fd5b0c7395e7f89d5fd23080136eb133b12041e7.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/918bb47d814809eab1cdebeec7ec8f42282b85dd.zip"
},
"title": {
"value": "DTVLT: A Multi-modal Diverse Text Benchmark for Visual Language Tracking Based on LLM"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
ydREOIttdC | Federated Class-Incremental Learning: A Hybrid Approach Using Latent Exemplars and Data-Free Techniques to Address Local and Global Forgetting | main | Active | Class-Incremental Learning;Federated Learning;Global Forgetting;Local Forgetting. | other topics in machine learning (i.e., none of the above) | 3;5;5;6 | 4;4;5;3 | 2;3;2;3 | 2;2;2;2 | 2;3;2;3 | 4.75 | 4 | 2.5 | 2 | 2.5 | -0.324443 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\tIn Equation (3), what do the subscripts $i$ and $j$ represent? Additionally, in Equation (4), what does $j_l$ signify?\n2.\tWhen clients train on the same task, they each have data from different classes. How is the classifier configured in this scenario? Is it set up in a task-incremental mode or a class-incremental mode?\n3.\tThe authors should clearly explain how the proposed method achieves sample replay through Equations (10) and (11)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tThe authors accurately summarize the limitations of current exemplar-based and data-free approaches.\n2.\tExperimental results indicate that the proposed method achieves lower computational and memory overhead compared to several optimal baseline methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper categorizes current work in federated class-incremental learning into exemplar-based and data-free approaches, noting that exemplar-based methods face memory constraints and potential privacy risks, while current data-free methods suffer from efficiency issues. The authors propose an HR mathematical framework to address both local and global forgetting. HR leverages a new autoencoder and Jones Potential formulations to generate synthetic data with minimal memory overhead, aimed at mitigating the forgetting problem."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe motivation of this study appears somewhat outdated, as the results in the literature (e.g., [1]) indicate that method has already effectively addressed the issue of class imbalance within clients. \n2.\tThe authors should clearly outline the specific problem their work addresses. For example, in Figure 1, it is necessary to further clarify the mechanisms causing local forgetting and global forgetting, with separate, detailed explanations for each. \n3.\tThe study lacks essential comparative methods.\n4.\tThe study lacks visualized experimental results, such as accuracy on all old tasks after completing each task, and is missing essential forgetting metrics, such as Backward Transfer (BWT).\nReference:\n[1]\tYavuz Faruk Bakman, Duygu Nur Yaldiz, Yahya H. Ezzeldin, Salman Avestimehr. Federated Orthogonal Training: Mitigating Global Catastrophic Forgetting in Continual Federated Learning. ICLR 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* The paper mentions that exemplar-based methods are memory-intensive. Could the authors provide an estimate of this memory cost, along with a comparison of memory usage between HR and other exemplar-based methods? An example or comparison with HR could help clarify this point.\n* While latent exemplars may save memory, the process of forwarding these exemplars through the decoder for sample generation could incur computational costs. It would be helpful if the authors discuss or quantify these costs when addressing the efficiency of their approach.\n* What metric is used for evaluation? Motivation of paper is mainly local and global forgetting but there is not any result or evaluation for forgetting.The table results do not clearly specify whether they represent incremental accuracy or the accuracy of the last task. Besides, higher incremental accuracy does not directly indicate lower forgetting. \n* The authors state that local and global forgetting are caused by class imbalances at both the local and global levels. Do they have any scenarios or relevant results that illustrate this point better?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The paper addresses an important problem in FCIL by combining data-based and data-free approaches to overcome local and global forgetting.\n* The hybrid approach integrates both latent exemplars and synthetic data generation, which are efficiently used to mitigate forgetting and results show that HR works better.\n* The mathematical formulation provided to describe these forgetting issues offers theoretical foundation.\n* Ablation studies in the paper contribute valuable insights into different aspects of HR and improve the interpretability of the approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses Federated Class-Incremental Learning by focusing on two types of forgetting: local forgetting (at the client level) and global forgetting (between clients). The authors propose a hybrid approach, called Hybrid Replay (HR), which combines data-based and data-free methods to mitigate these issues. They introduce a mathematical formulation to formalize the forgetting problem and the presented approach HR. The approach uses autoencoders for synthetic sample generation and latent exemplars. Comparisons against other data-free and data-based approaches demonstrate that HR achieves better performance results."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major Concerns:\n* The methodology, particularly the role of the autoencoder in addressing local forgetting, is not fully clear and can be explained better. For instance, while the paper states that the autoencoder helps address local forgetting, the specific details of how this is achieved are somewhat vague. The paper would benefit from a more detailed, step-by-step breakdown of how the autoencoder is employed for both local and global forgetting.\n* The paper lacks a clear visual representation of the HR approach. Including a diagram of the proposed method could significantly enhance understanding, especially as the provided Figure 1 only illustrates the problem without outlining the proposed solution. I believe such visual representations make papers to understand much better.\n* The results mention comparisons with a \"Hybrid Approach,\" but there’s little discussion on how HR stands out from other hybrid methods, such as REMIND+ What makes HR approach unique when compared to other Hybrid Approach ? Clarifying these distinctions would strengthen the contribution of HR to the field.\n* The conclusion lacks discussion on the limitations of HR and potential directions for future work. Addressing this would provide a perspective on the approach’s implications and its broader applicability.\n* The method heavily relies on data generation based on latent exemplars and class centroids, which raises concerns since we don't have a direct control of generated data and Variational Autoencoders (VAEs) are known to be suboptimal for high-quality synthetic data generation. Over time, this could degrade the quality of latent features and ultimately impact classification performance.\n* The paper frequently references the Lennard-Jones formulation, but it doesn’t provide enough explanation about its purpose or why it’s important for the proposed method.\n\nMinor Concerns:\n* In Section 4, line 232, the acronym \"AHR\" is introduced without prior definition.\n* The caption for Table 1 could be made more descriptive to make the table more self-explanatory."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the data partition of FCIL? Are the classes in different tasks disjoint? In the traditional CIL, categories in different tasks are disjoint. From the setting of ImageNet-Subset (10/20/100/10, 20/10/100/10) and Tiny-ImageNet (10/5/300/30), if A denotes the number of tasks, B denotes the classes per task and the classes in different tasks are disjoint, the total numbers of classes will be 200 for ImageNet-Subset and 50 for Tiny-ImageNet. However, the total numbers of categories in these two datasets are 100 and 200 respectively. Could you explain more about this?\n2。 Why does each client need to train from task 1? From Algorithm 1, the client only needs to update $theta_{h}$, but in the line 3 of Algorithm 2, the algorithm begins h=1.\n3. The practical FL systems may have stragglers. It is interesting to know whether the proposed HR algorithm can deal with the issue of stragglers."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper establishes the revolution of training loss functions within the scope of both training tasks and clients, demonstrating the two challenges of global and local forgetting.\n2. A novel replay mechanism with centroids of each category is presented."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This proposes a mathematical framework to demonstrate the global/local forgetting of FCIL and propose the Hybrid Replay (HR) to addressed these issues."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The presentation is not very clear. For example, exemplars in HR and other exemplar-based methods seem different but are used interchangeably in this paper. Also, I cannot get the indication of global/local forgetting in Figure 1.\n2. The mathematical formulation of FCIL and the proposed approach are not linked closely. Can you provide more information that how you establish the method based on the framework, especially for the global forgetting? It seems that the HR benefits from the class centroid embeddings and use them to address the global forgetting, is there any further analysis?\n3. Experiments are not sufficient. The results are limited to the LDA setting with alpha=1. Extended empirical results under different skewness (e.g., alpha=0.1 or more) should be included.\n4. Analysis of memory footprint should be included, e.g., the number of parameters need to be stored and transferred during the communication.\n5. Error in literature review. The Prototype reminiscence and augmented asymmetric knowledge aggregation [1] only addresses the CIL and it is placed within the FCIL methods.\n\n[1] Wuxuan Shi and Mang Ye. Prototype reminiscence and augmented asymmetric knowledge aggregation for non-exemplar class-incremental learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1772–1781, 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "see the questions in the weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.The paper introduces Hybrid Rehearsal (HR), which combines the benefits of data-based (exemplar-based) and data-free approaches. This hybrid approach leverages latent exemplars for local forgetting and data-free techniques for global forgetting, providing a comprehensive solution to the forgetting problems in FCIL.\n2. The authors develop a mathematical framework to formalize the challenges of local and global forgetting in FCIL. This framework not only aids in understanding the underlying problems but also provides a theoretical basis for the proposed solutions.\n3. The paper provides extensive experimental evaluations across multiple benchmarks and compares the proposed approach with state-of-the-art baselines, demonstrating the effectiveness of HR.\n4.The paper is well-organized and most related works are properly cited."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents an approach named Hybrid Rehearsal (HR) for Federated Class-Incremental Learning (FCIL), addressing the challenges of local and global forgetting due to class imbalance. HR employs a customized autoencoder for both classifying data and generating synthetic data, leveraging latent exemplars to tackle local forgetting and synthetic data generation to overcome global forgetting. The paper's contributions include a mathematical framework to formalize forgetting in FCIL, a novel autoencoder design that balances class-specific clustering and data distribution modeling, and extensive experiments demonstrating HR's effectiveness over existing methods with low computational and memory costs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper mentions using a customized autoencoder to leverage features for replay. I'm curious about what would happen if the encoder itself experiences forgetting? Additionally, since the stored features are fixed, but the encoder is continuously updated, how is this distribution inconsistency handled? 2. The paper mentions that the client receives class centroid embeddings ${p_ij}$ (line 8 of Algorithm 1). These embeddings enable the client to generate synthetic data representing tasks from other clients. However, if the received class centroid embeddings {pij} are for classes that the client has not seen, how can synthetic data be generated, and could this be detrimental to the client's learning? 3. How can the bias problem caused by multiple clients each learning a subset of categories be resolved when uploading the global model for model training and merging?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A mathematical framework for federated class-incremental learning and a hybrid approach to overcome local and global forgetting."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024federated,\ntitle={Federated Class-Incremental Learning: A Hybrid Approach Using Latent Exemplars and Data-Free Techniques to Address Local and Global Forgetting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ydREOIttdC},\nnote={under review}\n}"
},
"abstract": {
"value": "Federated Class-Incremental Learning (FCIL) refers to a scenario where a dynamically changing number of clients collaboratively learn an ever-increasing number of incoming tasks. FCIL is known to suffer from local forgetting due to class imbalance at each client and global forgetting due to class imbalance across clients. We develop a mathematical framework for FCIL that formulates local and global forgetting. Then, we propose an approach called Hybrid Rehearsal (HR), which utilizes latent exemplars and data-free techniques to address local and global forgetting, respectively. HR employs a customized autoencoder designed for both data classification and the generation of synthetic data. To determine the embeddings of new tasks for all clients in the latent space of the encoder, the server uses the Lennard-Jones Potential formulations. Meanwhile, at the clients, the decoder decodes the stored low-dimensional latent space exemplars back to the high-dimensional input space, used to address local forgetting. To overcome global forgetting, the decoder generates synthetic data. Furthermore, our mathematical framework proves that our proposed approach HR can, in principle, tackle the two local and global forgetting challenges. In practice, extensive experiments demonstrate that while preserving privacy, our proposed approach outperforms the state-of-the-art baselines on multiple FCIL benchmarks with low compute and memory footprints."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Class-Incremental Learning",
"Federated Learning",
"Global Forgetting",
"Local Forgetting."
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0ee82e2c4af49e467d30a24fcda6accc6c794e1c.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/50e2c22884e17dd75614db4f7d5066cd29e3be19.zip"
},
"title": {
"value": "Federated Class-Incremental Learning: A Hybrid Approach Using Latent Exemplars and Data-Free Techniques to Address Local and Global Forgetting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
ydlDRUuGm9 | On the expressiveness and spectral bias of KANs | main | Active | Kolmogorov-Arnold Network;Spectral Bias;Approximation Theory | learning theory | 3;6;6;8 | 5;2;2;4 | 2;3;3;3 | 1;2;3;3 | 2;2;3;3 | 5.75 | 3.25 | 2.75 | 2.25 | 2.5 | -0.404226 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.Theoretical Expansion: How does the depth of Kolmogorov-Arnold Networks (KANs) influence their spectral bias and approximation capabilities? Could a deeper theoretical analysis be provided to explore this relationship?\n\n2.Enhanced Experimental Design:Could the authors include more rigorous statistical analysis and comparative experiments with a broader set of baseline models, particularly state-of-the-art neural architectures used in scientific computing?\n\n3.Real-World Applications: How could the practicality of KANs be demonstrated in more diverse and realistic scenarios? Are there plans to showcase examples that highlight their advantages and limitations?\n\n4. Detailed Discussion: Could the authors provide a more detailed discussion on the computational cost, scalability, and limitations of KANs? What are the key factors that may affect their broader applicability?\n\n5. The author should"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.Theoretical Comparison of Expressiveness: The authors establish that KANs are at least as expressive as MLPs. Specifically, they show that any MLP can be reparameterized as a KAN with degree k splines which is only slightly larger. Conversely, they demonstrate that KANs can also be represented by MLPs, but the number of parameters increases significantly with the grid size of the KAN. This implies that KANs with large grid sizes may be more efficient in approximating certain classes of functions.\n2. The exploration of KANs’ spectral bias and their comparison with MLPs provides a novel perspective. The use of B-splines and the grid extension method offer unique contributions. However, much of the theoretical groundwork relies heavily on established techniques."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the theoretical and empirical properties of Kolmogorov-Arnold Networks (KANs), particularly focusing on their approximation capabilities and spectral bias. Given the growing interest in alternative neural network architectures for function approximation, the study of KANs is relevant to the broader machine learning and scientific computing community. However, while the comparison with MLPs is insightful, the direct impact and importance of KANs over existing methods are not convincingly demonstrated."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The parameter count in KANs and MLPs are not directly comparable, and the authors have blurred the distinction between these concepts. Additionally, the depth of KANs differs from that of MLPs, and the authors should not directly compare them, as this leads to a conceptual confusion. \nWe acknowledge that the conceptual confusion arises from the discussion in Section 3, particularly in Theorem 3.1- 3.4, where we compare the parameter count and depth between KANs and MLPs. This issue is also present in the experimental results in Sec4.1 and 4.2, where parameter comparisons are made across different models. \n\n2. Theorem 4.1 lacks clarity regarding its implications for KAN's performance on high-frequency data. The authors should explicitly state whether the theorem suggests that KANs outperform other models in handling high-frequency data. Additionally, it would be beneficial to provide a brief discussion or corollary connecting the theorem to spectral bias or high-frequency performance to clarify its practical significance.\n\n3.The theoretical analysis provided in the paper is limited to shallow KANs, which restricts the generalizability of the conclusions regarding spectral bias. To enhance the practical relevance, the authors should discuss the limitations of their current analysis and suggest potential methods for extending the theoretical framework to deeper KANs. This would provide a more comprehensive understanding of the spectral bias in real-world applications.\n\n4.While the experiments cover diverse applications, they lack rigorous statistical validation to substantiate the claimed advantages of KANs. The authors should include specific statistical tests or validation methods to strengthen their results. Furthermore, more detailed comparisons with state-of-the-art baseline methods would help provide a stronger foundation for the claimed benefits of KANs, especially in practical applications like PDE solving and Gaussian random field fitting.\n\n5.Incomplete Problem Contextualization: The paper does not clearly establish the practical significance of overcoming spectral bias in real-world tasks, nor does it compare KANs against state-of-the-art methods beyond MLPs.\n\n6.Limited Discussion on Limitations: The potential downsides of KANs, such as computational cost, overfitting, and practical implementation challenges, are not thoroughly addressed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I have following questions:\n\n(1). All the theorems 3.1, 3.2 and 3.3 are extremely mathematical, and their assumptions are not agreeable with practical cases. How can the authors explain the main differences?\n\n(2). For the examples of PDE, the equation is very simplie, and the functions are of sufficient smoothness. If the functions concerned becomes chirp function and so on, how about the performance?\n\n(3) In Fig. 4, the performance of approximation does not always tend to better. Can you explain the causes?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper characterizes the representation capacility of approximation of KAN and MLP, with a considerably rigorous mathematical analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper focuses on two aspects of KAN and MLP. The first issue is the approximation and representation capacility to functions of both networks, and the second issue is the spectral bias of approximation. Both issues are very important and attractive."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "On the expressiveness, it is merely a theoretical interest, and the proof of mathematical analysis is over simplified by assumpting extremely shallow KAN network, which violates the practical deep network situation, and can not be regarded as equalivent to general case or nonlinear effect. \n\nThe experiments are not sufficient, and the paper is mainly of theoretical approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Is the comparison between KANs with kth order B-splines with MLP modified when considering relu-k instead of simple relu?\n- In Figure 3 and 4 the learning of KANs display instabilities in contrary to MLP which are attributed to grid extension during the undersampled regime, I don't really understand what is meant by this, is it that the grid size of KAN is progressively extended during learning? \n\nFinally as a minor point:\nI found that the way the experimental results are presented could be more concise and more informative. Figures 1 and 2 are almost identical, while all the plots given in figure 3 and 4 could be possibly synthetized to give the general behaviour w.r.t. to k, grid size, dimension and scale, and the legend should be magnified, I can't read anything on a printed version."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "the questions addressed in the paper are interesting and some of the statements are quite strong, in particular th. 4.1 on the characterization of the spectral bias. Overall, my non-specialist viewpoint on this paper is that it looks like a rather clear and solid paper, with interesting statements."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is both a theoretical and experimental study of KANs, a recently introduced architecture with parametrizable activation functions,\naiming at characterizing their expressivity and spectral biases in comparison to MLPs. Since KANs offer a lot of new potentialities, it is\na legitimate and relevant research subject to try to characterize these in relation to properties of the data, therefore expressivity and spectral biases constitute obviously a good angle of analysis. \n\nSo the first theoretical result concerns expressivness: it tells that an arbitrary MLP (with activation function in C^k typically) can be exactly mapped onto a KAN (with B-splines of order k) with a depth multiplied by 2 and requiering a grid size of 2, so overall the number of parameters is multiplied by 4 (if I interpret well the result, this could be stated more explicitly actually) in this mapping. In reverse they find a mapping of a KAN to an MLP with an increase of parameters of G, the grid size of the original KAN. These constructions lead the authors to conclude qualitatively that KAN are more expressive than MLP. As acknowledged by the authors, this is not a sharp result as it is not excluded that a better construction of the mappings could be found leading to different conclusions, but these constructions are interesting on their own. A more concrete statement on expressivity is given by corollary 3.4, giving the speed of approximating of any function on a Sobolev space w.r.t the number of parameters of a KAN. Here, as a non specialist of this kind of result I become slightly confused. The statement is seemingly based on generic results on MLP, and seems not to be specific to KAN but more to multi-layer models, as it exploit the aforementioned mapping of the KAN to MLP, so that I wonder why MLP with relu-k would not lead to the same statement?\nThere is sentence discussing this at the end of the paragraph which is cryptic to me as it refers to some apparently non-standard MLP with relu-k. As a result the overall take-home message of this part is unclear to me. Is there any advantage to be attributed to KAN wrt to comparable MLP under this measure of expressiveness? Maybe the statement 3.4 is a profound and central statement of the paper, but the discussion should make it clear, by first giving how standard MLP with relu-k compare and then also maybe what are the practical consequence of such statement if any?\n\nThe second theoretical point concerns spectral bias, and theorem 4.1 to me is a nice and non-trivial statement showing that a single layer of B-splines is well conditioned, especially when the grid size becomes large, by looking at the spectrum of the Hessian of the loss. This fully characterizes the conditioning of the problem with explicit dependency w.r.t. embedding dimension and grid size. The comparison is done with MLP with Relu using a result given in Hong et al. (2022) obtained with similar techniques (Courant Fisher theorem) showing a much larger spectral bias for MLP. I wonder however if a comparison with the spectral bias of MLP with relu-k (unfortunately not to given in Hong et al, but I presume that similar techniques would lead to the result) would not be more relevant as they are in the same class of regularity as KAN with B-splines of order k?\n\nThe experimental part focuses on the spectral bias question on diverse examples. 
A comparison is shown between KAN with kth order B-splines and MLP with relu, showing a clear advantage to KAN regarding spectral bias, but again I wonder why the comparison is not done with the spectral bias of MLP with relu-k, if this would not be be more meaningful?"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I found however a potential problem of coherence concerning the comparison of KANs with MLPs: with relu-k at the beginning which is indeed legitimate, and then basic relu starting from corollary 3.4."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "the paper's scope is a generic computation architecture. no ethical aspects are relevant here."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. could you expand on the consequences of Corollary 3.4\n2. would be interesting to understand the consequence of thr.3.2 and th.3.3 for more \"mainstream\" architecture. For example, relu^k k=1 is probably the most common, while when gradients are needed SiLU SiLU-based activation functions are required, which is typical for scientific applications\n3. would be nice to expand the Hessian analysis and connect with the NTK one\n4. would be possible to perform the spectral analysis for section 4.4, instead of the error trend, if not, what is the test error?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "As described above:\n\n1. find the connection between two specific classes of MLP and KAN. \n2. interpretation of the spectral bias using the Hessian matrix\n3. some experiments to visualize the spectral bias"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the expressivity Kolmogorov-Arnold Networks (KAN) and the spectral bias.\n\nA rigorous analysis of the representation power of the KAN vs MLP is welcome. \n\nI think the most important contribution of the paper is to show the relationship between MLP with ReLU^k activation function and KAN without bias. Th.3.2 and Th.3.3 bounds are not tight. A conclusion of the authors is that MLP scales with G^2 while KAN with G, the grid size. The additional contribution Cor3.4 is not well explained in relationship to MLP, but the authors highlight the potential advantage of KANs. \n \nThe Analysis of the spectral bias seems a repetition of the analysis provided in [1], even if the authors analyze the Hessian matrix instead of the NTK matrix and show the elements of the Hessian. The authors are able to justify the reason why low frequency are not prioritized as in MLP, even if the connection with Th.4.1 is not clear. \n\nThe analysis of PDE, seems to be the same are reported in [1], showing better performance of KAN-based models in the training loss. Not sure why the test loss is not reported. \n\n\n[1] 1. Wang, Y. et al. Kolmogorov Arnold Informed neural network: A physics-informed deep learning framework for solving forward and inverse problems based on Kolmogorov Arnold Networks. Preprint at https://doi.org/10.48550/arXiv.2406.11045 (2024)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "As reported in the summary:\n1. the theoretical analysis is limited to specific class of MLP and KAN\n2. experiments seem very similar to [1], so it is not clear, apart from Fig.1 and Fig.2, why they are included."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024on,\ntitle={On the expressiveness and spectral bias of {KAN}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=ydlDRUuGm9},\nnote={under review}\n}"
},
"abstract": {
"value": "Kolmogorov-Arnold Networks (KAN) \\cite{liu2024kan} were very recently proposed as a potential alternative to the prevalent architectural backbone of many deep learning models, the multi-layer perceptron (MLP). KANs have seen success in various tasks of AI for science, with their empirical efficiency and accuracy demostrated in function regression, PDE solving, and many more scientific problems.\n \nIn this article, we revisit the comparison of KANs and MLPs, with emphasis on a theoretical perspective. On the one hand, we compare the representation and approximation capabilities of KANs and MLPs. We establish that MLPs can be represented using KANs of a comparable size. This shows that the approximation and representation capabilities of KANs are at least as good as MLPs. Conversely, we show that KANs can be represented using MLPs, but that in this representation the number of parameters increases by a factor of the KAN grid size. This suggests that KANs with a large grid size may be more efficient than MLPs at approximating certain functions. On the other hand, from the perspective of learning and optimization, we study the spectral bias of KANs compared with MLPs. We demonstrate that KANs are less biased toward low frequencies than MLPs. We highlight that the multi-level learning feature specific to KANs, i.e. grid extension of splines, improves the learning process for high-frequency components. Detailed comparisons with different choices of depth, width, and grid sizes of KANs are made, shedding some light on how to choose the hyperparameters in practice."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Kolmogorov-Arnold Network",
"Spectral Bias",
"Approximation Theory"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9a60602eb66a2a509b958fa9d0c38206857365af.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "On the expressiveness and spectral bias of KANs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |