id
stringlengths 10
10
| title
stringlengths 3
179
| track
stringclasses 1
value | status
stringclasses 3
values | keywords
stringlengths 2
2.39k
| primary_area
stringclasses 21
values | author
stringclasses 501
values | authorids
stringclasses 501
values | aff
stringclasses 1
value | aff_domain
stringclasses 1
value | position
stringclasses 1
value | rating
stringclasses 355
values | confidence
stringlengths 0
19
| soundness
stringclasses 642
values | contribution
stringclasses 596
values | presentation
stringclasses 782
values | rating_avg
float64 0
9
| confidence_avg
float64 0
5
| soundness_avg
float64 0
4
| contribution_avg
float64 0
4
| presentation_avg
float64 0
4
| corr_rating_confidence
float64 -1
1
| project
stringclasses 1
value | github
stringclasses 1
value | Review
listlengths 2
10
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
zCJqgXnV7f | Dynamic Elimination For PAC Optimal Item Selection From Relative Feedback | main | Active | probably approximately correct;optimal item selection;relative feedback;multi armed bandits;Plackett Luce Model;Condorcet winner;Bayesian updates;active learning | reinforcement learning | 3;3;5;6 | 4;3;3;2 | 3;2;2;3 | 2;2;2;2 | 3;2;2;2 | 4.25 | 3 | 2.5 | 2 | 2.25 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How can the sample complexity bounds be further improved to match the practical performance observed in experiments, and is there potential for achieving instance-optimal sample complexity in the PAC best-item setting?\n\nCould the proposed DE and DEBC algorithms be adapted or extended to work with partial or full rankings instead of just identifying the best item, and what challenges might arise in such extensions?\n\nHow would the algorithms perform in settings where item correlations are dynamic or evolve over time, and what adjustments to DE/DEBC might be necessary to handle such changes effectively?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. DE and its extension, DEBC, significantly improve sample complexity for identifying the best item compared to existing algorithms, reducing the number of subset plays needed.\n\n2. The incorporation of inferred updates through item correlation in DEBC provides a robust mechanism to handle correlated item structures, leading to superior performance in certain datasets.\n\n3. The paper extensively evaluates DE and DEBC across various synthetic and real-world datasets, demonstrating their practical effectiveness and robustness across different settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work addresses the problem of identifying the best item from a set of items based on relative feedback, specifically using a method called Dynamic Elimination (DE). DE efficiently prunes sub-optimal items as it progresses, improving sample complexity compared to existing algorithms. The authors also propose an extension, Dynamic Elimination by Correlation (DEBC), which incorporates inferred updates based on item correlations. DEBC significantly outperforms DE in settings where item correlation is strong, reducing sample complexity further. Extensive experiments demonstrate that both DE and DEBC outperform existing state-of-the-art (SOTA) methods in terms of sample complexity across multiple datasets and settings. Additionally, the paper explores future directions for improving sample complexity bounds and extending the methods to partial/full rankings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While DE and DEBC perform well in practice, the theoretical sample complexity bounds provided in the paper are not as tight as their practical performance would suggest, leaving room for further theoretical refinement.\n\n2. DEBC’s performance heavily relies on the strength of item correlations, which raises concerns and potentially limits its applicability in scenarios with weak correlations. \n\n3. The paper primarily focuses on cosine similarity for item embeddings and correlations. Extending this to other similarity measures or more general settings is only briefly mentioned and not fully explored.\n\nSome cosmetics: for example on row 330: `we can can combine`."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1: It would be appreciated if the authors could include additional experiments conducted in real-world scenarios. These experiments should aim to demonstrate the effectiveness of the proposed methods in best item identification. Additionally, it is recommended that results be presented using widely accepted metrics such as accuracy, AUC, F1, precision, and recall. \nQ2: The authors might consider providing a more precise and explicit formulation of the item selection problem in Section 3. Furthermore, it would be valuable to discuss in greater detail how DE and DEBC can be applied in practical, real-world contexts."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "S1: The proposed methods enhance existing approaches by dynamically eliminating suboptimal items, significantly reducing the algorithm's complexity.\n\nS2: By incorporating correlations between items, the proposed methods extend their applicability to items initially absent from the played set.\n\nS3: The authors offer theoretical assurances regarding the sample complexity of DE and DEBC. They also demonstrate that the sample mean of an inferred update sequence serves as an unbiased estimator."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors have introduced a dynamic elimination method for item selection based on relative feedback. They propose two distinct algorithms: one that implements the fundamental dynamic elimination approach and another that incorporates item correlations into the elimination process. The paper provides theoretical analysis on both the sample complexities and the correctness of the proposed methods. Furthermore, the authors demonstrate the proposed methods' ability in terms of reducing sample complexities when compared to several baseline approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1: The DE and DEBC algorithms are designed for the task of item selection, yet the performance of best item identification is neglected. The authors dedicate substantial space to discussing the efficiency of the proposed methods in reducing sample complexity. However, the mathematical formulation lacks clarity, as the best item identification problem and the relative feedback are not thoroughly formulated.\nW2: The proposed DEBC algorithm presumes that item correlations are known to the user, raising concerns about the validity of this assumption. The authors do not address the implications of this assumption in real-world applications or indicate whether it is a common assumption in existing literature.\nW3: The current work lacks demonstration in real-world scenarios. Although the authors mention that learning to rank is crucial in fields like sociology, information retrieval, and search engine optimization, they do not provide examples of its application in these areas. Consequently, the practical applicability of this work remains uncertain."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weakness part."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper studies an important problem. \n\nThe proposed algorithm is complemented with theoretical analysis as well as extensive experiments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of best-item identification from relative feedback in the setting where a learner adaptively plays subsets of items and receives stochastic feedback in the form of the best item in the set. An algorithm named Dynamic Elimination (DE) is proposed, which dynamically prunes sub-optimal items from contention to efficiently identify the best item. Then the model is extended to capture the generalized linear correlation of items. An algorithm named DEBC, an extension of DE is proposed to handle this extension. The core idea is leveraging the generalized linear correlation to obtain estimates on item win rates without directly playing them by leveraging item correlation information. Extensive experiments are conducted to validate the empirical performance of the proposed algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The algorithmic contribution of this paper looks narrow. The core idea of DE is flexibly eliminating items once they are deemed suboptimal. This idea is old in the bandit literature. The authors can refer to Chapter 6 of [3] for some reference. Also, a simple google search would give you a number of works on elimination algorithms. The core idea of DEBC is exploiting the generalized linear structure on the correlation among arms. This idea is also not new since linear structure has been extensively studied in linear bandits, reinforcement learning. Please refer to Part V of [3] for some details. \n\nThe proof techniques of this paper are not new, most of them are drawn from literature. Thus, this paper does not contribute to new proof techniques. To be more specific, compared to [1,2], I do not see enough new ideas in the proof. For example, the analysis of concentration, probability of event, etc., looks very normal. Could the author elaborate on the novelty of the proof? \n\nThe theoretical improvement over SOTA techniques is not clear. The improvement on the sample complexity compared with SOTA works is not stated. How does it improve the sample complexity upper bound? \n\nThe second paragraph of the related work overstated the limitations of previous works without any supporting evidence. Previous algorithms may require up to millions of samples to rank only a few items, but this possibility depends on the setting of the problem. It should not be stated as a general claim. Furthermore, this paragraph is not precise. What do you mean by often? Could you quantify it? \n\nLemma 1 is confusing. It is highlighted as a lower bound, but the sample complexity is stated using the big O notation. \n\n[1] Yisong Yue, et al. The K-armed Dueling Bandits Problem, Journal of Computer and System Sciences, 78(5): 1538–1556.\n\n[2] Björn Haddenhorst. Identification of the Generalized Condorcet Winner in Multi-dueling Bandits, NeurIPS, 2021\n\n[3] Tor Lattimore, et al. Bandit Algorithms. Cambridge press."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "It will help if the authors can have more evidence to demonstrate this paper's results' significance. \nOr if I perceive this paper wrongly, please let me know."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper proposes new algorithms for best item identification and winning rate estimation. These algorithms do have some novelty and are inspiring."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the best item selection problem from noisy multi-wise comparisons. There are totally n items, and at each round the agent can select n_s items to compare, and the comparison will return one item as the winner according to the PL model. The better item will have a higher chance to win the comparison. The problem is to find the best item with 1-\\delta confidence with the least amount of comparisons. \n\nThe authors propose a new algorithm for best item selection from multi-wise comparisons and give the worst-case, best-case, and expected sample complexity. The authors further propose a method to estimate the winning probabilities between two items without directly comparing these two items."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The significance of this paper's results is questionable. For the best item identification, the sample complexity (worst-case) is O(n*\\epsilon^{-2} *\\log(n*n_s^{-1}*\\delta^{-1})). However, in a previous paper [1], when n_s = 2 (i.e., pairwise comparisons), the sample complexity (expected) for best item identification is O(n*\\epsilon^{-2} *\\log\\delta^{-1})) (Theorem 5 of [1]), which is log(n) better than the proposed algorithm. When n_s is large enough like \\Omega(n), the sample complexity of the algorithm proposed in this paper will become the same as that in [1]. Hence, the proposed algorithm does not show superiority compared to existing algorithms, or is at best as good as existing ones. Although its sample complexity is worst case instead of in expectation, this difference is not large enough to support the significance of the new results. \n\nBesides, the winning chance estimation and the sample complexity of best item identification seem not to be correlated enough to be put in the same paper. Putting them in one paper makes the paper's scope ambiguous. If the winning chance estimate is significant enough, it is better to be placed in another paper focusing on a more related topic.\n\n[1] Ren, W., Liu, J., & Shroff, N. (2020, November). The Sample Complexity of Best-$ k $ Items Selection from Pairwise Comparisons. In International Conference on Machine Learning (pp. 8051-8072). PMLR."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Algorithms for PAC optimal item selection from subset wise relative feedback based on suboptimal item dynamic elimination that outperforms SOTA benchmarks; additionally introduce the notion of inferred updates to utilize item similarity information"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dynamic,\ntitle={Dynamic Elimination For {PAC} Optimal Item Selection From Relative Feedback},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zCJqgXnV7f},\nnote={under review}\n}"
},
"abstract": {
"value": "We study the problem of best-item identification from relative feedback where a learner adaptively plays subsets of items and receives stochastic feedback in the form of the best item in the set. We propose an algorithm - Dynamic Elimination (DE) - that dynamically prunes sub-optimal items from contention to efficiently identify the best item and show a strong sample complexity upper bound for it. We further formalize the notion of inferred updates to obtain estimates on item win rates without directly playing them by leveraging item correlation information. We propose the Dynamic Elimination by Correlation (DEBC) algorithm as an extension to DE with inferred updates. We show through extensive experiments that DE and DEBC significantly outperform all existing baselines across multiple datasets in various settings."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"probably approximately correct",
"optimal item selection",
"relative feedback",
"multi armed bandits",
"Plackett Luce Model",
"Condorcet winner",
"Bayesian updates",
"active learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a2d85a2fd9a81e27e2c69845e52423c5bd04e7af.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/9f5f2b950c66f16ca32ace3f35a9c8c1df8594ae.zip"
},
"title": {
"value": "Dynamic Elimination For PAC Optimal Item Selection From Relative Feedback"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zCZnEXF3bN | Do Stochastic, Feel Noiseless: Stable Stochastic Optimization via a Double Momentum Mechanism | main | Active | online convex optimization;stochastic convex optimization | optimization | 5;5;6;6 | 4;3;3;4 | 3;3;3;3 | 2;2;3;3 | 3;2;3;3 | 5.5 | 3.5 | 3 | 2.5 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Have the authors tested out the proposed algorithms (or their adaptive gradient variants) on problems that appropriately reflect problems/setups that are of realistic practical interest? For instance, training transformer based architectures on standard pre-training tasks?\n2. Can the authors clarify how this algorithm's guarantees look like with (a) strong convexity, (b) non-convex but say with a PL style condition?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This is a well written paper that presents two interesting algorithms with strong theoretical guarantees. The algorithms definitely appear novel to my knowledge but I do not know of the latest developments in this area."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents two methods for stochastic optimization that achieve similar guarantees as standard gradient descent and accelerated gradient descent while being robust to the choice of learning rate involved."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The empirical section seems to be fairly underbaked in my opinion."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I am curious to know about your choice of single sample per iterate setting. I understand that this choice might be more convenient to work in theory, but could you still carry the same theory using batch size b?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-organized, with clear explanations of each momentum component and its integration into $\\mu^2$. A convergence guarantee for stochastic optimization with a fixed step-size addresses a significant challenge in optimization.\n\n- The theoretical results are rigorously supported. Extending Theorem 4.2 to the accelerated version in Theorem 5.1 provides a pleasing completeness to the theory. In the former, the learning rate has a dependency on $T$, which prevents it from achieving the accelerated rate. This dependency is eliminated by introduction of the $\\mu^2$-ExtraSGD.\n\n- The theoretical results carry over nicely in the convex experiment, logistic regression on MNIST. $\\mu^2$SGD consistently achieves the best performance across a very wide range of learning rates ($10^{-3}$ to $10^3$). Unfortunately, I am unable to tell where each method lies in the last plot in Figure 1 ($10^{-4}$)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces the $\\mu^2$ (Momentum$^2$) gradient estimator, designed to manage gradient noise in stochastic optimization, particularly in convex and smooth loss scenarios using single-sample batch sizes. This estimator integrates two momentum techniques:\n\n1. **Anytime Averaging**, which averages the query points for the gradient oracle.\n2. **STORM**, a corrected momentum method that averages gradient estimates with bias correction.\n\nBy combining these techniques, $\\mu^2$ achieves a progressively shrinking square error of the gradient estimates ($\\|\\epsilon_t\\|^2 \\propto 1/t$), contrasting with the fixed gradient error in standard stochastic optimization ($\\|\\epsilon_t\\|^2 = O(1)$). This property enables the use of a fixed step-size for convergence, removing the necessity of step-size decay in stochastic settings. Additionally, $\\mu^2$ allows the norm of the gradient estimate to serve as a stopping criterion.\n\nThe paper implements $\\mu^2$ in two algorithms:\n\n1. **$\\mu^2$-SGD**: This combines $\\mu^2$ with SGD. Although $\\mu^2$ could work with other first-order methods, the authors focus on SGD to derive theoretical guarantees. Theorem 4.2 shows that $\\mu^2$-SGD achieves optimal convergence rates for noiseless ($O(L/T)$) and noisy ($O(L/T + \\tilde{\\sigma}/\\sqrt{T})$) settings using a fixed learning rate $\\eta_{\\text{Offline}} = 1/8LT$. Remarkably, this rate does not need to change between noiseless and noisy conditions.\n\n2. **$\\mu^2$-ExtraSGD**: This accelerated variant of $\\mu^2$-SGD uses the ExtraGradient framework to achieve optimal convergence rates of $O(L/T^2)$ (noiseless) and $O(L/T^2 + \\tilde{\\sigma}/\\sqrt{T})$ (noisy) with a fixed learning rate $\\eta_{\\text{Offline}} = 1/2L$.\n\nExperiments on MNIST and CIFAR-10 datasets, using convex (logistic regression) and non-convex (deep learning) models, demonstrate $\\mu^2$-SGD's stability and performance over a range of learning rates compared to various baseline optimizers, including SGD, Polyak momentum, and individual applications of Anytime Averaging and STORM."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- My main critique of this paper goes back to its main promise: with $\\mu^2$SGD, convergence using a fixed step-size is achievable in a stochastic setting. However, although the step-size is fixed, varying momentum parameters, $\\alpha$ and $\\beta$, are required to prove convergence in Theorem 4.2. The decreasing step-size nature necessary for convergence in a stochastic setting seems to be delegated to the momentum parameter here. That being said, I appreciate the theoretical results and the simplicity of the schedule for $\\alpha$. This same schedule was also used in the experiments, and it seems to work well.\n- There is no experiment on $\\mu^2$-ExtraSGD. Observing the accelerated rate in experiments could have potentially highlighted the strength of this method. I would also be curious to see how this acceleration performs compared to the Nesterov accelerated method. \n- Although the paper shows $\\mu^2$’s stability across learning rates, there is little analysis on how sensitive the algorithm might be to momentum parameter choices, especially in non-convex settings where fixed parameters are used."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. By combining two existing methods--Anytime-SGD (Cutkosky, 2019) and STORM (STochastic Recursive Momentum) (Cutkosky & Orabona, 2019), the authors propose $\\mu^2$-SGD.\n\n2. The authors show the error term is upper bounded by $\\frac{1}{t}$ with $\\mu^2$-SGD. Moreover, they obtain an upper bound for the excess loss that gives a noise independent optimal learning rate, which results in a wider range of optimal choice for learning rate compared to SGD. \n\n3. By adding Optimistic-OGD, the authors propose an accelerated version of $\\mu^2$-SGD--$\\mu^2$-ExtraSGD. The optimal learning rate in this case is constant in time so the optimal rate can be obtained without doing time-varying learning rate. \n\n4. The authors have experiments in both convex and non-convex setting to verify their results. Experiments show $\\mu^2$-SGD and $\\mu^2$-ExtraSGD are stable w.r.t. different learning rates while other methods are not. Experiments are done thoroughly with details given in the appendix."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Combining two existing methods, the authors propose an accelerated SGD mechanism that is stable w.r.t. learning rate. The authors provide rigorous justifications for their claims and supporting experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main contribution of this work--$\\mu^2$-SGD and $\\mu^2$-ExtraSGD--comes by combining two existing works. The proofs for main theorems are quite traditional. One could argue this lacks novelty, although I personally found the results interesting.\n\n2. In the numerical experiments (non-convex setting), It seems $\\mu^2$-SGD and $\\mu^2$-ExtraSGD only make a difference when the learning rate is far away from normal choices. More precisely, Figure 2 shows all methods are quite similar for $\\eta\\leq1$. $\\mu^2$-SGD and $\\mu^2$-ExtraSGD are better only when $\\eta >1$, which is not a typical choice for learning rate anyway. I am not sure how learning rate stability is appreciated by the community. \n\n3. By allowing a wider range of learning rates, one could argue that with this method, we don’t need to tune the learning rate anymore, like we could set it to be 1. However, $\\mu^2$-SGD and $\\mu^2$-ExtraSGD are not hyperparameter free, i.e. they introduce weights $\\alpha_t$ and Corrected Momentum weights $\\beta_t$, which are chosen to be $t$ and $\\frac{1}{t}$ in the experiments. I wish the authors could give more insights into this topic. If the cost of reducing one hyperparameter is introducing two hyperparameters, I am not sure it’s worth it. \n\n4. The experiments are done for different learning rates, but I found the plots were hard to read when the accuracies/losses got too close. Presenting results with an additional table could help in these cases."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.\tCould the author provide more comparisons between the proposed algorithm and parameter-free algorithms?\n2.\tCould the author include additional experiments on the impact of noise on the algorithm?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Considering algorithms that work in both noisy and noise-free conditions helps improve the robustness of the algorithm."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel gradient estimator for stochastic convex optimization, combining momentum-based techniques. Using this estimator, the authors develop robust SGD-style algorithms that achieve optimal convergence rates in both noiseless and noisy settings, maintaining stable performance over a wide range of learning rates."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe structure of this paper makes it difficult to follow, especially in terms of understanding the novelty of the algorithms.\n2.\tThe experimental results presented do not convincingly demonstrate the superiority of the proposed algorithm. The choice of learning rates between 10 and 1000 is unconventional and the hyperparameter settings for STORM are not clearly defined. Furthermore, the generalization benefits mentioned by the author in line 52 has no experimental support.\n3.\tThe novelty of this paper appears to be limited. For example, STORM was originally designed for a non-convex setting, and one of the main contributions of this paper seems to be the redesign of STORM's parameters for a convex setting.\n4.\tMany fully parameter-free algorithms[1,2] have appeared recently, which do not require knowledge of the smoothing constant $L$ and can achieve the same convergence rate. In contrast, this paper still relies on smoothing constants to determine the learning rate. Under fully parameter-free conditions, even a wide range of learning rate options appears to lose its significance.\n\n[1] Khaled A, Jin C. Tuning-Free Stochastic Optimization[J]. arXiv preprint arXiv:2402.07793, 2024.\n[2] Ivgi M, Hinder O, Carmon Y. Dog is sgd’s best friend: A parameter-free dynamic step size schedule[C]//International Conference on Machine Learning. PMLR, 2023: 14465-14499."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Stochastic convex optimization methods that allows easy tuning similarly to (noiseless) Gradient Descent"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024do,\ntitle={Do Stochastic, Feel Noiseless: Stable Stochastic Optimization via a Double Momentum Mechanism},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zCZnEXF3bN},\nnote={under review}\n}"
},
"abstract": {
"value": "Optimization methods are crucial to the success of machine learning, with Stochastic Gradient Descent (SGD) serving as a foundational algorithm for training models. However, SGD is often sensitive to the choice of the learning rate, which necessitates extensive hyperparameter tuning. In this work, we introduce a new variant of SGD that brings enhanced stability in two key aspects. First, our method allows the use of the same fixed learning rate to attain optimal convergence rates regardless of the noise magnitude, eliminating the need to adjust learning rates between noiseless and noisy settings. Second, our approach achieves these optimal rates over a wide range of learning rates, significantly reducing sensitivity compared to standard SGD, which requires precise learning rate selection.\nOur key innovation is a novel gradient estimator based on a double-momentum mechanism that combines two recent momentum-based techniques. Utilizing this estimator, we design both standard and accelerated algorithms that are robust to the choice of learning rate. Specifically, our methods attain optimal convergence rates in both noiseless and noisy stochastic convex optimization scenarios without the need for learning rate decay or fine-tuning. We also prove that our approach maintains optimal performance across a wide spectrum of learning rates, underscoring its stability and practicality. Empirical studies further validate the robustness and enhanced stability of our approach."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"online convex optimization",
"stochastic convex optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0daa0519287b4212f1139087d1d1fdf00f685183.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/53e552f2aed0303eb9fb47136c26f0579d17842e.zip"
},
"title": {
"value": "Do Stochastic, Feel Noiseless: Stable Stochastic Optimization via a Double Momentum Mechanism"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zCncHdGsOa | Efficient optimization with orthogonality constraint: a randomized Riemannian submanifold method | main | Active | Oprimization;Orthogonality constraint;Riemannian optimization;Stiefel manifold | optimization | 5;5;5;6 | 5;4;4;4 | 4;2;3;4 | 3;2;3;3 | 3;3;3;4 | 5.25 | 4.25 | 3.25 | 2.75 | 3.25 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Could you use fast random subsampling methods, such as random Fast JL transform?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The idea of updating the iterate only in a subspace by rotations is not yet well explored and practically useful generalization of the random Riemannian coordinate descent. The paper provides extensive convergence analysis for the methods (although I haven't checked the proofs in the appendices in great detail). The paper is also well written and clearly explains the main concepts. There are multiple numerical experiments in the paper."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a randomized Riemannian submanifold method for solving optimization problems constrained to the set of $n\\times p$ orthogonal frames, also called the Stiefel manifold. The main innovation of the algorithm is to randomly select an $r$-dimensional subspace, in which the current iterate is rotated in a way that locally decreases the objective. In this way, the method can be seen as a Riemmanian analogue to the random block coordinate descent for Euclidean optimization. \n\nThere are two sampling distributions considered for the random selection of the $r$-dimensional subspace: Haar uniform sampling on the orthogonal group and random selection of indices (referred to in the paper as a random permutation matrix). Both of these have the complexity of $nr^2$ although the latter can be computed in a more efficient way due to not needing to perform QR decomposition.\n\nThe paper provides global convergence analysis to critical points and local to a local minima (under PL-ineq.) in expectation and also in high-probability. The method can be also extended/analyzed to the stochastic variant when the objective gradients are random with bounded variance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### Writing comments\nAlthough the paper is well written overall, I have some remarks on formulation of certain ideas:\n* Line 161: the authors state that the random submanifold is defined by the random orthogonal matrix $P_k$. This is not true strictly speaking, in fact, I think it is only the first $r$-columns of the matrix $P_k$ that define the subspace for the rotation.\n* Adding on the remark above: Isn't it then enough to generate only a random orthogonal frame from $St(n,r)$ that defines the subspace?\n* Lines 172 - 178: This part describes one of the the main technical observations: that it is possible to do a local approximation of the $\\tilde F(Y)$ by computing retraction around an identity matrix. Can this be theoretically shown that this is a local approximation? It is not immediately clear to me why this is the case?\n* Eq. 4: there is a mistake/typo after the first equality\n* Line 198: this is very minor remark, but does the order of the first $r$ indices in the sampled permutation matrix matter? In this sense, wouldn't it be sufficient to sample $r$ indices without replacement instead of the permutation? Practically this might be the same so it might not matter. \n\n### References on sketching\nThis method seems to be related to random subspace sketching. For example a very recent work of (Shustin & Avron, 2024; https://www.jmlr.org/papers/volume25/21-1022/21-1022.pdf) considers random sketching algorithm for fast optimization over orthogonal constraints. This would be good to discuss in the paper (if indeed related), and possibly also to compare against in the numerical experiments. \n\n### Numerical experiments\nThe paper provides numerous numerical experiments but I would like to see the study of dependency on the choice of the random subspace dimension $r$. In the first experiment, the orthogonal procrustes, I am missing the information of the choice of $r$ altogether. 
I would also like to see a discussion on the computational cost of the random Haar uniform sampling of O(n) as I imagine this will be computationally complex operation. \n\nOverall, I like the main paper idea, but I am missing more direct comparison with sketching methods in Riemnannian setting as well as the explanation why we can expect (4) to hold as a good first order approximation to minimizing $\\tilde F$. I would also like to see the numerical section improved with more extensive discussion on the choice of $r$ and how it relates to the per-iteration cost."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Theorem 1. How to ensure $X_k$ remains inside $U$ under $X_{k_0} \\in U$? The stochasticity induced by sampling seems problematic. Purely assuming all $X_k \\in U$ may be too strong. \n2. Remark 3. The retraction is at cost of $\\mathcal{O}(np^2)$, while calculating the Euclidean gradient is easy to be $\\mathcal{O}(n^2p)$ flops, as seen in problems like PCA. Why does the Riemannian gradient descent become impractical?\n3. Theorem 2. Is it possible to design a verison of submanifold method that has exact convergence rather than high probability.\n4. Figure 2. Many Stiefel maniofold applications are with small $p$. Could you also add some numerical experiments on small $p$ to see the comparsions?\n5. Figure 5. The improvement on the accuracy appears marginal. The early-stage superior performance might also be achieved by using a larger step size."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The proposed randomized submanifold method includes several coordinate descent methods as specific examples and allows flexibility in setting the size of the submanifold.\n2. Convergence is established in both expectation and high-probability settings, with clear derivations provided."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a randomized submanifold algorithm for solving optimization problems with orthogonality constraints. The authors provide convergence guarantees for the proposed algorithm and conduct numerical experiments. Theoretical and numerical comparisons with existing literature are also presented."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The algorithm does not achieve exact convergence, potentially due to the stochasticity in subspace selection. Is there a way to ensure exact convergence, for example, by adding restrictions on the sampling process?\n2. It appears that the total computational complexity shows no improvement compared to Riemannian gradient descent and may even be higher when using orthogonal sampling. Additionally, the algorithm only converges to a neighborhood rather than a first-order stationary point.\n3. The numerical comparisons are insufficient. It would be helpful to see the effects of different submanifold sizes $r$ and small $p$ values."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. As cited in line 88 of the manuscript, the literature [1] proposed a randomized method, RSSM, which also updates on a low-dimensional random submanifold. Could you compare it with the method in this work in terms of computational cost per iteration? Additionally, is there any relationship between RSSM and RSDM?\n2. In line 382 of the manuscript, it states that analysis for Theorem 3 can be easily extended to mini-batch gradient descent. Can the authors explain this point briefly?\n3. In the experiments, the parameter $p$ is consistently close to $n$, for which the proposed RSDM outperforms other methods. It would be beneficial to demonstrate at what percentage of $n$ the value of $p$ needs to reach before RSDM begins to show its advantage.\n4. In Section 7.4, the numerical results of the deep learning task seem confusing, where the accuracy hovers around $40\\\\%$ to classify CIFAR10. Additionally, only the tendency of accuracy is presented; including the training/test loss would be more illustrative.\n\nI would like to increase my grade if the concerns are well addressed.\n\n[1] Randomized submanifold subgradient method for optimization over Stiefel manifolds. 2024"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. The proposed RSDM is novel, which updates on a low-dimensional random submanifold instead of the full Stiefel manifold. In this way, it reduces the computational costs in each iteration, contributing to large-scale optimization.\n2. The paper provides a comprehensive theoretical analysis, including general nonconvex optimization problems, nonconvex functions satisfying the PL condition, and the results in stochastic settings. The discussion about the trade-off between efficiency and convergence is interesting.\n3. The effectiveness and efficiency of the proposed algorithm are validated on an array of numerical experiments, and the source code is available."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a randomized Riemannian submanifold descent method (RSDM) to address large-scale optimization with orthogonality constraints. Specifically, it mitigates the computational costs associated with the retraction in each iteration by updating on a low-dimensional random submanifold rather than the full Stiefel manifold. Two sampling strategies, orthogonal and permutation sampling, are provided. Theoretical analysis guarantees convergence, and extensive experiments demonstrate the effectiveness and efficiency of RSDM."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The contribution seems limited in the scenario when $p=\\Omega(n)$. However, numerous applications typically involve the setting $p\\ll n$. Specifically, as discussed in Remark 3, the total complexity of the standard Riemannian gradient descent is $O(np^2\\epsilon^{-2})$, while the proposed method costs $O(n^3\\epsilon^{-2})$, which appears as a theoretical gap."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. In Line 69 and Line 92, the authors say that the complexity of non-standard linear algebra reduces to O(r^3) from O(nr^2). I understand O(r^3) comes from the cost of retraction operation. However, the per-iteration cost is still O(npr) or O(nr^2) (see Section 4.1 and Section 4.2). It would be better to clarify these in the statement.\n2. In line 142 and line 146, it is not suitable to use “denote … as …”. Besides, in line 155, what is the meaning of $X^*$?\n3. It can be better to explicitly write down the expression grad F_k(I_n) arising in (4) using the Euclidean gradient. Although it is very simple, it can help readers understand lines 212-213.\n4. In Line 222, why does the gradient computation only require O(nr^2)?\n5. Could the authors add more discussions on the parallel implementation?\n6. Could the authors get rid of the assumptions in Line 304? \n7. In line 484 of Section 7.3, it should be Figure 4 instead of Figure 2. In addition, if possible, could the authors also compare their RSDM with RSSM in (Cheung-Wang-Yue-So ‘24) in Line 577?\n8. In Line 467-470, Can the phenomenon of switching from sublinear rate to linear rate be explained by the error bound property (or the Riemannian PL property)? See Theorem 1 and Theorem 2 in “Liu H, So A M C, Wu W. Quadratic optimization with orthogonality constraint: explicit Łojasiewicz exponent and linear convergence of retraction-based line-search and stochastic variance-reduced gradient methods[J]. Mathematical Programming, 2019, 178(1): 215-262.” The authors can add some discussion if this is related.\n9. The update of RSDM is similar to the algorithm in \"A Block Coordinate Descent Method for Nonsmooth Composite Optimization\nunder Orthogonality Constraints\" (https://arxiv.org/pdf/2304.03641). For example, the formula in Line 215 is similar to (12) in the reference. Could the authors compare RSDM with the algorithm in the reference and highlight the differences more explicitly?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. This paper proposes a novel random submanifold descent method (with two sample strategies), which enables low-cost retractions and low per-iteration complexity. \n2. The authors provide a thorough convergence analysis under the non-convex setting, the local Riemannian PL setting, and the stochastic setting.\n3. The proposed algorithm outperforms other existing methods in various problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper deals with large-scale optimization problems with orthogonality constraints. To overcome the computational bottleneck of the retraction operator, the authors propose a novel \"subspace\" technique that updates variables on random submanifolds, reducing per-iteration complexity. They introduce two strategies for selecting these submanifolds and analyze the convergence of their methods for both general nonconvex functions and those with local Riemannian PL conditions. Besides, the approach is also applicable to certain quotient manifolds. Experimental results demonstrate the method's effectiveness across various problems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In line 87, the authors mentioned the poor parallelization of existing methods. However, they did not say too much about why RSDM can be implemented in parallel. In line 93, the authors say “allowing parallel computation”. Adding more discussion and exploration in this aspect can greatly strengthen this paper. For example, elaborating more on the parallel implementation of RSDM or even showing the speedup compared to the sequential implementation in the experiment part would be helpful.\n2. We know from Definition 1 that the Riemannian PL holds locally. In Line 304 of Theorem 1, it says “Suppose k0 large enough such that $X_{k0} \\in U$”. I understand that this assumption is because the iterates may not always stay in the local region due to algorithmic randomness. Is it possible to get rid of such an assumption? This seems difficult to me. Similar things happen in other theorems."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024efficient,\ntitle={Efficient optimization with orthogonality constraint: a randomized Riemannian submanifold method},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zCncHdGsOa},\nnote={under review}\n}"
},
"abstract": {
"value": "Optimization with orthogonality constraints frequently arise in various fields such as machine learning, signal processing and computer vision. Riemannian optimization offers a powerful framework for solving these problems by equipping the constraint set with a Riemannian manifold structure and performing optimization intrinsically on the manifold. This approach typically involves computing a search direction in the tangent space and updating variables via a retraction operation. However, as the size of the variables increases, the computational cost of the retraction can become prohibitively high, limiting the applicability of Riemannian optimization to large-scale problems. To address this challenge and enhance scalability, we propose a novel approach that restricts each update on a random submanifold, thereby significantly reducing the per-iteration complexity. We introduce two sampling strategies for selecting the random submanifold and theoretically analyze the convergence of the proposed method. We provide convergence results for general nonconvex functions and functions that satisfy Riemannian Polyak–Łojasiewicz condition as well as for stochastic optimization settings. Extensive experiments verify the benefits of the proposed method, showcasing its effectiveness across a wide variety of problem instances."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Oprimization",
"Orthogonality constraint",
"Riemannian optimization",
"Stiefel manifold"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/344ec539980c9f2d76d94044fdfcf6c1c815ef9e.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/14fc03e4fdc748559eb4d12180a073724be0a254.zip"
},
"title": {
"value": "Efficient optimization with orthogonality constraint: a randomized Riemannian submanifold method"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zCxGCdzreM | Kinetix: Investigating the Training of General Agents through Open-Ended Physics-Based Control Tasks | main | Active | reinforcement learning;open-endedness;unsupervised environment design;automatic curriculum learning;benchmark | reinforcement learning | 5;6;6;8 | 4;4;3;3 | 2;3;3;3 | 2;3;3;4 | 3;3;4;4 | 6.25 | 3.5 | 2.75 | 3 | 3.5 | -0.688247 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See Weaknesses section. Additionally,\n- The range of the y axis for the four plots on the right in Figure 5 are missing. Are these also from 0 to 1?\n- How long does training take for training on 1B Kinetix environments? Would be good to see if training on such a large number of environments itself would be a bottleneck for learning generalist agents."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Kinetix provides 66 hand-designed levels while having the option to edit tasks with a graphical editor or to randomly generate more levels with rejection sampling. \n- The unified goal and dynamics within all environments encourage policies to have physical reasoning capabilities instead of merely memorizing the solution for some particular task, which is a valuable objective for researchers to pursue. \n- Kinetix provides a way to generate unlimited environments and tasks with a unified goal, objects, and dynamics, which could be of interest to multiple research communities like generalist RL policy learning, meta-learning, world modeling, spatial understanding and physics reasoning, and so on."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Kinetix, a new 2D simulated benchmark designed for training generalist agents with capabilities in fine-grained motor control, navigation, planning, and physical reasoning. The benchmark is built on a novel hardware-accelerated physics engine called Jax2D. Kinetix enables the procedural generation of a vast amount of environments using simple shapes, joints, and thrusters, allowing for tasks that include robot locomotion, object manipulation, simple video games, and classic reinforcement learning scenarios. Each environment shares a unified objective: “make the green shape touch the blue shape without touching the red shape,” enabling zero-shot generalization to new environments within the same distribution. Experimental results show that policies trained across a wide range of environments generalize better to unseen tasks, and fine-tuning these generalist policies on new tasks yields improved performance over training from scratch. This work contributes to the development of generalist RL agents and open-ended physics-based control tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper notes that as the generated environments increase in complexity, they may become unsolvable, which could contribute to the lower performance observed in the Large-level environments. If so, how does this impact the usability and interpretability of the benchmark results? To what extent does this affect the performance results reported in Figure 3?\n- It is unclear whether the proposed benchmark supports visual observations, which are essential for training generalist policies and building agents that can operate in real-world settings.\n- Although Kinetix can generate a vast range of environments, it is unclear how this benchmark would generalize to tasks or environments outside of its defined task distribution."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Kinetix enables a wide distribution of morphologies and initial environment scene configurations, but it doesn’t deviate beyond the single unified goal. How can it be expanded to also cover a wide distribution of goals and potentially even task specifications?\n\n2. Does Kinetix support parallel pixel rendering, such as for vectorized image-based experiments?\n\n3. Is SFL only choosing between the S, M, and L levels?\n\n4. Are the inputs into the policy purely one-hots? (One-hot encoding for each of the polygons, thrusters, and shapes.)\n\n5. Is the (x, y) 2D position of each polygon an input into the network? It would seem that positional embeddings not of the ordering of polygons, but their actual spatial positions, would matter a great deal in this problem.\n\n6. In section 4, authors write that Kinetix is a deterministic setting (thus satisfying one of the conditions for using SFL). How is Kinetix deterministic, given that environments are randomized?\n\n7. How many environments were used during training, and how many environments were held-out for zero-shot evaluation?\n\n8. What was the generalist policy in Figure 5 trained on? A distribution of environments over all 4 tasks at their hard level?\n\n9. Say we train an agent only trained on L levels. How does it perform on held-out levels of a different difficulty (M and S)?\n\n10. Heatmaps were an interesting way to convey the agent’s performance. Why not simply graph the agent’s x distance from the goal, with x-axis being the training iterations?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Introduces a physics engine that provides “almost entirely dynamically specified” scenes, where environments with different robot morphologies can be vmap-ed and run in parallel, which is not doable with prior Jax-based sim frameworks like Brax.\n\n2. Paper is clearly written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Jax2D physics engine, which is a reimplementation of Box2D but written in Jax, and introduce the Kinetix environment on top of Jax2D. Kinetix allows for procedurally generated open-ended environments with different robot morphologies. Authors create a self-attention-based policy and demonstrate performance zero-shot, with pretraining, and with finetuning on the target environments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. All environments in benchmark must fall under the goal of making green shape touch blue shape without touching red shape. This seems to mainly constrain the problem to single-step tasks, where the reward of minimizing the distance from green to blue always incentivizes progress. Was this unified goal constraint purposefully imposed by design, or was it a constraint of Jax implementation, where the reward function for all environments must be the same to be parallelizable?\n\n2. Authors emphasized that parallelism and speed were big advantages of Jax2D. Since it is a reimplementation of Box2D, and this is a critical contribution of the paper, what are the performance gain metrics over Box2D?\n\n3. Experiments were on multi-discrete action space with binary rewards. However, it would strengthen the argument of the paper to do experiments on more of the important features of Kinetix, such as pixel-based observations and continuous action space.\n\n4. The state representation of the policy is very specific to the Kinetix environment suite and not very generalizable to other 2D RL problems. For instance, each entity is encoded separately and there is no scene-level encoding that is passed in as observation for the policy. Often, it is essential for a policy to understand the entire scene when predicting an action.\n\n5. There were no supplementary materials submitted, which would have been a good opportunity to show video rollouts of the trained agent in action.\n\n6. Experiments were mainly limited to the improvement of finetuned policies over pretrained and task-specific, trained-from-scratch policies. However, I would have liked to see more experiments that provide additional insights beyond “finetuning is mostly good” and “zero-shot mostly doesn’t work.” For instance, using Kinetix for lifelong learning, transfer learning, and cross-embodiment learning.\n\n7. Abstract sentence seems like an oversell, given the results. 
“Our trained agent exhibits strong physical reasoning capabilities, being able to zero-shot solve unseen human-designed environments.” Most would also disagree with the 2D learned behaviors as “strong physical reasoning capabilities.”\n\n8. Minor: I think the wrong citation was provided for MJX in Section 7 (that work seems to be Mujoco).\n\n9. Minor: Experiments would benefit from some comparison to prior approaches/architectures, though this is less important given this is mainly a systems/framework paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- For the environment generator, it is mentioned that there may exist unsolvable levels which automatic curriculum methods can filter out. Could you clarify what was done here?\n- How does the choice of algorithm affect the performance in your benchmark. Do you anticipate releasing a dataset of transitions from Kinetics which can be used for offline 2 online RL?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Provide a highly efficient 2D rigid-body physics engine, leveraging JAX for scalable computation, with speedups of up to 30x when training an RL agent, allowing for \n- The learnt agent is highly effective at Zero-Shot transfer in the S and M levels that are held out, indicating the efficacy of pre-training on a wide set of procedural generation tasks. Additionally, show faster convergence/higher performance with this initialization\n- Have interpretable/handmade levels to understand the performance on different sizes/difficulties of tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a framework for procedurally generated 2D physics-based tasks to learn an agent for physical control that can effectively transfer to tasks that involve physical control. They provide a hardware-accelerated physics engine that allows for cheap/efficient simulation to generate a mixed-quality pre-training dataset for online RL. The authors additionally provide human interpretable handmade levels in Kinetix to understand the type of tasks and interactions the agent must do. The authors show the efficacy of both zero-shot transfer to new tasks and the agent when fine-tuned on a new task. The authors evaluate their agent on classic RL environments such as Cartpole and video games like Pinball."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The JAX2D environment seems to be somewhat limited in its expressivities, modeling only 4 unique entities, which may not transfer to a wide set of domains/tasks outside of the ones studied. \n- The task/reward function seems to be fixed across all environments to collide the green and blue shaped objects, while avoiding red shapes. Additional reward shaping seems to be needed for effective training, leading to some limited applicability of generating this data at scale for any set of tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "They are mentioned in the Weaknesses."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is well-written, organized, and straightforward.\n- Extensive testing across various task complexities validates its robustness in diverse 2D environments.\n- This paper has strong potential to serve as a valuable benchmark for future research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Kinetix, a 2D physics-based RL environment aimed at enhancing generalization in RL agents. Leveraging the simulation, the agent is pre-trained for billions of steps, enabling zero-shot evaluation on novel tasks. Fine-tuning further improves performance, surpassing traditional RL methods. The approach integrates a transformer architecture with PPO as the core RL algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Real World Tasks:** While this paper provides a strong foundation in 2D simulations, expanding its scope to assess the agent’s adaptability to real-world tasks, such as 3D simulations or complex dynamics as seen in [1,2], would enhance its practical relevance. Bridging this gap could amplify the study’s contributions, offering broader insights into real-world generalization and scalability.\n\n- **Filtering out:** The authors mention that trivial and unsolvable levels are filtered out. What quantitative metrics were used to determine this filtering.\n\n- **Generalizability:** The claims of generalizability might be overstated given that the tasks remain in controlled simulations. Could the authors clarify the expected limitations of deploying such an agent in real-world scenarios with unpredictable environmental factors?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Training with reinforcement learning on a vast open-ended distribution of physics-based tasks leads to an agent that can zero-shot solve human-designed problems."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024kinetix,\ntitle={Kinetix: Investigating the Training of General Agents through Open-Ended Physics-Based Control Tasks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zCxGCdzreM},\nnote={under review}\n}"
},
"abstract": {
"value": "While large models trained with self-supervised learning on offline datasets have shown remarkable capabilities in text and image domains, achieving the same generalisation for agents that act in sequential decision problems remains an open challenge.\nIn this work, we take a step towards this goal by procedurally generating tens of millions of 2D physics-based tasks and using these to train a general reinforcement learning (RL) agent for physical control.\nTo this end, we introduce Kinetix: an open-ended space of physics-based RL environments that can represent tasks ranging from robotic locomotion and grasping to video games and classic RL environments, all within a unified framework.\nKinetix makes use of our novel hardware-accelerated physics engine Jax2D that allows us to cheaply simulate billions of environment steps during training.\nOur trained agent exhibits strong physical reasoning capabilities, being able to zero-shot solve unseen human-designed environments. Furthermore, fine-tuning this general agent on tasks of interest shows significantly stronger performance than training an RL agent *tabula rasa*. This includes solving some environments that standard RL training completely fails at.\nWe believe this demonstrates the feasibility of large scale, mixed-quality pre-training for online RL and we hope that Kinetix will serve as a useful framework to investigate this further.\nWe open-source Jax2D, Kinetix, and our final model weights."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"reinforcement learning",
"open-endedness",
"unsupervised environment design",
"automatic curriculum learning",
"benchmark"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5e42f925c25f98147717c4f0ee2a220bd7d4caa0.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Kinetix: Investigating the Training of General Agents through Open-Ended Physics-Based Control Tasks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zDC3iCBxJb | Group Ligands Docking to Protein Pockets | main | Active | molecular docking;ai4science | applications to physical sciences (physics, chemistry, biology, etc.) | 3;5;8;8 | 5;5;4;3 | 2;3;3;4 | 2;2;3;3 | 2;3;3;4 | 6 | 4.25 | 3 | 2.5 | 3 | -0.852803 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Refer to the content in the Weaknesses."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "(1) The writing and organization of this paper are very clear.\n\n(2) This paper is intriguing because it is based on the idea of enhancing the binding capability of the current ligand by considering the binding positions of other ligands that target the same protein."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a molecular docking framework called GROUPBIND, which enhances the binding capability of a ligand to a target protein pocket by leveraging other ligands that bind to the same pocket. \nThe framework introduces message padding among groups of ligands and a triangle attention module for protein-ligand pairs. Experimental results validate that GROUPBIND improves docking performance based on diffusion models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(1) Why are the results of DIFFDOCK in Table 1 worse than those in the original paper, and it seems that the bold text annotations might be inaccurate?\n\n(2) From Figure 1, we can see that molecules binding to the same pocket indeed have similar structures, but how many pockets in the dataset exhibit this situation? Is there any statistical data on the number of pockets and the corresponding similar ligands in the PDBBind dataset? \n\n(3) During inference, when searching the database for ligands similar to the query ligand, how many entries in the test set can retrieve similar ligands? If similar ligands cannot be retrieved, does that mean the model becomes ineffective?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\tThe definition of \"ligand ground graph\" needs explicit formalization. The current lack of clarity makes understanding this crucial concept challenging. For example, when referring “noisy group ligands” in line 214, it is difficult to understand this concept.\n2.\tThe references to tables and figures are confusing. Avoid using \"Section\" to denote figures, tables, and section of manuscript at the same time. Use \"Figure\" when referring to images (e.g., Figure 1, Figure 2) and \"Table\" for tabular data (e.g., Table 1, Table 2). \n3.\tA thorough proofread is necessary to correct spelling and grammatical errors. For instance, \"beyound\" on line 101 should be corrected to \"beyond.\" A comprehensive review of the entire text is recommended.\n4.\tFigure 7's inclusion of Figures 3, 4, 5, and 6. For better visual organization and clarity, these figures should be presented as subfigures within a single figure (e.g., Figure 7a, 7b, 7c, and 7d). This allows for easier comparison and a more streamlined presentation.\n5.\tLacking explanation of \"NG\" in Figure 6. While the meaning of \"NG\" in Figure 6 might be discernible from the main text, its meaning should be explicitly stated. This ensures clarity and avoids requiring readers to search through the text for an explanation. A concise definition or clarification of \"NG\" within the caption is crucial."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The idea of leveraging similar binding poses among ligands targeting the same protein is intriguing and biologically relevant.\n2. The experimental results suggest that incorporating augmented ligands improves docking performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces GroupBind, a blind rigid docking method predicated on the biochemical observation that ligands binding to the same target protein often adopt similar poses. GroupBind employs an interaction layer for a group of ligands and a triangle attention module to embed protein-ligand and group-ligand pairs. Performance is evaluated on the PDBbind dataset."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The core idea is similar to MCS (Maximum Common Substructure) docking [1, 2], which assume that ligands with similar substructures exhibit similar docking poses. GroupBind, however, assumes all ligands share similar docking poses. Figure 1 depicts highly similar ligands with similar docking structures. Conversely, [3] (Figure 4) illustrates cases where structurally distinct ligands adopt distinct poses. A statistical comparison quantifying the difference between the MCS docking assumption and GroupBind's assumption is warranted.\n\n2. Comparing GroupBind-Ref against blind docking methods like DiffDock is unfair. DiffDock performs blind docking, whereas GroupBind-Ref utilizes prior knowledge of the binding pocket, making it a site-specific docking method. This introduces a significant advantage for GroupBind-Ref.\n\n3. The evaluation should include more baselines such as FABind [5] and FABind+ [6], for which source code is available. Expanding the evaluation to include datasets like PoseBuster would assess GroupBind's ability to predict physically plausible structures.\n\n4. The reported top-1 docking success rate for DiffDock (32.4% with 40 samples in Table 1) appears considerably lower than previously \nreported results (38.2% in [4] and 36.0% in [5]). This discrepancy requires clarification.\n\n5. The clarity of the writing could be improved. Specific points of confusion are detailed below in the Questions section.\n\n[1] A High-Quality Data Set of Protein-Ligand Binding Interactions Via Comparative Complex Structure Mod, 2024\n\n[2] FitDock: protein-ligand docking by template fitting, 2022\n\n[3] DeltaDock: A Unified Framework for Accurate, Efficient, and Physically Reliable Molecular Docking, 2024\n\n[4] DiffDock: Diffusion Steps, Twists, and Turns for Molecular Docking, 2022\n\n[5] FABind: Fast and Accurate Protein-Ligand Binding, 2023\n\n[6] FABind+: Enhancing Molecular Docking through Improved Pocket Prediction and Pose Generation, 2024"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.\tThe authors should account for the effect of protein similarity on the results by performing redundancy removal on the test set proteins that are either duplicated or highly similar to those in the training or validation sets. Tools like MMseqs or other alternatives could be used for this process.\n2.\tThe authors need to explain how the similarity between ligands in the group ligand set impacts the results.\n3.\tHow would the results of GROUPBIND change if tested on the latest PoseBuster (version 1 and version 2)?\n4.\tIn Figure 4 on page 8, does the success rate refer to Top 1 or Top 40? What do “SG” and “AG” specifically mean? These details should be clarified in the figure caption.\n5.\tWhat does Figure 5 on page 8 illustrate, and where is it referenced in the article?\n6.\tIn Figure 6 on page 8, does the success rate refer to Top 1 or Top 40? The meanings of “NG”, “SG”, and “AG” should also be clarified in the figure caption.\n7.\tOn line 457 on page 9, the percentage 36.3% is mentioned twice, which could cause confusion and should be clarified."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The study proposes a new molecu lar docking framework to simultaneously consider multiple ligands docking to a protein."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a deep learning model for molecular docking, which improves data utilization and model quality by docking multiple molecules to the same protein. It further enhances the connections between similar atoms across different molecules and the same protein amino acids using a triangular perceptual network. Through this multi-molecule docking approach, the model surpasses existing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, this paper offers a certain level of contribution, but the experimental section requires clearer descriptions and further discussion. For the figures in the paper, the authors should provide detailed captions, including explanations of the methods used."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How does your method compare to ComBind in a head-to-head comparison? Answering this would significantly increase my enthusiasm for the paper as, in addition to providing a more relevant baseline, it would involve testing using ligands without known structure as augmentation ligands (assuming the ComBind evaluation framework is used)."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Particularly during the lead optimization phase, it is reasonable to expect that there are a number of known ligands for the target protein and using this information to improve docking performance could help in the discovery of better ligands.\n\nA reasonable approach for message passing across ligands with triangle attention is described. This might provide a more general template for other tasks where output are represented as graphs and there is a known consistency bias.\n\nThis approach does not require the accessory ligands to have a known structure at inference time, which is a realistic scenario.\n\nInformative ablation studies are performed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper describes an approach for leveraging the insight that similar ligands that bind to the same protein target are expected to binding similarly. A method for attending between ligands as part of a diffusion docking process is described and the results convincingly show the benefit of this approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The contributions are inappropriate as written. This is not the first time the concept that multiple similar molecules can be used to enhance molecular docking has been described. ComBind (Paggi et al, 2021; cited in the paper) does exactly that. There are other methods that use this insight in different ways (e.g. selecting poses from ensemble docking). The contributions should be qualified that this is the first end-to-end deep neural network approach to molecular docking that uses this insight.\n\nThe results only compare to single-ligand docking. Comparing to ComBind (or OpenComBind if lacking a Schroedinger license) would be more relevant. Can the diffusion model approach make better use of the similarity bias than previous methods? This question goes unanswered.\n\nNot going beyond the PDBbind to identify alternative augmentation ligands (of which there are many, as structures aren't needed for inference) is a missed opportunity that weakens the paper as with the current evaluation framework many ligands can't be put into groups. Replicating the ComBind evaluation would provide predefined groups of ligands while also making it possible directly compare to a conventional approach.\n\nThere are issues with using a time split to assess generalizability, but as this is the same split used with DiffDock it is appropriate to use for the comparisons performed here.\n\nThe overloading of \"k\" to mean two different things in Fig 2 is confusing.\n\nI found equations (4) and (5) confusing due to the overloading of z - the text says these two separate values (presumably equivalent to AF2 incoming and outgoing triangle attention edges) will be stacked, while the equations say they are summed.\n\n\"and since the C-C bond length is about 1.5A.\"\n\nTable 2 is apparently showing Top-10 results, but this fact is only stated in the main text. Why not replicate the reporting in Table 1? 
\"Med.\" isn't defined, but is presumably median RMSD.\n\n4.4: \"Section\" is used when \"Figure\" is meant. \"ifself\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024group,\ntitle={Group Ligands Docking to Protein Pockets},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zDC3iCBxJb},\nnote={under review}\n}"
},
"abstract": {
"value": "Molecular docking is a key task in computational biology that has attracted increasing interest from the machine learning community. While existing methods have achieved success, they generally treat each protein-ligand pair in isolation. Inspired by the biochemical observation that ligands binding to the same target protein tend to adopt similar poses, we propose \\textsc{GroupBind}, a novel molecular docking framework that simultaneously considers multiple ligands docking to a protein. This is achieved by introducing an interaction layer for the group of ligands and a triangle attention module for embedding protein-ligand and group-ligand pairs. By integrating our approach with diffusion based docking model, we set a new state-of-the-art performance on the PDBBind blind docking benchmark, demonstrating the effectiveness of our paradigm in enhancing molecular docking accuracy."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"molecular docking",
"ai4science"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a720afd306b2b62a4a50993134d384d54f5022ea.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Group Ligands Docking to Protein Pockets"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zDJNUDprhW | Uncoupled and Convergent Learning in Monotone Games under Bandit Feedback | main | Active | online learning;game | learning theory | 3;3;5;6 | 3;3;4;3 | 2;1;2;3 | 2;1;2;3 | 2;2;3;3 | 4.25 | 3.25 | 2 | 2 | 2.5 | 0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weakness section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- The authors attempted to improve the state-of-the-art last-iterate convergence rate for general monotone game and their time-varying variants --- the existing best bounds are $T^{-1/2}$ for strongly monotone games by Lin et al. (2021), and $T^{-1/6}$ for linear games by Cai et al. (2023)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper design an algorithm for monotone games with bandit feedback that has last-iterate convergence guarantee. The convergence rate is $T^{-1/4}$ for monotone games, and $T^{-1/6}$ for linear games."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I have concern about the soundness of the results. At the first sight, there is a strange gap between the results for general monotone game and the linear game in Table 1: for general monotone game the bound is $T^{-1/4}$, and for linear game the bound is $T^{-1/6}$. However, linear game is a special case of monotone game. The author claimed that for linear games, one cannot find a convex function $p$ that satisfies the definitions in Line 217. However, for any linear game, one can always artificially add a perturbation function $\\epsilon ||x||^2$ to make it strictly monotone. Applying the $T^{-1/4}$ bound for general monotone game, and bounding the error due to perturbation, one should be able to get a bound of $T^{-1/4}+\\epsilon$ for linear games. Then taking $\\epsilon\\to 0$, it seems we should also get a $T^{-1/4}$ bound also for linear games. \n\n- It seems the main gap lies in Theorem 5.1. In the theorem, although the right-hand side of the bound is $T^{-1/4}$, the left-hand side is not the usual Euclidean distance to the equilibrium, but the distance induced by the $p$ function defined in Line 217. This distance could be arbitrarily smaller than the Euclidean distance if $p$ is close to linear. Translating this distance back to Euclidean distance will just make this bound vacuous. Therefore, I think the claim of the paper is flawed and it does not seem to make improvement over prior work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "A couple of questions/issues for the authors:\n\n1. In the third example, it is claimed that splittable routing games are monotone. Can the authors justify this claim? It is not obvious to me.\n2. Before Proposition 5.1, the authors write that \"We have the following proposition which shows that the social welfare converges to optimal welfare on average\". This is not correct, the dynamics only approximate the optimal welfare (depending on the smoothness parameters)."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper studies an important and well-studied problem in the intersection of optimization and game theory, and makes a number of concrete contributions that improve over the prior state-of-the-art. As described above, the paper obtains the best-known rates for monotone games and time-varying games under bandit feedback, which is certainly an important contribution. Further, the techniques employed in the paper are also quite non-trivial, and rely on using two different regularizers in an interesting way. While most prior results examine last-iterate convergence in the full-feedback model, the more realistic bandit model introduces several technical challenges. I believe that the results of the paper are well within the scope of ICLR, and will be well-received from the community on learning in games. The most related papers have been adequately discussed. All claims appear to be sound; I did not find any notable issue in the approach. The writing overall is of high quality."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies the last-iterate convergence of no-regret dynamics in monotone games under bandit feedback. In particular, they provide a new algorithm that attains the best-known rate of convergence of $T^{-1/4}$ and $T^{-1/2}$ under strong monotonicity, while guaranteeing at the same time the no-regret property. This is the first convergence rate for uncoupled dynamics in monotone games under bandit feedback. Further, for time-varying games, the convergence rate improves over the prior state-of-the-art."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In terms of weaknesses, it appears that the proof mostly relies on existing techniques, although the way those techniques are used seems to have new aspects. I would encourage the authors to highlight more the technical challenges and the technical contributions compared to prior work. There is also one issue that really confuses me: Section 5.3 is about the special case of linear cost functions; how is possible that the rate is worse in that special case? For example, matrix games are clearly monotone, so why doesn't the rate of $T^{-1/4}$ apply in that case?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- A big concern is the potential issue induced by the non-uniqueness of the Nash equilibrium for monotone games. In Theorem 5.1, convergence is measured by the distance of the actions to x_i^*. Does this statement hold for each x_i^*? Should the convergence be measured using the distance of the actions to the set of Nash equilibria? \n\n- The concept of 'uncoupled' is quite confused. In games, each player's payoff depends on the actions of other agents, which suggests that their dynamics should be inherently coupled. However, a clear description of strongly uncoupled dynamics is not given.\n\n- The assumptions required for each theorem are not explicitly stated, making it difficult for readers to follow directly from the theorem statements.\n\n- The presentation of Algorithm 1 should be improved. For example, agents should first sample z and then play the perturbed action. Besides, each agent should receive feedback after all agents play their actions. Please see Algorithm 2 in [Lin 2021]."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper provides a theoretical analysis on several cases, including the convergence rate for monotone games (both in expectation and with high probability), social welfare, linear cost, and the time-varying case."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the convergence of mirror-descent algorithm in general monotone games and strongly monotone games. Several theoretical results are given. The time-varying monotone games are also considered. Overall I find the analyzed problem not new and the convergence rate is not surprised."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Many convergence rates in this paper fail to improve the existing result. The advantage of the proposed algorithm is unclear. The simulation results are also weak to demonstrate the advantage of the proposed algorithm. \n\n- There is a large room for the simulation section to be improved. Please add comparison of Algorithm 1 to, at least, the method in [Lin 2021]. Besides, the examples used in simulation are simple.\n\n- The technical contributions are unclear. The techniques used, including the ellipsoidal gradient estimator, are similar to those found in existing literature."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can the $O(T^{-1/4})$ last-iterate convergence rate be considered as the $O(T^{3/4})$ regret for bandit convex optimization?\n\n2. For bandit optimization with convex and smooth functions, the state-of-the-art regret is $O(T^{2/3})$ by [1], which seems to correspond to a last-iterate rate of $O(T^{-1/3})$ in the setup of smooth monotone games. Is it possible to improve the current $O(T^{-1/4})$ to $O(T^{-1/3})$?\n\n3. For BCO problems with smooth or strongly convex functions, FTRL seems to be more popular than OMD [1,2]. Is it possible to represent Algorithm 1 using an FTRL-based update rule? Besides, are FTRL and OMD equivalent in the setup of BCO or bandit smooth monotone games?\n\n4. The rates in Table 2 are not last-iterate results. First, the guarantees in Duvocelle et al. (2023) and Yan et al. (2023) are represented in a regret-type rate. Second, the rate in Theorem 6.2 is actually not last-iterate since the performance measure is $\\frac{1}{T} \\sum_t \\cdots$. I suggest that the authors could correct this issue in the next version.\n\n5. It seems that the rates authors mentioned in Duvocelle et al. (2023) and Yan et al. (2023) do not need smoothness? I did not check their results very carefully, and I suggest that the authors could conduct a careful double check about whether the above two works need smoothness. If not, these works seem to be not comparable since Duvocelle et al. (2023) and Yan et al. (2023) focused on strongly monotone games without smoothness while this paper considers monotone smooth games.\n\n6. It should also be made clear that Duvocelle et al. (2023) and Yan et al. (2023) used tracking error $\\\\|x_t - x^*\\\\|^2$ as the performance measure, which is different from the measure used in this paper. Thus again I think these works are actually not comparable. Besides, if I am wrong about some statements, it is welcome that the authors could correct me.\n\n6. 
In Line 287, the authors said that \"We show that Algorithm 1 converges to the Nash equilibrium in monotone, strongly monotone, and linear games.\" It should be made clear that the results for different kinds of games require different parameter configurations. \n\n7. Could the authors provide some explanations about the validness of the permanence measure $\\sum_i D_p(x_i^*, x_i^{T+1})$? Why this measure is a valid measure of the distance of the algorithm's output to the Nash equilibrium? The gap using Bregman divergence heavily relies on how convex the function $p(\\cdot)$ is. If $p(\\cdot)$ is nearly a linear function, this measure seems not to be a good enough performance measure, from my point of view, intuitively. I guess that the authors use this quantity as the performance measure mainly due to technical reasons, as I said in the 'Weaknesses' part?\n\n\n\nReferences:\n\n[1] Improved Regret Guarantees for Online Smooth Convex Optimization with Bandit Feedback, AISTATS 2011\n\n[2] Bandit Convex Optimization: Towards Tight Bounds, NIPS. 2014"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Extending the studies from strongly monotone games to generally monotone ones is meaningful. The proposed algorithm where a new regularizer is used is novel. The authors have also proved a high-probability result for the global convergence rate. And they have also applied their results to the social welfare to validate the importance of their results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem of learning in a general monotone game with bandit feedback, with strongly uncoupled dynamics. For smooth cost functions, the authors achieved an $O(T^{-1/4})$ last-iterate convergence rate. For strongly monotone games, the obtained last-iterate convergence matches the state-of-the-art rate $O(T^{-1/2})$. The proposed algorithm uses the standard gradient estimator for BCO, but updates with a slightly different OMD-based algorithm with a different regularizer. When each player runs the same algorithm, the individual regret can also be guaranteed with $O(T^{3/4})$ regret for generally monotone games and $O(T^{1/2})$ for strongly monotone games. Finally, the authors further extended the results to time-varying games, considering two cases: (1) the time-varying game sequence gradually converges to a static game, and (2) the game sequence does not converge, and the performance measure relies on some non-stationarity measures depicting how the game sequence changes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The core of achieving the last-iterate convergence is inserting a regularizer $p(\\cdot)$ into the OMD update. Concretely, in the OMD update, the authors imported an additional additive $\\eta (t+1) D_p(x_i, x_i^t)$ into the OMD update. And the core analysis is shown in Lemma J.1 in the appendix. Specifically, by using $\\sum_i D_p(x_i^*, x_i^{T+1})$ as the performance measure, the $\\eta (t+1) D_p(x_i, x_i^t)$ in the OMD update can be extracted and moved to the left-hand side of the three-point equality of Bregman divergence, dividing both sides by $O(T)$ yields the performance measure on the left-hand side and a term like $(Regret)/T$ on the right-hand side. Consequently, it remains to analyze the regret guarantee of the proposed method and dividing it by $T$ yields the final last-iterate convergence rate. \n\nThis idea is novel. However, the key question is how we can access such function $p(\\cdot)$. As stated by the authors on Page 5, the function $p(\\cdot)$ has to satisfy the condition that $c_i(x_i, x_{-i}) - \\kappa p(x_i)$ is convex. To compute such a valid $p(\\cdot)$, the algorithm must have the complete information of the original cost function $c_i$, which is not permitted by the learning protocol since $c_i$ is unknown. Otherwise, if $p(\\cdot)$ is given by the learning problem, the condition of $c_i(x_i, x_{-i}) - \\kappa p(x_i)$ being convex should be an assumption that should be put in Section 3 along with Assumption 3.1. However, I am more inclined to believe that this should not be an assumption since $p(\\cdot)$ is something that appears in the algorithm, while assumptions are actually statements about the problem-dependent and algorithm-independent quantities. Can the authors provide some further explanations on this issue?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024uncoupled,\ntitle={Uncoupled and Convergent Learning in Monotone Games under Bandit Feedback},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zDJNUDprhW},\nnote={under review}\n}"
},
"abstract": {
"value": "We study the problem of no-regret learning algorithms for general monotone and smooth games and their last-iterate convergence properties. Specifically, we investigate the problem under bandit feedback and strongly uncoupled dynamics, which allows modular development of the multi-player system that applies to a wide range of real applications. We propose a mirror-descent-based algorithm, which converges in $O(T^{-1/4})$ and is also no-regret. The result is achieved by a dedicated use of two regularizations and the analysis of the fixed point thereof. The convergence rate is further improved to $O(T^{-1/2})$ in the case of strongly monotone games.\nMotivated by practical tasks where the game evolves over time, the algorithm is extended to time-varying monotone games. We provide the first non-asymptotic result in converging monotone games and give improved results for equilibrium tracking games."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"online learning",
"game"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/857d1fb1acf132c5af4c0a30d12cca3aceb5e12d.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/79325c8cd9a5c1a25a462615f29cd1cf9b5fd903.pdf"
},
"title": {
"value": "Uncoupled and Convergent Learning in Monotone Games under Bandit Feedback"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zDJf7fvdid | Zero-shot Novel View Synthesis via Adaptive Modulating Video Diffusion Process | main | Active | Diffusion sampling;Novel view synthesis | generative models | 3;5;6;6;6 | 3;4;3;4;2 | 2;2;3;3;3 | 2;2;3;3;3 | 2;3;2;3;2 | 5.2 | 3.2 | 2.6 | 2.6 | 2.4 | -0.045835 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Kindly refer to [Weaknesses]."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The idea of using depth-warped images as guidance for novel view synthesis is reasonable.\n* It is interesting to see that the temporal consistent video diffusion model can be effectively reformulated to achieve geometrical consistent NVS in a training-free manner.\n* Experiments on several challenging settings, including 360-degree NVS from a single view, verify the significance of the introduced method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces a diffusion model-based approach to achieve novel view synthesis. In particular, it leverages the depth-warped views as guidance to achieve adaptative modulation. Experiments on single-view images, multi-view images, and monocular video input-based novel view synthesis showcase the efficacy of the introduced methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Accessing the geometry accuracy. For the 360-degree case, e.g., the truck, it would be better to apply mesh reconstruction on the rendered views, similar to Fig. 5(b) in latentSplat [Wewer et al. ECCV 2024]. The reconstructed mesh will provide a clearer understanding of how well the rendered views maintain correct geometry.\n\n* Pixel-aligned metrics. For the NVS task, it would be better to report comparisons with state-of-the-part methods regarding pixel-aligned metrics, e.g., PSNR and SSIM. \n\n* Discussion with feed-forward 3DGS models. It might be interesting to see comparisons with detailed analysis between the introduced methods and those feed-forward 3DGS models, e.g., pixelSplat [Charatan et al., CVPR 2024], MVSplat [Chen et al., ECCV 2024]. And it would be better to consider adding these methods to the related work for better coverage of recent NVS works."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "For dynamic scene comparison, it is said \"For monocular video-based NVS, we downloaded nine videos from YouTube, each comprising \nframes and capturing complex scenes in both urban and natural settings.\" \n\nWhy not just following the dynamic nerf settings? They have well aligned ground-truth for measuring the reconstruction performance.\nGeneration metrics like FID are not that reliable.\n\nSoma example datasets are HyperNerf, DyCheck (https://hangg7.com/dycheck/)"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed approach is entirely training-free, meaning it directly leverages pre-trained large video diffusion models without requiring additional fine-tuning or retraining. This feature not only reduces computational demands but also makes it adaptable to a wide range of applications where time or resources for training may be limited. The flexibility of using pre-trained models enhances its practicality, allowing users to apply this method to various scenes and tasks with minimal setup.\n\n2. The generated videos maintain high visual fidelity, delivering smooth results. This quality stems from the adaptive modulation of the diffusion process, which effectively incorporates scene details and structures from the given views, ensuring that outputs are kind of realistic.\n\n3. The method is underpinned by theoretical modeling, which guides its adaptive modulation strategy. By iteratively adjusting the score function with scene priors and analyzing estimation error boundaries, the approach achieves both controlled and adaptive modulation of the diffusion process."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a NVS method that leverages large pre-trained video diffusion models without additional training. The approach adaptively modulates the diffusion sampling process using input views to produce high-quality results from single or multiple views of static scenes or dynamic videos. Theoretical modeling is used to iteratively adjust the score function based on scene priors, enhancing control over the diffusion process. The modulation adapts to view pose and diffusion steps."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The comparison between this method and NeRF-based methods is fundamentally imbalanced. NeRF techniques incorporate an underlying 3D structure, enabling them to render any view with predictable performance, as the 3D structure informs which views are feasible and which are not. In contrast, the proposed method lacks an explicit 3D representation, limiting its view synthesis capabilities to specific views with no guarantee of consistent performance. This distinction is significant, as NeRF's inherent 3D information allows interpretable, reliable results across views, whereas this method’s output reliability is less predictable and may vary based on input views.\n\n2. To achieve a fair comparison between the proposed method and NeRF-based techniques, the authors should first reconstruct a 3D model from the output video of this method and then re-render the scene from that reconstructed model. This process would allow for a direct assessment of both methods’ rendering consistency and quality, ensuring that comparisons consider the 3D structure NeRF inherently leverages. Also, reconstruction error like PSNR should be reported.\n\n3. The video results of the proposed method exhibit visible flickering artifacts, which could substantially affect reconstruction quality and consistency. A deeper analysis is needed to assess how these artifacts impact overall reconstruction accuracy and to identify potential mitigation strategies. This might include tuning reconstruction parameters to minimize flickering, which would help improve the method’s output stability and robustness, especially for applications sensitive to temporal consistency.\n\n4. A major contribution is the derivation of the parameter $\\lambda$ in Section 4.2, which aims to minimize the estimation error upper bound in Equation 15. However, a gap remains between this upper bound and the actual estimation error represented in the left side of Equation 15. 
To strengthen the theoretical foundation, the authors should provide a more comprehensive analysis of how reducing the upper bound affects the actual estimation error. This could be achieved through statistical analysis and empirical evidence showing how well the method reduces estimation error in practice, thereby validating the theoretical assumptions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Line 415 mentions that the proposed method can achieve 360-degree NVS. Would it be possible to include a comparison with ZeroNVS [7] to better demonstrate its effectiveness?\n\n\n[7] Sargent, Kyle, et al. \"ZeroNVS: Zero-Shot 360-Degree View Synthesis from a Single Image.\" *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*. 2024."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed adaptive modulation of the score function in the diffusion process is novel.\n2. The proposed method achieves better results in various scenarios compared to baselines.\n3. The authors provide the code with an anonymous link, ensuring the applicability of the results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "1. This paper proposes a novel view synthesis pipeline without any training. The pipeline can take single or multiple views of static scenes or monocular videos of dynamic scenes as input.\n2. This paper modulates the score function with the warped input views to control the video diffusion process and generate visually pleasing results. They achieve the modulation in an adaptive fashion based on the view pose and the number of diffusion steps.\n3. They conduct extensive results on both static and dynamic scenes and show promising results with both evaluation numbers and visualizations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My primary concerns are with the references and experimental details:\n\n1. Some key references on diffusion-based NVS are missing [1,2,3,4,5,6,7]. Among these, [3] specifically focuses on scenes and has released its code. Is there a particular reason it was not included in the comparison?\n2. How is the synthesized view pose calculated in this paper? In Line 364, it states that 'current depth estimation algorithms struggle to derive absolute depth from a single view or monocular video, resulting in a scale gap between the synthesized and ground truth images.' When calculating pose error, does the proposed method account for this scale gap?\n\n**Minor Points:**\n\n1. In Table 1, it is stated that MotionCtrl [Wang et al., 2023b] and 3D-aware [Xiang et al., 2023] do not require training. However, as I understand, they do require fine-tuning.\n\n[1] Wu, Rundi, et al. \"Reconfusion: 3d reconstruction with diffusion priors.\" *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*. 2024.\n\n[2] Watson, Daniel, et al. \"Novel View Synthesis with Diffusion Models.\" *The Eleventh International Conference on Learning Representations*.\n\n[3] Yu, Jason J., et al. \"Long-term photometric consistent novel view synthesis with diffusion models.\" *Proceedings of the IEEE/CVF International Conference on Computer Vision*. 2023.\n\n[4] Cai, Shengqu, et al. \"Diffdreamer: Towards consistent unsupervised single-view scene extrapolation with conditional diffusion models.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023.\n\n[5] Tseng, Hung-Yu, et al. \"Consistent view synthesis with pose-guided diffusion models.\" *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*. 2023.\n\n[6] Chan, Eric R., et al. \"Generative novel view synthesis with 3d-aware diffusion models.\" *Proceedings of the IEEE/CVF International Conference on Computer Vision*. 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The work is complete and results seem to be good quantitively."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a training free novel view synthesis paradigm based on video diffusion model. Specificcally, warped depth maps are utilized and certain sampling methods are proposed to ensure high quality NVS."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The work is complete, but the novelty and the contribution are not strong enough to meet the quality of ICLR. A good training-free is not that appealing after previous works like MotionCtrl, and it highly relys on the capability of SVD. It is hard to know about it generalizability around various video generation methods. \n2. It is recommended to show \"Directly guided sampling\" and \"Posterior sampling\" clearly in the view of practical implementation. Some illustration would be appreciated.\n3. Some equations should be displayed more clearly. For instance in eq.8, I(P_0) should not be related to the pose index i, but in eq.9 it is related to i. Maybe in eq.9 the I(P_0) should be revised as I(P_i)?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The proposed method sets the number of inference step to 100, which is quite large. Do other diffusion-based baselines also use the same number of inference step?\n2. The paper mentions the camera trajectory is estimated (L.360). Could you provide more details on how the pose metrics (ATE, RPE) are computed?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper presents a training-free method that enables a pre-trained diffusion model to generate image sequences based on given camera trajectories.\n2. The qualitative videos demonstrate that the proposed method generates plausible outputs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a training-free sampling method that generates image sequences conditioned on input camera trajectories. The input view is warped onto the target views, serving as pseudo-GT variables. To condition the reverse process, the paper re-computes the predicted mean of a reverse step with an interpolation between the predicted mean of the current noisy latent and the warped samples. It further explores calculating the optimal interpolation weight and two guidance methods (replacement and gradient). Both quantitative result and qualitative results show the proposed method can generate smooth, high-fidelity image sequences."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper relies on an off-the-shelf depth estimation network to warp the input image(s), which may be prone to scale inconsistencies across frames. Since the reverse process is largely dependent on the warped images, depth estimation errors could propagate to the final outputs.\n2. The paper does not explain how occluded regions are handled in depth-warping. Handling the occluded regions can be particularly challenging when the camera pose variation is large, as $\\tilde{\\mathbf{\\mu}}_{t, \\mathbf{p}_i}$ in Eq.12 may produce degenerate outputs. \n3. The camera trajectories in most qualitative results are limited to relatively small variations or 360-degree circular poses, where scale ambiguity from the depth estimation network can largely be ignored. This raises robustness of the proposed method when the camera variations are large. The paper could benefit from including more examples with larger camera trajectories.\n4. The toy experiment in Fig.2 (a) shows that the $\\mathcal{E}_D$ decreases with the diffusion reverse process. This brings another question on how the predicted means converge to the GT image (loss=0) where it is very unlikely to have sampled $\\mathbf{X}_t$ that would have led to the desired GT. I believe more details of the experiment can help understanding the paper better.\n5. The paper makes some assumptions to compute the optimal interpolation weight $\\lambda(t, \\mathbf{p}_i)$ in Sec.4.2. However, the paper does not present ablation study on the choice of the weight (except when $\\lambda(t, \\mathbf{p}_i) = \\infty$, which also shows comparable results). To validate the choice of the weight schedule, the paper could present comparisons to other naive techniques (e.g., linear, constant, exponential). Additionally, the interpolation weight depends on a set of hyperparameters $\\{ v_1, v_2, v_3 \\}$ which may require engineering effort to tune on new scenes. \n6. 
While the paper shows promising quantitative and qualitative results, the number of scenes used for evaluation is insufficient to validate the method's effectiveness and differs from previous state-of-the-art methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Our method adaptively modulates score functions of a pre-trained video diffusion to synthesize impressive novel views."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024zeroshot,\ntitle={Zero-shot Novel View Synthesis via Adaptive Modulating Video Diffusion Process},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zDJf7fvdid},\nnote={under review}\n}"
},
"abstract": {
"value": "By harnessing the potent generative capabilities of pre-trained large video diffusion models, we propose a new novel view synthesis paradigm that operates \\textit{without} the need for training. The proposed method adaptively modulates the diffusion sampling process with the given views to enable the creation of visually pleasing results from single or multiple views of static scenes or monocular videos of dynamic scenes. Specifically, built upon our theoretical modeling, we iteratively modulate the score function with the given scene priors represented with warped input views to control the video diffusion process. Moreover, by theoretically exploring the boundary of the estimation error, we achieve the modulation in an adaptive fashion according to the view pose and the number of diffusion steps. Extensive evaluations on both static and dynamic scenes substantiate the significant superiority of our method over state-of-the-art methods both quantitatively and qualitatively. The source code can be found on the anonymous webpage: https://github.com/PAPERID5494/VD_NVS. We also refer reviewers to the Supplementary Material for the video demo."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Diffusion sampling",
"Novel view synthesis"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7961b157c2c6f279fff19dfd8a51b528b7355e0f.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/2a56548e78db69ff5908b5167a1328dabf0e99a7.zip"
},
"title": {
"value": "Zero-shot Novel View Synthesis via Adaptive Modulating Video Diffusion Process"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zDjHOsSQxd | End-to-End Rule Induction from Raw Sequence Inputs | main | Active | Neuro-Symbolic Methods;Interpretability;Inductive Logic Programming | neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.) | 3;5;6;8 | 3;3;4;3 | 2;3;3;3 | 2;2;2;3 | 1;3;3;4 | 5.5 | 3.25 | 2.75 | 2.25 | 2.75 | 0.160128 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Question: Please clarify the generalizability of your approach. Do you expect the rule learning method to be sensitive to data errors that arise naturally in real-world collection, such as uncalibrated sensors or aspect/focus variation in images, or frame rate in video? Consider the following comment, if that helps clarify the objective of the request.\n\nI am not familiar with the UCR datasets, and I don't think you provided a link, nor did the paper you cite have a currently valid link; I found data and descriptions here: https://www.cs.ucr.edu/~eamonn/time_series_data/. Some datasets seem to have a small degree of possible coordinate sensitivity, such as the spectrogram data, while other would vary significantly with scale, placement of the signal within a collection window, and image orientation. (This is also true of MNIST). Please review the table below, and help me understand the sensitivity of your method to data collection errors. This gets at the issue of generalizability / fragility of the method. In the table, the first column has the dataset name, the second column records whether or not your method was the top score in your reported results, the third records a (possibly incorrect) sense of how sensitive the data collection is to getting all the coordinates right, and the fourth is an explanation of the assessment. \n\n| Dataset | NeurRL | Coordinate Sensitivity | Rationale for sensitivity assessment |\n|---------|-------------|----------------------|-------------|\n| Coffee | Top | Low | Based on spectrographic measurements which are inherently coordinate-independent. The frequency patterns would be consistent regardless of measurement setup, as long as calibration is maintained. |\n| ECG | -- | Medium | While heart rhythms have characteristic shapes, the absolute voltage measurements depend on electrode placement. However, the relative patterns are fairly robust across different setups. 
|\n| Gun Point | -- | High | Heavily dependent on camera angle, distance calibration, and the defined origin point for measuring hand position. The distinction between gun/no-gun relies on precise spatial coordinates. |\n| ItalyPow.Dem | Top | Low | Power demand measurements are absolute values independent of coordinate systems. The readings represent actual power consumption regardless of measurement setup. |\n| Lightning2 | -- | Low (?) | While electromagnetic measurements depend on sensor placement, the characteristic patterns of lightning types are relatively robust across different setups. |\n| CBF | -- | Low | As a synthetic dataset designed to test pattern recognition, the shapes (Cylinder-Bell-Funnel) are meaningful in their relative values rather than absolute coordinates. |\n| Face Four | Top | High | Face profile traces are highly dependent on camera angle, distance, and orientation. Changes in perspective would significantly alter the time series. |\n| Lightning7 | Top | Low (?) | Similar to Lightning2, with moderate dependence on sensor placement but relatively robust patterns. |\n| OSU Leaf | Top | High | Leaf contour measurements depend heavily on orientation, scale, and starting point of the trace. Different coordinate systems would produce very different time series. |\n| Trace | -- | Low | As synthetic nuclear instrument data, the patterns represent relative changes that are meaningful independent of absolute coordinate systems. |\n| WordsSyn | Top | High | Pen trajectories are highly dependent on writing orientation, scale, and starting position. The coordinate system directly affects the recorded patterns. |\n| OliveOil | Top | Low | Spectroscopic measurements are independent of coordinate systems. The chemical signatures would be consistent across different spectrometers (after calibration). 
|\n| StarLightCurves | Top | Low | Brightness measurements are relative values that remain meaningful regardless of the telescope's exact positioning (assuming proper astronomical calibration). |\n\nQuestion 2: Can you explain \"leakage\" more clearly? The coordinate dependence an example above creates an implicit bias in the data, for instance limited field-of-view and imaging system orientation in MNIST and handwriting digitization; is this an example of \"label leakage\"?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Originality: This provides a new and instructive implementation to address the raw-to-symbolic data problem, while avoiding the data leakage issue.\n\nQuality and Clarity: The paper explains and illustrates the method in a clear and compelling way.\n\nSignificance: This appears to be an important contribution to the problem of addressing the problem of learning symbolic rules about raw sequential data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces Neural Rule Learner (NeurRL), a system designed to learn interpretable rules from sequential data, such as from time-series sensor data (like spectrograms) and serialized higher-dimensional data (such as images).\n\nThe system uses symbolization functions to map raw data into a predetermined framework of pattern and region predicates, allowing discrete raw data sequences to be represented as logical atoms that can be used in rule learning. In the supplied reference implementation, patterns are learned automatically through a differentiable k-means clustering process, while regions are predefined subdivisions of the sequence. The combination of learned patterns and predefined regions allows the system to express discovered features as logical rules using this symbolic vocabulary.\n\nThe framework uses two types of symbolization functions: body symbolization functions that map discrete sequences to symbolic concepts, and head symbolization functions that map inputs to a set of atoms. The system avoids the \"label leakage\" problem by not requiring pre-trained neural networks to map raw inputs to symbolic labels. The entire pipeline is differentiable and can be trained end-to-end, learning directly from raw data without needing intermediate symbolic representations.\n\nThe implementation uses a deep rule-learning module with multiple dense layers, and rules are evaluated using precision and recall metrics. The reference implementation includes an autoencoder component for learning concentrated representations of subsequences.\nExperimental validation was conducted on both time series data (UCR datasets) and image data (MNIST). Learning on UCR data-subsets highlights low-data or data-efficient learning methods. The reference implementation achieves comparable or better classification accuracy compared to baseline methods, and provides interpretable rules. The generated rules are human-readable and capture patterns in the data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The approach may require significant data invariance (orientation of images, sampling regularity). Many important datasets have these properties, although image data often does not. See questions for further clarification.\n\nAs a demonstration of the advantage of this approach as an interpretable and explainable method, it would be helpful to have the interpretation of rules on each dataset from UCR discussed in more detail. How interpretable are the rules in terms of the data patterns? Are there particular datasets or data types for which rules emerge that provide insight or match intuition?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Differentiable ILP frameworks ($\\partial$ ILP and $\\alpha$ ILP) typically suffer from their intense memory consumption due to the tensor encoding. Does NeurRL share the same problem? \n\nFor other questions, please refer to the weakness section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper tackles an important topic in the field of differentiable ILP, specifically the need for pre-trained models to ground raw input into symbols for effective rule learning. The approach of combining differentiable k-means and VAE with gradient-based rule learners is noteworthy, even though the concept is relatively straightforward.\n\nThe manuscript is well-written, with a high-quality presentation overall. It clearly outlines the research question and effectively conveys its core idea.\n\nThis work is a valuable contribution to the neuro-symbolic research community, paving the way for developing more robust systems capable of learning with fewer priors on raw input."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new differentiable Inductive Logic Programming framework where symbolic rules are learned via gradients from sequential data. To overcome the bottleneck of previous models that require pre-trained neural networks to perceive raw input, the authors propose Neural Rule Learner (NeurRL), which combines VAE and differentiable k-means to the differentiable rule learning approach. The resulting framework can learn classification rules that specify regions and corresponding patterns to be classified as positive. It is demonstrated in the experiments using simple datasets, including synthetic, UCR, and MNIST datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My primary concern is that it remains unclear whether the main claim is adequately supported by the methods and experiments sections. The paper asserts:\n\n> (Line 42-45) Learning logic programs from raw data is hindered by the label leakage problem common in neuro-symbolic research (Topan et al., 2021): This leakage occurs when labels of ground objects are introduced for inducing rules (Evans & Grefenstette, 2018; Shindo et al., 2023).\n\nand\n\n> (Line 82-85) In our work, we do not require a pre-trained large-scale neural network to map raw input data to symbolic labels or atoms. Instead, we design an end-to-end learning framework to derive rules directly from raw numeric data, such as sequence and image data.\n\nIf this claim holds, the proposed system should be trainable end-to-end, grounding symbols in meaningful ways such that rule learners can generate rules based on them. However, the paper does not make it clear how this grounding is accomplished.\n\nDoes NeurRL perform grounding on the obtained clusters? Can the rule learners manage rules involving multiple clusters by understanding the meaning of each cluster? If not, I am skeptical that the proposed framework fully addresses the bottleneck identified in the introduction, namely the reliance on perception models in prior rule learning frameworks like $\\partial$ ILP and $\\alpha$ ILP.\n\nWithout grounding, the method would be confined to the rule format (Eq. 5), as the system would lack the means to learn from non-grounded symbols, limiting its applicability to other domains.\n\nWhile I appreciate the method's ability to learn from small datasets, it is unclear how it would scale to larger, more complex datasets. Given that neural models typically require large datasets, future development would benefit from the capacity to handle such data at scale to better integrate neural networks. 
It would be beneficial to provide runtime comparisons on different dataset sizes, or to discuss potential challenges and solutions for scaling the approach to larger datasets. How does the model's performance change with increasing dataset complexity and size? If scalability to large-scale data is an issue, this limitation should be discussed somewhere in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "In addition to discuss the above mentioned weaknesses, please find below some additional questions: \n\n1. The proposed solution introduces several hyperparameters, including parameters for data pre-processing (such as subsequence length and the number of temporal/spatial slots), fixed biases for the rule learner’s neurons, temperature parameters for the rule learner’s weights, and weights for the loss functions. How are these hyperparameters selected, and how sensitive is the model’s performance to their values? A sensitivity analysis could clarify this, and it should be feasible given the small size of the datasets. \n2. The post-hoc strategy achieves lower predictive accuracy compared to the full neural solution (see Table 1), with drops of up to 15 percentage points in some cases. This underscores the heuristic nature of the proposed strategy. Could you provide an explanation for this phenomenon? Additionally, what can be said about the faithfulness of the extracted rules in relation to the learned model’s weights? \n3. Why is neuro-symbolic learning necessary for the chosen MNIST task, and what is the rationale for this choice? The task could be solved by relying on superficial image cues (such as identifying regions that are either black or white), as illustrated by the qualitative demonstration in Figure 4, without needing to understand the underlying semantics of the digits. Therefore, a purely neural approach might suffice for this task. It would be interesting to assess the model’s capability to recognize digit classes within arithmetic tasks or, more broadly, in tasks defined by programs over integer sequences (e.g., sequences of even or odd numbers, counting). \n4. Are the codes learnable in the differentiable clustering layer and how do you ensure that meaningful clusters can be learnt, without experiencing any form of collapse? Please refer for instance to the work in [1].\n\n**References**\n\n[1] Sansone. 
The Triad Of Failure Modes and a Possible Way Out. In NeurIPS SSL Workshop 2023"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is overall clear **Clarity**. However the text should be polished to rewrite/rearrange words in some sentences and correct the several typos present throughout. Please refer the MINOR paragraph below for a non-exhaustive list.\n2. The considered problem is relevant and timely **Relevance**.\n3. Code is available, but no further check has been performed **Code availability**.\n\nMINOR \\\nL.109 -> $\\neg\\alpha_2$ \\\nL.129 -> a substitution or an interpretation \\\nL.151 -> is ordered real-valued observations with the dimension one \\\nL.159 -> discrete -> discretize \\\nL.161 -> closest \\\nL.195-197 -> could you rephrase the sentence? \\\nL.273 -> the its \\\nL.325-327 -> could you rephrase the sentence? \\\nL.381 -> synthetic"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a supervised neurosymbolic model for time series data, structured as a cascade of three main building blocks. First, an autoencoder extracts an embedding representation from raw data. Next, a differentiable clustering module maps the embedding to a symbolic representation, followed by a fuzzy neural rule learner that maps the symbolic representation to the target. The overall training objective includes three loss functions, one for each building block: a reconstruction error for the autoencoder, a k-means objective for the differentiable clustering, and a supervised cross-entropy loss for the rule learner.\nTraining occurs in two stages, beginning with the pre-training of the autoencoder, followed by the fine-tuning of the entire model. Additionally, a post-hoc heuristic is introduced to extract rules in propositional logic form from the weights learned by the rule learner. Experiments are conducted on a series of small datasets from UCR, comparing the model to existing neural and kernel-based classification baselines for time series data, with results demonstrating superior predictive accuracy. Lastly, a qualitative experiment on MNIST data is provided."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper lacks adequate context within the existing neuro-symbolic learning literature, making its contribution appear incremental in terms of novelty **Quality**. For example, neuro-symbolic (NeSy) autoencoders have been previously explored (see [1,2]), and differentiable clustering has also been investigated (refer to [3,4]). Additionally, the rule learner shows a strong resemblance to $\\delta$ILP. At a minimum, a discussion of related work should be included to clarify the paper's novelty and contribution. An experimental comparison with existing methods could also help demonstrate the advantages of the proposed approach. \n2. The paper lacks soundness due to several incorrect and overly ambitious claims that are not adequately supported **Soundness**. First, the paper states that this work is the first to enable joint training of the neural network and rule learner without pre-training. However, this claim is inaccurate, as the proposed training still requires a two-stage process. Additionally, contrary to what is mentioned, the work in [5] already achieves joint learning of a neural network and a rule learner. An experimental comparison on the datasets introduced in [5] would be valuable. Furthermore, the paper suggests that the proposed solution can uncover underlying rules without identifying conditions for failure. However, depending on the level of supervision and the complexity of the symbolic task, the neural learner may learn incorrect mappings from raw data to symbols. The work in [3,4] presents a basic sequential setting to analyze this issue, demonstrating that while representation learning (through differentiable clustering) is necessary to address the problem, it is not sufficient. Similar observations are subsequently reported in [6,7], where the authors label this issue as \"reasoning shortcuts.\" \n3. 
There is very limited discussion about the details of the experimental methodology, raising serious concerns regarding reproducibility **Reproducibility**\n4. Experiments are conducted on small datasets with relatively simple symbolic tasks **Quality**. I encourage the authors to consider more traditional and more complex experimental settings for neuro-symbolic learning, including the arithmetic tasks on handwritten digits or the datasets in [5].\n\n**References**\n\n[1] Misino, Marra, Sansone. VAEL: Bridging Variational Autoencoders and Probabilistic Logic Programming. In NeurIPS 2022 \\\n[2] Zhan, Sun, Kennedy, Yue, Chauduri. Unsupervised Learning of Neurosymbolic Encoders. In TMLR 2022 \\\n[3] Sansone, Manhaeve. Learning Symbolic Representations Through Joint GEnerative and DIscriminative Training. In ICLR NeSy-GeMs Workshop 2023 \\\n[4] Sansone, Manhaeve. Learning Symbolic Representations Through Joint GEnerative and DIscriminative Training. IJCAI KBCG Workshop 2023 \\\n[5] Evans et al. Making Sense of Sensory Input. In Artificial Intelligence 2021 \\\n[6] Marconato et al. Neuro-Symbolic Continual Learning: Knowledge, Reasoning Shortcuts and Concept Rehearsal. In ICML 2023 \\\n[7] Marconato, Teso, Vergari, Passerini. Not All Neuro-Symbolic Concepts Are Created Equal: Analysis and Mitigation of Reasoning Shortcuts. In NeurIPS 2023"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is there a way to reduce the number of hyperparameters, such as determining the number of categories K adaptively during training? If this is not feasible, please at least provide guidance on how to select these hyperparameters and show how different choices impact the experimental results.\n2. Please compare your work with more related studies (see Weakness 4)"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper formally establishes the ILP learning task from raw data, utilizing the interpretation transition framework of ILP to guide the learning process.\n2. Experimental results demonstrate that NeurRL shows a notable improvement over previous methods in terms of effectiveness.\n3. The paper's overall presentation is clear, making the concepts and methodologies easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel differentiable Inductive Logic Programming model called NeurRL, aimed at solving the problem of learning symbolic rules from raw data. Unlike previous methods that rely on pre-trained neural networks to extract labels for symbolic learning, NeurRL combines a differentiable clustering module and a deep neural network learning module, enabling end-to-end rule learning from raw data such as time series and images without leaking label information."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The model has a large number of hyperparameters (especially the number of categories K, subsequence length l, and the number of regions P), which may negatively affect reproducibility and generalizability.\n2. The experimental details are insufficient, lacking information on how hyperparameters were chosen and whether tuning was done separately for different experiments. Moreover, it is unclear if the results reported are the best performance or averaged.\n3. The learned predicates lack high-level abstraction, instead offering post-hoc explanations for neighboring pixels. This limits the generalizability and interpretability of the induced rules.\n4. Although previous works did not formally define the task of extracting rules from raw data, some have achieved similar goals with comparable architectures (e.g., autoencoder + discriminator)[1, 2], or going further in technical sophistication[3] . This paper lacks a discussion of and comparison with such works.\n5. The model does not consider translation invariance in the data, meaning some patterns should only need to be present rather than bound to a specific region. This limitation may hinder the model's performance in tasks where the position of the target is irrelevant.\n\n[1] Azzolin, S., Longa, A., Barbiero, P., Liò, P., & Passerini, A. (2022). Global explainability of gnns via logic combination of learned concepts. arXiv preprint arXiv:2210.07147.\n\n[2] Walter, N. P., Fischer, J., & Vreeken, J. (2024, March). Finding interpretable class-specific patterns through efficient neural search. In Proceedings of the AAAI Conference on Artificial Intelligence (Vol. 38, No. 8, pp. 9062-9070).\n\n[3] Wang, B., Li, L., Nakashima, Y., & Nagahara, H. (2023). Learning bottleneck concepts in image classification. In Proceedings of the ieee/cvf conference on computer vision and pattern recognition (pp. 10962-10971)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024endtoend,\ntitle={End-to-End Rule Induction from Raw Sequence Inputs},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zDjHOsSQxd},\nnote={under review}\n}"
},
"abstract": {
"value": "Rule learning-based models are widely used in highly interpretable scenarios for their transparent structures. Inductive logic programming (ILP) is a form of machine learning that induces rules from facts and keeps the interpretability. Differentiable ILP models enhance their learning ability in a robust and scalable manner with the advantages of neural networks. However, most differentiable ILP methods learn from symbolic datasets. Learning from raw data needs an ILP model to tackle the symbol grounding problem: The inability to map continuous inputs to symbolic variables without explicit supervision. In this work, we incorporate a self-supervised differentiable clustering model and a novel differentiable ILP model to learn from raw data in an end-to-end way without leaking the labels. The learned rules describe the raw data with its features. We demonstrate that our method learns generalized rules from time series and images intuitively and precisely."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neuro-Symbolic Methods",
"Interpretability",
"Inductive Logic Programming"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b84668af5d4361522cf2d3378c0df9454a23f197.pdf"
},
"presentation": null,
"primary_area": {
"value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/3cda9565406c36f0e6adc83ceecdac75e2986f5e.pdf"
},
"title": {
"value": "End-to-End Rule Induction from Raw Sequence Inputs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zDze7VtB5C | A Little Depth Goes a Long Way: the Expressive Power of Log-Depth Transformers | main | Active | transformer;expressivity;limits;bounded context;circuits | other topics in machine learning (i.e., none of the above) | 3;3;5;8 | 4;3;3;2 | 2;3;3;4 | 2;1;3;4 | 2;2;2;4 | 4.75 | 3 | 3 | 2.5 | 2.5 | -0.863868 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Are there specific conditions or types of tasks where width expansion or chain-of-thought reasoning may still be advantageous compared to log-depth scaling?\n\n2. Do you have plans to empirically validate these theoretical results on real-world datasets? If so, what metrics or benchmarks would you prioritize to assess the practical benefits of log-depth scaling?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. **Innovative Depth Scaling Insight**: This paper shifts the focus from fixed-depth transformers to a dynamic, log-depth scaling, addressing some limitations of traditional models in handling extended contexts. This perspective broadens our understanding of transformers’ potential expressivity.\n\n2. **Rigorous Theoretical Foundation**: The paper provides clear mathematical proofs and complexity analyses that validate the advantages of log-depth scaling for specific tasks, particularly regular language recognition and graph connectivity. This rigor strengthens the work’s contributions to understanding transformers’ computational capacity.\n\n3. **Comprehensive Comparison with Other Scaling Approaches**: The authors examine depth scaling in comparison to width scaling and chain-of-thought methods, demonstrating that logarithmic depth growth is more computationally efficient and effective for reasoning tasks, especially for state tracking and pathfinding in bounded contexts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper explores the computational expressivity of transformers when their depth scales logarithmically with input context length. Prior analyses typically assume a fixed transformer depth, which limits the model’s ability to solve certain tasks as context length increases. This work, however, argues that transformers can still solve certain problems up to a bounded input length, even if they cannot handle them for arbitrarily large inputs. By scaling depth logarithmically with the input length, transformers can effectively simulate finite automata and solve graph connectivity problems, which are critical for tasks involving multi-step reasoning and state tracking. These findings suggest that only a modest, logarithmic increase in depth is required to address such tasks, offering a path for efficient model scaling and highlighting the benefits of dynamic depth adjustments over simply expanding model width or employing chain-of-thought reasoning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Consideration of Complementary Methods**: The paper promotes depth scaling over width or chain-of-thought expansion but could benefit from a more nuanced discussion of scenarios where those methods may still offer advantages or may complement log-depth scaling.\n\n2. **Lack of Experimental Validation**: Although the theoretical findings are compelling, the paper would be stronger with empirical experiments demonstrating the practical performance of log-depth transformers on real-world tasks and quantitatively comparing their efficiency and effectiveness with other models."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1. Given the two differences from the standard transformer architecture and the fact that Lemma 1 holds because of the masked pre-norm, do you think that the results hold for standard transformer architecture? \n\nQ2. Would it be possible to add a more informal introduction to let other people read and fully understand the work without looking at prior work? \n\nQ3. Did you also see a similar law for other architectures (like https://arxiv.org/abs/2405.06394 or other constant transformer-like model)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "S1. The authors showed that a looped transformer (a non-constant-depth architecture) can solve problems more efficiently than CoT or adding width. This is quite interesting for theoretical transformer foundations, although the impact is rather limited for current constant-depth architectures, given that it is not clear how easy it would be to scale up training for looped transformers. \n\nS2. The findings could inform more efficient model scaling strategies, maybe combining the effect of CoT with log n depth.\n\nS3. The paper is well written and all relevant lemmas are well explained."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work shows that two reasoning problems that standard transformers struggle to solve can be solved by a looped transformer. The work also shows that modular arithmetic with a small modulus can be solved with the same architecture. The authors finally showed that scaling depth is more efficient than scaling width or adding CoT intermediate steps."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. A major weakness is that the theoretical results lack experimental validation on the tasks the authors suggested, limiting real-world applicability. Also, dismissing CoT in a couple of lines seems unfair, given the potential shown by CoT on, above all, arithmetic and graph-related problems.\n\nW2. Given the memory management issue, implementing dynamic depth is not clear, and it is also not clear whether a looped transformer can indeed be __trained__ to solve the two mentioned problems. \n\nW3. The paper is very intricate and a bit more explanation may be needed to understand this work. Also, the work is not really self-contained. \n\nW4. Although Theorems 3 and 4 are the core part of the authors' work, they are not well justified."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I would appreciate it if you could address the weaknesses I highlighted earlier. Specifically, I am interested in the following:\n\n- Could you provide additional clarification on your contributions regarding the first question, particularly on how you contribute to understanding the limits of the expressiveness of fixed-depth transformers with bounded inputs?\n- Could you offer a more detailed and rigorous explanation of the concepts discussed in Section 2.2?\n- Am I correct in identifying a potential confusion between decoder-only and encoder-only models, or is there a detail I might have overlooked?\n- What is the precise method of masking you employ within your model?\n\nI'm keen to resolve these issues as I believe your work holds significant value. However, the points mentioned above currently obscure its impact."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The three areas that the paper aims to contribute to are both relevant and intriguing. Particularly, \nthe assumption of bounded input lengths, which is closer to practical settings, deserves thorough investigation.\n- Setting aside the notational and definitional ambiguities discussed below, the paper supports all \ntheoretical claims with proofs, which appear to be sufficient.\n- The related work, specifically regarding the expressive capabilities of different transformer models, \nis well chosen. I concur that this paper contributes significantly to these domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the expressive capacities of transformers where \nthe depth grows logarithmically with the length of the inputs. Specifically, the paper seeks to \ncontribute to three questions:\n- Can fixed-depth transformers handle \"hard\" problems when restricted to bounded input lengths?\n- Does logarithmic depth scaling offer greater expressive abilities compared to a fixed depth?\n- What practical advantages do log-depth transformers or input-dependent depth offer?\n\nInformally, the paper's primary theoretical contributions regarding the \"universal transformer\" model, \nwhich is characterised by a basic encoder architecture with certain layers repeated a number of times \ndependent on the input length, are as follows:\n- (Theorem 1) For each regular language $L$, there exists a universal transformer, with logarithmic repetition, \nthat recognises $L$.\n- (Theorem 2) There exists a universal transformer, with logarithmic repetition, that decides whether, \ngiven a graph $G$ and vertices $s$ and $t$, there is a path from $s$ to $t$.\n- (Theorems 3 & 4) Placing different classes of transformers (fixed depth or CoT) in $\\text{TC}^0$."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "To begin, I will summarize the main issue I encounter with this paper. \nAs mentioned earlier, the contributions the paper aims to provide are intriguing, and I also think they are partially achieved. \nHowever, the current form of the paper makes it challenging to grasp the significance of these contributions and how they compare to existing results. This difficulty arises primarily from a lack of clear definitions, confusing informal statements, and a disconnect between the theoretical contributions and their proposed implications. \n\nI will go into detail below, loosely ordering the weaknesses from most to least significant:\n- I am not convinced that the paper sufficiently addresses the first and third contributions it aims to make. The main results focus on transformers with dynamic depth, leaving me unclear about the implications for fixed-depth models. Lines 76 to 83 seem to mention this in the context of an experiment, but it is only briefly referenced and lacks further development throughout the paper. Additionally, the explanation of the third contribution in Lines 86 to 9, which states, \"scaling depth logarithmically as a computational resource more efficiently expands the expressive power of transformers compared to scaling width...\", appears to be an overinterpretation based solely on the results of Theorems 1 & 2 versus Theorem 3. If this claim is to be maintained, the paper needs to provide more detailed justification for such a bold statement, which is currently absent.\n- The definition of the (s, r, t)-universal transformer model lacks clarity and detail. While I understand the authors' intent to be succinct, even those familiar with related research might struggle to comprehend the model's specifics. A thorough understanding is crucial for comparing the expressiveness of different transformer models. 
Here are some points related to the definition in Section 2:\n - There seems to be a confusion in terminology regarding the \"semantics\" of universal transformers, which are described as sequence-to-value functions. However, on line 103, they are referred to as a decoder, which is typically associated with sequence-to-sequence transformations.\n - The definition of masking within this model is unclear. The phrase \"... add a learned mask vector that can select specific dimensions of the residual stream ...\" is ambiguous and needs clarification. This issue is particularly important because, for example, in Lemma 1, the term \"...causally masked attention...\" is mentioned, but its meaning is not well defined.\n - The notation $L^l$ is ambiguous. Does it signify layer $l$ or the function it performs? Additionally, the notation $r$-layer $(l-s) \\mod r$ is not defined\n - The definition of averaging-hard attention using a limits construction and an exponential function diverges from the common use of hardmax in related works cited within the paper. Are there differences that justify this choice?\n - Section 2.2 appears disjointed and complicates the definition. The first sentence raises multiple questions, particularly on the necessity of \"memory management.\"\n- The presentation of Theorem 2 is somewhat misleading. The problem addressed is not the \"connectivity problem\" in the traditional sense, which involves deciding whether all nodes are interconnected by some path. Instead, it is the \"reachability problem,\" which focuses on deciding if there is a path connecting specific nodes.\n- The role of Theorem 4 is not clearly articulated. The theorem is referenced with \n(Anonymous, p.c.), which is somewhat unconventional. Is this result established within this paper? \nIt seems there is a proof provided in the appendix, so the rationale for this citation is unclear. 
\nIf the theorem is not original to this work, clarification is necessary regarding its inclusion.\n- The abstract begins with the statement: \"Most analysis of transformer expressivity treats the depth (number of layers) of a model \nas a fixed constant, ...\". I find this statement problematic. It seems that the authors are referring to research focusing on formal language theory, which aims to understand which classes of languages specific classes of transformers can recognize. \nWhile individual transformer models have a fixed depth, this is right, the class of models does not need to be constrained in this way. This is an example of an informal assertion that leads to confusion. \n- Lines 44 to 46 include the statement, \"This is analogous to how regular expressions cannot express all context-free languages, but one can write regular expressions that capture fragments of a context-free language.\" The paper uses this analogy to imply that transformers generally perform effectively on shorter input lengths. This statement is somewhat perplexing because the point about regular expressions is quite basic; if there is additional nuance or significance, such statements require further clarification."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What are the other configurations (e.g., model width, maximum number of attention heads, etc.) for the Transformers in the main theorems, specifically Theorems 1 and 2? \n\n2. Can you provide more clarification of your results in relation to prior work, such as [1]? (Refer to weakness 3)\n\n3. Can you conduct experiments on the two tasks considered in the paper, varying the model depth, model width, and the number of attention heads to empirically evaluate how these factors affect performance?\n\n[1] William Merrill and Ashish Sabharwal. The parallelism tradeoff: Limitations of log-precision transformers. https://arxiv.org/abs/2207.00729."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors utilize saturated attention rather than hard attention, aligning more closely with practical applications. Additionally, they consider the effects of layer normalization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates log-depth Transformers, demonstrating that their capabilities exceed those of constant-depth Transformers ($\\mathsf{TC}^0$) and constant-depth Transformers with $O(\\log n)$ chain-of-thought steps. Through theoretical constructions, the authors show that a log-depth Transformer can effectively solve the regular language recognition problem and the graph connectivity problem, both of which fall outside the expressive power of constant-depth Transformers with polynomial size."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation for log-depth Transformers lacks sufficient conviction. Given the bounded context assumption in line 50, it would be more appropriate to treat all elements as constant. If bounded context and model sizes are to be discussed carefully, as indicated in lines 76-78 regarding the maximum problem size of graph connectivity with fixed depth $d$, the coefficient is crucial, while the result of $2^{O(d)}$ remains unclear.\n\n2. The results presented lack clarity. In the main theorems (Theorems 1 and 2), only the model depth of $O(\\log n)$ is specified, omitting other critical configurations such as the required model width and the number of attention heads.\n\n3. Additional clarification is needed regarding the results in relation to prior work. Specifically, Lemma 3 of [1] demonstrates that a Transformer with depth $2d$ maps $\\langle C,x\\rangle$ to the circuit value $C(x)$, where $C$ is a threshold circuit with depth $d$. Since regular language recognition and the graph connectivity problem both belong to $\\mathsf{TC}^1$, this lemma could already suggest comparable results in this paper.\n\n4. The absence of experimental results to validate the theoretical findings is a significant gap. Empirical verification of the solid lines in Figure 1 is also essential.\n\n[1] William Merrill and Ashish Sabharwal. The parallelism tradeoff: Limitations of log-precision transformers. https://arxiv.org/abs/2207.00729."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We provide the first expressivity analysis of transformers that accounts for model depth and explains how transformers might use depth to successfully solve problems on bounded context lengths that they otherwise cannot solve."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Little Depth Goes a Long Way: the Expressive Power of Log-Depth Transformers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zDze7VtB5C},\nnote={under review}\n}"
},
"abstract": {
"value": "Most analysis of transformer expressivity treats the depth (number of layers) of a model as a fixed constant, and analyzes the kinds of problems such models can solve across inputs of unbounded length. In practice, however, the context length of a trained transformer model is bounded. Thus, a more pragmatic question is: *What kinds of computation can a transformer perform on inputs of bounded length?* We formalize this by studying highly uniform transformers where the depth can grow minimally with context length. In this regime, we show that transformers with depth $O(\\log C)$ can, in fact, compute solutions to two important problems for inputs bounded by some max context length $C$, namely *simulating finite automata*, which relates to the ability to track state, and *graph connectivity*, which underlies multi-step reasoning. Notably, both of these problems have previously been proven to be asymptotically beyond the reach of fixed depth transformers under standard complexity conjectures, yet empirically transformer models can successfully track state and perform multi-hop reasoning on short contexts. Our novel analysis thus explains how transformer models may rely on depth to feasibly solve problems up to bounded context that they cannot solve over long contexts. It makes actionable suggestions for practitioners as to how to minimally scale the depth of a transformer to support reasoning over long contexts, and also argues for dynamically unrolling depth as a more effective way of adding compute compared to increasing model dimension or adding a short chain of thought."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"transformer",
"expressivity",
"limits",
"bounded context",
"circuits"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/196a1bf71ae37a3b4b5b6ef0331c4110dea635a3.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "A Little Depth Goes a Long Way: the Expressive Power of Log-Depth Transformers"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zE4mL85zgg | CLAD: A Contrastive Learning based Method for Multi-Class Anomaly Detection | main | Active | Industrial anomaly detection;Multi-class anomaly detection;Contrastive Learning | applications to computer vision, audio, language, and other modalities | 1;1;3;3;3 | 5;4;5;5;4 | 2;1;1;1;2 | 2;1;1;2;2 | 2;1;1;1;2 | 2.2 | 4.6 | 1.4 | 1.6 | 1.4 | 0.166667 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The writing quality of the article is subpar, and the paper appears to lack certain crucial elements: there are inaccuracies in the referencing of figures, particularly early in the overview section; there is a notable absence of analysis pertaining to experimental and visualization outcomes; the paper does not include any ablation studies; and there is a scarcity of citations and comparative work. Does this suggest that the work is yet to be considered complete?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "+ The method effectively leverages supervised contrastive learning to enhance feature separability across multiple classes.\n+ The use of a dimensionality reduction adaptor helps reduce feature complexity while preserving relevant information for anomaly detection, making the model more efficient and focused on essential patterns."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a Contrastive Learning-based multi-class Anomaly Detection (CLAD) method designed to enhance multi-class anomaly detection in industrial contexts. The proposed method combines dimensionality reduction and supervised contrastive learning to generate and differentiate between normal and anomalous samples across various classes. It uses a two-stage training process involving initial discriminative feature learning and subsequent fine-tuning, significantly improving detection accuracy on benchmark datasets like MVTec and VisA."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "+ The author has made an error in the plotting of Figure 2, as the elements within the figure are not aligned properly.\n+ The paper lacks ablation studies and could be expanded to include a deeper analysis of how each training loss term contributes to the final model performance.\n+ The layout of tables and text in the experimental section of the article is not rigorous, with excessive spacing.\n+ The model structure in the article is simple and lacks innovation.\n+ The paper has a limited number of citations and lacks a thorough comparison with relevant existing work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1.What does $p_m$ U (0, 1) mean in Line 209? What is $x_i′$ in Line 212?\n\n2.What is $\\mu$ in Eq.8? Where is the equation for calculating anomaly score $s_i$ in Line 323? You did not mention it in the methodology.\n\n3.Is the batch size B in contrastive learning equal to the number of multi-class categories? How do you balance the computational cost and training efficiency when dealing with a large number of categories?\n\n4.See weakness above."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "It proposed a framework for robust multi-class anomaly detection."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a framework for robust multi-class anomaly detection. It first generates representative anomaly samples for each class by adapting existing anomaly generation techniques. A supervised contrastive learning strategy is then introduced to construct feature distributions based on patch-level class labels, allowing the model to capture fine-grained distinctions between classes. The method achieves state-of-the-art results on various benchmarks, demonstrating both its effectiveness and efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The paper's writing quality is below standard, with excessive notation, numerous typos, and poor organization, all of which significantly hinder readability.\n\n2.The paper presents only the main results and lacks additional experiments, such as ablation studies, sensitivity analyses, etc., which are essential for thoroughly evaluating the robustness and effectiveness of the proposed approach.\n\n3.Overall, I believe this is an incomplete work that requires further development and additional experiments to fully validate the findings. It should be withdrawn in its current version."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Related work is lacking. In total 10 papers are cited but the anomaly detection field is very broad. Even the competing methods that the proposed method is compared with in the evaluation are not cited. \n\nAt times poorly written - Section 3.3 is an example. The first few sentences are very difficult to understand. Also lines 231 to 247 could be rewritten to improve clarity since it is filled with typos and unclear notation.\n\nL323 – The anomalous examples should be higher than sigma, the normal samples should be lower than minus sigma according to equations 21 and 21. Incorrectly stated in the text if I am not mistaken, although this is a minor detail.\n\nThe evaluation section is severely lacking. An ablation study evaluating design choices such as the use of the GLAS-based anomaly generation method, weighted loss, use of the contrastive objective, use of L_var etc."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- Good performance on the multi-class setting. \n- The proposed method is generally well described."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The proposed method utilizes the anomalies generated following the GLAS method and incorporating additional losses and training techniques to improve the results. For training, the contrastive losses, hard negative mining, and a variance regularization term are used. The method is evaluated on the MVTec AD and the VisA datasets. The proposed method achieves excellent results on both datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Lacking related work (only 10 papers are cited from the very active field of anomaly detection)\n- Severly lacking evaluation section. No ablation study. One of the contributions listed in the introduction is a thorough evaluation of model backbones and configurations which is not fulfilled in the rest of the paper.\n\nWhile the proposed method achieves solid performance on the multi-class setting, the evaluation section is severely lacking. The design choices, such as the additional loss terms, are not properly evaluated in an ablation study which makes it impossible for the reader to extract important insights and understand the contribution of individual components. This can not be overseen and would require a rewrite of a significant part of the paper to include so I believe this alone is grounds for rejection."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. The paper lacks ablation studies, making the overall structure incomplete.\n2. In lines 012-013 of the abstract, the authors claim to address the high computational complexity of existing models with their proposed framework. However, there are no subsequent experiments to support or validate this claim.\n3. The experimental results do not show a significant advantage over existing multi-class anomaly detection methods. It is recommended to validate the method's effectiveness on more datasets.\n4. Since there is already a contrastive learning-based anomaly detection method, ReConPatch, the authors should compare their method with ReConPatch in the experiments."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The authors propose a multi-class anomaly detection method based on contrastive learning. They introduce a two-stage training framework to train the Adapter and Discriminator separately."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a multi-class anomaly detection method based on contrastive learning. They introduce a two-stage training framework to train the Adapter and Discriminator separately. The effectiveness of the method is validated on the anomaly detection metrics of the MVTec-AD and VisA datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "See Questions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "NA"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposed method achieves better anomaly detection performance than the selected alternatives."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes to enhance existing embedding-based anomaly detection methods to multi-class anomaly detection through contrastive learning. There exists several critical problems thus I vote to reject this paper."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Related works are not comprehensive enough, which makes this manuscript less convincing. The authors categorize anomaly detection methods into reconstruction-based and embedding-based methods, ignoring lots of related works like memory-bank-based, knowledge-distillation-based, etc.\n\nAlso, lots of works lack proper references, like the author mentions DINO without refer to the corresponding paper.\n\nThis paper also lacks innovations. The authors utilize several existing techniques like gradient ascending from GLASS to construct anomalies in the feature space.\n\nGLASS is quite an important baseline for this paper, but the authors didn’t compare the proposed method to GLASS.\n\nThere are even no ablation studies."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A Contrastive Learning based Method for Multi-Class Anomaly Detection"
},
"_bibtex": {
"value": "@misc{\nguo2024clad,\ntitle={{CLAD}: A Contrastive Learning based Method for Multi-Class Anomaly Detection},\nauthor={Runtang Guo and Hong Li and Tongfei Chen and Yuguang Yang and Peng Zhou and Linlin Yang and Guodong Guo and Baochang Zhang},\nyear={2024},\nurl={https://openreview.net/forum?id=zE4mL85zgg}\n}"
},
"abstract": {
"value": "Anomaly detection is crucial yet challenging in industrial production, especially in multi-class scenarios. Existing high-performance unsupervised methods often suffer from low efficiency and high model complexity. While lightweight discriminator-based detectors have been proposed, they are typically designed for single-class detection and exhibit significant performance degradation when extended to multi-class tasks. To address these limitations, we propose a novel Contrastive Learning-based multi-class Anomaly Detection (CLAD) method.\nOur approach first encodes multi-class normal images to generate normal samples in the feature space, then synthesizes anomalous samples in this encoded space. We then employ an Adapter network to compress the samples and leverage contrastive learning to effectively cluster normal and anomalous samples across multiple classes. Finally, a discriminator network is used for anomaly classification and identification. By leveraging anomaly sample generation and a two-stage training process, our framework achieves state-of-the-art performance on the MVTec and VisA datasets under the discriminator-based paradigm. Our key contributions include a novel framework for multi-class anomaly detection, efficient sample generation techniques, and a comprehensive evaluation of model configurations."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Runtang_Guo1",
"~Hong_Li4",
"~Tongfei_Chen5",
"~Yuguang_Yang1",
"~Peng_Zhou14",
"~Linlin_Yang1",
"~Guodong_Guo1",
"~Baochang_Zhang1"
]
},
"authors": {
"value": [
"Runtang Guo",
"Hong Li",
"Tongfei Chen",
"Yuguang Yang",
"Peng Zhou",
"Linlin Yang",
"Guodong Guo",
"Baochang Zhang"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Industrial anomaly detection",
"Multi-class anomaly detection",
"Contrastive Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "guo|clad_a_contrastive_learning_based_method_for_multiclass_anomaly_detection"
},
"pdf": {
"value": "/pdf/79e08af65187e5e7b968b9a2522ab72bd67be97f.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "CLAD: A Contrastive Learning based Method for Multi-Class Anomaly Detection"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zEPYCDaJae | DATASEA - AN AUTOMATIC FRAMEWORK FOR COMPREHENSIVE DATASET PROCESSING USING LARGE LANGUAGE MODELS | main | Active | Automated Data Processing;LLM;Data Pipeline Automation;NLP;Data Mining | infrastructure, software libraries, hardware, systems, etc. | 1;3;3;3 | 5;3;3;5 | 1;2;2;2 | 1;2;2;1 | 2;2;3;2 | 2.5 | 4 | 1.75 | 1.5 | 2.25 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "Authors seemed to give LLMs way more trust while processing data engineering on computer systems, involving unrestricted LLMs in such a field is not a good idea, I would suggest more ethical review for potential security concerns."
},
"flag_for_ethics_review": {
"value": [
"Yes, Privacy, security and safety"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The multi-chunk strategy as the authors described in the paper, is a well-engineered strategy for building large-scale data RAG systems, however, there's not a universal way for chunking the dataset or documents, what specific method did you use to \"breaking the data into manageable sections while maintaining context across chunks.\"\n\n2. Since this is somehow like an end-to-end pipeline for dataset search on the web, would the authors mind sharing with me the overall latency of searching DataSEA? For example, suppose you are searching for the COCO dataset, regardless of the dataset download overhead (since it is related to your network bandwidth). What is the estimated waiting time I'm going to expect?\n\n3. One particular thing I'm also interested in is what kind of specific challenge did authors faced in this work. It's never been a settled one for processing data in the wild of free forms, there must be some different tasks remaining before and after this work. The authors did not, at least I did not, fully discuss the challenges for LLMs to process data. Understanding these points would also help understand the contribution proposed in this work.\n\n4. I'm curious about how authors process those huge datasets, e.g., datasets with 10GB+ single files, they can be binary-formatted or something else, making it extremely hard for simple general LLMs to do anything about it."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This work wrapped previous work in automated dataset discovery and analysis using the power of LLMs. DataSEA here is a fully automated framework for many data engineering applications, which is a creative combination of existing tools and engineering results. On the other hand, this work is a good start toward many automated information management systems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper focuses on data management using current techniques of LLMs, specifically, it uses first LLMs to provide a fully integrated solution for dataset discovery, evaluation, and custom analysis using large language models. It introduced the trilogy of automated dataset processing in the paper, Search, Evaluation (by LLMs), and Analyze (also by LLMs), which is an endorsable innovation in processing web data based on LLM."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I'm concerned about the feasibility of this work in real applications, it seems the data processing itself is only somewhat related to tasks like document management with web search discovery, but lacks enough contribution. A comparative analysis with current or previous relevant methods could highlight DataSEA's strengths for other researchers to improve it and provide clearer insights into its performance and feasibility.\n\n2. On the other hand, the type of data this work can support is also very limited. As pointed out in their work, it is unable to process databases, making the work have less contribution in real applications, since nearly all the existing proven-valuable data is stored in different purpose-designed databases.\n\n3. This work heavily leaned on existing tools such as search engines and LLMs, which limits the contribution and novelty of the work. From my perspective, this paper uses LLMs and Google Search as a web crawler application combined with a few HTML tricks. It neither discussed the reliability of this framework using two existing tools nor evaluated it. In this paper, authors employ the LLMs and let them do their prompt-given jobs, if so, might not be a good research paper, but a good technical engineering application. It would be good if authors could showcase a few pieces of innovation beyond just combining existing tools, another good point would be elaborating your novel contribution at the beginning of the paper.\n\n4. The scope of this work seems too large and underestimates the complexity of actual web data processing. Real such data engineering means getting hands dirty from all-sourced data to efficiently store data, I'm conservative about whether LLMs are able to handle various sources, types, sizes, structures, and containers of data. 
I'm also curious about how authors process those huge datasets, e.g., datasets with 10GB+ single files, it would help to provide more detailed examples or some case studies showing how your system handles complex, real-world data processing scenarios."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "DataSEA offers an agentic framework to test how LLMs can help the data processing pipeline.\n\nThe paper is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents DataSEA, an automated framework for dataset processing that leverages LLMs to streamline dataset discovery, evaluation, and analysis. DataSEA consists of three core modules—Search, Evaluate, and Analyze—that autonomously handle tasks from locating datasets on the web to analyzing and generating code for custom visualization. The system aims to significantly reduce the manual labor associated with data preparation, allowing users to input a dataset name and receive automated support for finding, organizing, and analyzing data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Real-world Relevance of the Framework**: The paper suggests that LLM agents are capable of downloading datasets, finding metadata, and locating relevant websites independently. However, major dataset platforms (e.g., Hugging Face, Kaggle) now offer easy-to-use APIs for downloading datasets and metadata. This raises the question: why test if an LLM agent can handle these tasks from scratch, rather than prompting it with relevant API code? It would be beneficial for the authors to discuss the practical use cases for DataSEA given existing tools.\n- **Lack of Benchmark Focus**: The benchmark includes three components, each targeting different tasks. The search module retrieves dataset-related information, the evaluation module retrieves related literature and generates metadata, and the analysis module handles dataset downloading and visualization. However, it’s unclear what the unique challenges are for each module, which task presents the most significant bottleneck, and where the main challenges lie.\n- **Dataset Size**: The evaluation dataset is relatively small, with only 100 datasets tested.\n- The prompts in the appendix are poorly formatted and truncated due to page limits."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What is the model size used for Llama 3 in line 325, i.e., 8B, 70B? \n2. Can you reveal more experiment details based on the domain area of datasets? The author's team discussed the limitations of the system on biological fields so far.\n3. Can you describe a running example of how this system works? As the reviwer can't infer much detail from the current paper. Search: what would the optional details be? Extract: what would the custom property be? Analyze: user requirements?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "S1: The DataSEA system cooperates with multiple LLMs responsible for three core modules. Prompt engineering and a multi-chunk strategy are widely applied.\nS2: The paper claims to be the first to provide a fully integrated solution for dataset discovery, evaluation, and custom analysis using LLMs. By automating these processes, DataSEA has the potential to significantly impact data-driven research by reducing the time spent on preliminary data handling tasks.\nS3: This paper presents its objectives, the problem it aims to solve, and the solutions it proposes in a clear manner. Meanwhile, the author team has designed a bunch of experiments to support the system's robustness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a holistic and broad system, DataSEA, for dataset processing, including searching, evaluation, and analysis. This system is built on top of a set of LLMs driven by curated prompts. The author's team has conducted a series of experiments to evaluate the performance in terms of different processing speeds, and the result turned out to be promising. Code implementations are publicly available."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Grand but Sketchy Framework: The paper presents DataSEA as a comprehensive solution for dataset preprocessing, which is an ambitious goal. However, the description of the search, evaluation, and analysis modules, while innovative, may lack in-depth observation. It's not quite convincing to me whether the system is actually useful in wild dataset processing. For instance, line 330 quotes that it takes about 3-5 mins for high-speed mode. Also, the whole system employes LLM heavily in every stages as well as integration of Google product APIs, will it leads to a high cost?\n\n- Missing baseline: Comparative analysis is crucial for establishing the advancement of the system over existing ones. The paper could be improved by providing a section (or figure) that directly compares DataSEA's outcomes with those of other tools or frameworks mentioned in the related work section. If there are no direct competitors, the authors should explain why this is the case and how DataSEA's approach is fundamentally different or more effective.\n\n- Poor Presentation: The figures look conceptual but lack illustration with any examples. To strengthen this part of the paper, the authors could Include case studies or use cases that demonstrate the system's capabilities in real-world scenarios. In particular to content writing, the author team should amend properly in revision:\n1) Line 99 mentioned the system is powered by a set of LLMs. But model versions seem not aligning with the experiment settings in line:324. \n2) Line 158-163, the summary part appears to be suitable for the ending part of the introduction section, not the related work.\n3) Improve all in-line references and labeling of every figure or table.\n4) Wrong Table-of-Content in Appendix section."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1. What is the formal problem definition? Does the proposed solution address all aspects of dataset search, or are there limitations in scope?\n\nQ2. What are the specific scopes of data evaluation and analysis in the system? Does the framework aim to address all types of data evaluation and analysis problems, or are there certain boundaries or limitations?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "This paper tries to automate the entire dataset handling pipeline, from discovery to evaluation and analysis, significantly reducing manual labor."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces DataSEA (Search, Evaluate, Analyze), a fully automated framework for dataset processing using large language models (LLMs). Its goal is to streamline the data handling pipeline, reducing the manual effort involved in dataset discovery, preparation, and analysis."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Key Weaknesses:\n\nW1. Lack of Formal Problem Definition:\n\nThe scope of \"dataset discovery\" and \"dataset processing\" is not clearly defined in the paper. While the authors claim that \"DataSEA is among the first to provide a fully integrated solution for dataset discovery, evaluation, and custom analysis using large language models,\" it remains unclear what specific types of datasets are targeted and how the boundaries of these processes are drawn. A formal definition of the problem and its components is essential to frame the solution appropriately.\n\nWhat types of datasets does DataSEA aim to handle?\nWhat are the specific steps involved in dataset discovery, evaluation, and analysis?\nWhat are the inputs and outputs at each stage of the pipeline?\n\nFor example, whether the user can search the dataset like \"COVID-19 cases in CA in the last season of 2022?\" or \"Give me datasets about house price is LA in 2023.\"\n\nW2. Simplified Approach to Dataset Discovery:\n\nThe dataset discovery aspect could be seen as limited, given that existing tools like ChatGPT for Google Search or Google Dataset Search (and many others, see (https://anaconda.cloud/useful-sites-finding-datasets)) could be easily integrated to achieve similar outcomes. The paper does not explore how DataSEA improves upon or distinguishes itself from these existing dataset discovery engines, raising questions about its novelty in this area. Therefore, it is suggested to:\n\n- Provide a comparison table showing the capabilities of DataSEA versus existing tools like Google Dataset Search and ChatGPT.\n- Explain any unique features or improvements DataSEA offers over these existing solutions.\n- Discuss why integrating existing tools was not chosen as an approach, if applicable.\n\nW3. Ambiguity in Types of Evaluation:\n\nThe different types of evaluations performed by DataSEA are not well-defined. 
While the paper suggests that GPT can handle these evaluations, it lacks a formal breakdown or categorization of the evaluation types, leaving the reader unclear about the specific contributions and effectiveness of the system.\n\nCan I ask questions like \"how many data errors exist in the searched dataset.\"\n\nW4. Undefined Scope of Custom Analysis:\n\nThe notion of \"custom analysis\" remains vague and insufficiently described. It is unclear what kind of analyses the system can perform and how flexible or adaptable these are to various research scenarios. Without clear examples or a formal definition, the custom analysis capability is hard to evaluate or differentiate from standard LLM tasks.\n\nCan I ask questions like \"what will the stock price trend next month?\"\n\nW5. Lack of Clear Comparisons with Baseline Methods:\n\nAlthough there are many LLM-powered methods available for automated evaluation and analysis (e.g., using prompts with GPT models), the paper does not provide a comparison with state-of-the-art baselines. Without experimental results that benchmark DataSEA against these existing solutions, it is difficult to assess its effectiveness or performance improvements. The authors could compare with\n\n- Standard LLM prompting approaches for dataset analysis,\n- Existing automated data analysis tools, or\n- Manual expert analysis as a human baseline\n\nExperimentation Weaknesses:\n\nW6. Missing Comparisons with LLM-Based Methods:\n\nThe experiments do not compare DataSEA’s performance with existing LLM-powered methods that could achieve similar results through effective prompting. This omission weakens the claims of novelty and superiority, as there is no evidence of how DataSEA outperforms such methods. \n\nIt is acceptable to include supplementary materials in the appendix. However, the formal problem definition and the proposed solutions must be clearly presented in the main body of the paper, rather than relying on simple descriptions"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A fully automatic system leveraging large language models to streamline dataset acquisition, metadata extraction, and preliminary analysis, enhancing research efficiency and data exploration."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024datasea,\ntitle={{DATASEA} - {AN} {AUTOMATIC} {FRAMEWORK} {FOR} {COMPREHENSIVE} {DATASET} {PROCESSING} {USING} {LARGE} {LANGUAGE} {MODELS}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zEPYCDaJae},\nnote={under review}\n}"
},
"abstract": {
"value": "In the era of data-driven decision-making, efficiently acquiring and analyzing\ndiverse datasets is critical for accelerating research and innovation. Yet, traditional manual approaches to dataset discovery, preparation, and exploration\nremain inefficient and cumbersome, especially as the scale and complexity of\ndatasets continue to expand. These challenges create major roadblocks, slowing down the pace of progress and reducing the capacity for data-driven breakthroughs. To address these challenges, we introduce DataSEA (Search, Evaluate, Analyze), a fully automated system for comprehensive dataset processing, leveraging large language models (LLMs) to streamline the data handling\npipeline. DataSEA autonomously searches for dataset sources, retrieves and organizes evaluation metadata, and generates custom scripts to load and analyze\ndata based on user input. Users can provide just a dataset name, and DataSEA\nwill handle the entire preparation process. While fully automated, minimal user\ninteraction can further enhance system accuracy and dataset handling specificity.\nWe evaluated DataSEA on datasets from distinct fields, demonstrating its robustness and efficiency in reducing the time and effort required for data preparation\nand exploration. By automating these foundational tasks, DataSEA empowers\nresearchers to allocate more time to in-depth analysis and hypothesis generation, ultimately accelerating the pace of innovation. The code is available at\nhttps://github.com/SingleView11/DataSEA."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Automated Data Processing",
"LLM",
"Data Pipeline Automation",
"NLP",
"Data Mining"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9ce79fc7a36ed42e392aa531f929e0b809dee7a5.pdf"
},
"presentation": null,
"primary_area": {
"value": "infrastructure, software libraries, hardware, systems, etc."
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/38c48d32357739f260192fe210b9fb07b7c51349.zip"
},
"title": {
"value": "DATASEA - AN AUTOMATIC FRAMEWORK FOR COMPREHENSIVE DATASET PROCESSING USING LARGE LANGUAGE MODELS"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zET0Zg71WT | Structure-aware Attention based on Vector Symbolic Architectures | main | Active | transformers;attention;vector symbolic architectures;neurosymbolic ai;hyperdimensional computing | neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.) | 3;3;3;6 | 4;3;3;2 | 2;3;2;3 | 1;3;3;3 | 1;2;3;3 | 3.75 | 3 | 2.5 | 2.5 | 2.25 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How does the runtime of GHRR compare to that of the vanilla Transformer? Given that vanilla attention has quadratic complexity, it would be good to know the complexity of GHRR relative to the standard self attention mechanism."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors draw useful connections between self-attention based Transformer models and Vector Symbolic Architectures, and use it to derive mathematical equivalence between QKV hypervectors and the self-attention mechanism.\n2. The authors use the insights to construct self-attention mechanisms for more complex data, proposing a GHRR-based Graph Transformer architecture for graph inputs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a Transformer encoder architecture based on Generalized Holographic Reduced Representations (GHRR), a Vector Symbolic Architecture (VSA) paradigm capable of implementing data structures including attention mechanisms. The proposed architecture uses binding-based positional encoding to encode sequential data, and also supports graph inputs. The architecture is evaluated on language modeling as well as node and graph classification tasks, showing better perplexity on language modeling tasks and comparable performance on vertex classification."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The benchmarking of GHRR is limited; next token prediction perplexity is reported on two language datasets in Table 2 against a vanilla transformer, however the language modeling results would be strengthened by benchmarking on more recent NLP benchmarks and tasks, for instance the LAMBADA dataset for natural language understanding. Performance on a node classification and graph classification task is reported in Tables 3 and 4, however both experiments are missing baseline graph transformer models such as GPS Graph Transformer [1] and Graphormer [2]. Evaluating on more standard GNN benchmark datasets, such as ZINC and Open Graph Benchmark datasets, would also strengthen the empirical results of GHRR.\n\n1. Rampášek, Ladislav, et al. \"Recipe for a general, powerful, scalable graph transformer.\" Advances in Neural Information Processing Systems 35 (2022): 14501-14515.\n2. Ying, Chengxuan, et al. \"Do transformers really perform badly for graph representation?.\" Advances in neural information processing systems 34 (2021): 28877-28888."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Is the $\\delta$ similarity measure used in the model itself?\n- How were the hyperparameters for model training chosen?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper considers an interesting goal: implementing transformers using GHRR, which has rich algebraic structure\n- A clear and informative description of VSAs and GHRR is provided.\n- The proposed model is original; both for sequences and graphs.\n- Valuable experimental results are demonstrated"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new transformer architecture based on vector symbolic architectures (VSAs). The paper uses the Generalised Holographic Reduced Representation implementation of VSAs, which represents tokens as complex hypervectors ($\\mathbb{C}^{D\\times m \\times m}$), and models interactions between these through bundling and binding algebraic operators.\nThe paper implements a transformer for sequences, and a transformer for graphs using GHRR, and demonstrates experimental results on language modelling and vertex classification tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The presented results for the language modelling task use a single baseline, the vanilla transformer. For this model, the chosen embedding size is quite small, compared to those in the literature. It is also uncommon that the hidden dimension for the transformer is smaller than the model dimension.\n- No hyperparameter search is described for either task.\n- The presented results thus are not entirely informative about the model's performance.\n- While VSA's are known to have rich algebraic structure, the paper does not discuss this for the presented model. It would have been valuable to demonstrate whether, for example, semantically similar words are mapped to GHRR representations with high similarity as measured by $\\delta$ described on line 170. This would serve to motivate the architecture better.\n- Related to the above, it is not clear from line 242 whether the GHRR version of attention uses the similarity measure from the VSA, which would be most natural."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- unclear to me why there was a significant performance improvement over transformers if the attention mechanism is theoretically the same. Could you perhaps elaborate on the specific differences in the GHRR transformers that could contribute to the performance differences in the discussion/results? Ideally provide some analyses to causally investigate the important differences.\n- why does the VSA formulation help with graphs in a way that vanilla transformers cannot already do? Graphs can readily be encoded as tokens in a context."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- referred to bridge between transformers and hopfield networks\n- presented material in a digestible way even for those who have never heard of VSAs\n- demonstrate a formal, theoretical unification between VSAs and transformer attention\n- support theoretical claims with empirical evidence\n- show that their formulation can be readily used in another domain (graph classification)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors present a theoretical bridge between Vector Symbolic Architectures and transformer attention through the specific VSA of Generalized Holographic Reduced Representations. They show how the transformer attention mechanism can be derived as a specific formulation of the GHRR with some slight relaxations. They then show that their derivation is correct by implementing it and comparing against vanilla transformers in a few settings. They also show how the VSA formulation more naturally extends to graph learning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- as I have never encountered VSAs before, I'm unsure about impact/significance of VSAs in general. Does the broader ICLR audience care about VSAs? Why should they care?\n- difficult to understand the variants presented in Table 2. It would be nice if the differences between the variants was more clearly summarized closer to the point at which they are presented. Were they described in the methods?\n- would have appreciated a comparison of the graph and vertex classification results to vanilla transformers\n- generally difficult to understand some of the notation (probably due to my lack of exposure to VSAs)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1) Can you please provide evidence of what happens empirically when the model is used with a token context window n larger than m?\n\n2) Can you elaborate on the effect of relaxing unitarity of W?\n\n3) Can you provide experiments on widely accepted graph benchmarks, such as those on the Open Graph Benchmark (OGB)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The idea of deriving a more general framework for self-attention through VSA is interesting. In particular as it may enable a principled way to manipulate the inductive bias of the attentional module by operating on how the underlying vector space is generated and how positional information is embedded in the representation by the algebra operators. This makes the work, in principle, able to achieve some impact on the community.\n\n * The proposed approach, surprisingly, does not imply a substantially higher parameterization and computational costs (at least according to what the Authors state in the paper, as there is no specific experimental data supporting this claim)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The work discusses how a specific implementation of Vector symbolic architectures (VSA) can be interpreted as a multi-head attention model for compound data encoding. The manuscript shows under which restrictions of the VSA model its computation map to self-attention with fixed context window. It further provides a discussion on how to define specialized positional encodings to process sequential and graph structured data. Experiments are provided on simple next token prediction, node classification and graph classification datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The work appears rather derivative when coming down to the learning methodology. The model underlying the proposed GHRR Transformer derives largely from Yeung et al 2024, aside from the part which integrate positional encoding information in the neural representation. Overall, the proposed equivalence of GHRR to self-attention boils down to a resemblance of the matrix operations involved in self-attention, when substantial constraints are imposed to the GHRR model. There is really no deep study and assessment of the relationship between the GHRR Transformer and self-attention, and of what are the consequences of some design choices and simplifications introduced in GHRR. Without a deeper insight on the key advantages introduced by the proposed GHRR-Transformer/Self-attention equivalence, this may appear rather empty and mostly a technical exercise. The equivalence itself hinges on quite strong simplifying assumptions, which are mentioned but whose impact is not discussed in depth. For instance, GHRR assumes a fixed context window: if such assumption is relaxed, then the dimensions of the embedding becomes entangled, loosing the very motivation for having introduced an holographic representation in a first instance. This aspect is mentioned in the paper, but only marginally, while it seems a major limitation of the approach.\n\n* The work appears derivative also with respect to the contribution on graph processing. The encoding of graph data in the proposed GHRR Transformer is heavily based on Poduval et al, 2022. The paper is not clear about what novel insight is being provided on graph encoding by the proposed approach against the one of GraphHD. It would be helpful if the Authors could elaborate more on what are the novelties of the proposed approach agains GraphHD. The technical discussion on the properties of the GHRR Transformer against what is the state of the art in graph NNs, remain on a shallow level. 
For instance, if I got this right, the disentanglement properties of the proposed Holographic embedding should allow to memorize exact information about inter-nodes relationships in the vertex encoding. This may be relevant with respect to the literature discussion about how to design graph NNs capable of capturing long range node relationships, surpassing limitations such as oversquashing. The work appears not well positioned with respect to relevant literature in this respect.\n\n * The work, at some point, relaxes the assumption on W being unitary. My understanding is that such an assumption in needed to preserve the holographic nature of the embeddings. It cannot be relaxed without a proper discussion of how this affects the properties of the model (the discussion should be both theoretical and empirical in this sense). Taking assumptions which contradict the very fundamental reasons for having introduced the holographic approach in the first instance, reduces the soundness of the contribution.\n\n * The empirical analysis is very limited in scope, depth and reproducibility. Little details are provided as concerns the experimental setup and no reference to code is given (neither public anonimyzed nor attached to the submission as supplementary). It would be helpful if the Authors can provide additional details to facilitate reproducibility, including choice of optimizers, hyperparameters, as well as to gain a deeper insight into the computational charateristics of the approach, such as its computational costs and parameterization in the experiments.\n\n* The experiments on sequential data are too limited: only a single 2-dataset experiment with simple next token prediction tasks is provided. If the approach is put forward as an holographic equivalent of Transformers (in year 2024), then one would expect to see experiments on how the approach can be used to at least match a Transformer on proper language modeling tasks. 
The experiments with graph data are quite poor. There is no reference baseline model from literature (I would have expected a comparison with at least the most popular Graph Transformer models). The datasets used in the experiments are not widely recognized benchmarks by the graph NN community and this does not allow to compare the proposed model against the relevant related literature. The dataset on graph classification cannot be considered a proper graph benchmark: deciding between a fully connected and a non-fully connected graph does not require a model with the ability to capture complex structured relationships in a graph. It suffices a model which can count the number of ones in the adjacency matrix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024structureaware,\ntitle={Structure-aware Attention based on Vector Symbolic Architectures},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zET0Zg71WT},\nnote={under review}\n}"
},
"abstract": {
"value": "The introduction of the Transformer has brought about a revolution in AI. Central to the success of the Transformer architecture is the self-attention mechanism, enabling context dependence and long-range dependencies between tokens. Recent work has drawn an equivalence between Hopfield networks, a kind of associative memory model, and Transformers. In this work, we leverage this bridge, using Vector Symbolic Architectures (VSA), a brain-inspired computational paradigm capable of representing and implementing data structures, including associative memory models, to define a broad class of attention mechanisms catered for complex data types. In particular, we use Generalized Holographic Reduced Representations (GHRR), an implementation of a VSA, as the foundation for our proposed class of attention mechanisms. We show that GHRR is capable of implementing attention and design a GHRR Transformer encoder architecture based on the demonstrated mathematical equivalence. We propose a new kind of binding-based positional encoding based on methods used in VSAs for encoding sequential information. We extend the attention mechanism in our architecture to support graphs, inspired by techniques used in VSAs to encode graph representations. We evaluate the GHRR Transformer on language modeling, vertex classification, and graph classification tasks. Results suggest that our approach provides benefits in language modeling and graph classification tasks compared to baseline models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"transformers",
"attention",
"vector symbolic architectures",
"neurosymbolic ai",
"hyperdimensional computing"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/df207c160499bd6f96e1619821cf6a7056f97dce.pdf"
},
"presentation": null,
"primary_area": {
"value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Structure-aware Attention based on Vector Symbolic Architectures"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zEUDoD9cU9 | CycleVTON: Improving Diffusion-Based Virtual Try-On with Cycle-Consistent Training | main | Active | virtual try-on;diffusion;cycle-consistency | applications to computer vision, audio, language, and other modalities | 5;5;5;5 | 3;5;4;5 | 3;3;3;2 | 2;3;2;2 | 1;3;3;3 | 5 | 4.25 | 2.75 | 2.25 | 2.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to \"Weaknesses.\""
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper proposes a new cycle-consistent diffusion-based framework for virtual try-on, named CycleVTON.\n\n2. This paper introduces two complementary networks in CycleVTON. Alongside the conventional try-on network, we innovatively incorporate a clothing extraction network that extracts clothing from the human body and normalizes it to a frontal view.\n\n3. This paper proposes a cycle-consistency optimization strategy, which aligns the generated dressed human with real images and the extracted clothing with its true frontal appearance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes CycleVTON, which is a virtual try-on framework that uses a cycle-consistent diffusion-based approach. It consists of two conjugated networks: a regular try-on network and a clothing extraction network. The clothing extraction network standardizes clothing into a front-facing format, allowing for alignment between generated and real images. This cycle-consistent optimization strategy enhances the retention of clothing textures and structures, ensuring realistic and accurate clothing generation. The conjugated network structure supports traditional virtual try-on as well as flexible clothing extraction and exchange between individuals. Experiments on VITON-HD demonstrate its effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In the proposed architecture, the CLOTHING EXTRACTION NETWORK differs only in its function, yet it does not fundamentally diverge from the ReferenceNet used in previous methods, such as IDM-VTON.\n\n2. The proposed Cycle-Consistency Optimization Strategy is successful. However, there is a lack of validation for the effectiveness of its subprocesses and sub-losses, such as $L^_h$ and $L^_c$.\n\n3. Some of the data in Table 1 seem to differ from those in the original paper, why?\n\n4. Please provide the code to demonstrate reproducibility.\n\n5. The entire process is overly engineered, lacking rigorous theoretical validation in this type of conference."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Can the CEN generalize to new datasets (such as DeepFashion)? \nDoes the CEN affect the generalization capability of the virtual try-on?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well-written and easy to follow.\nThe proposed method outperforms existing baseline approaches on the VTON-HD dataset."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents CycleVTON, a framework for virtual try-on. CycleVTON consists of two main components: a virtual try-on network that synthesizes human images given a specific clothing style, and a clothing extraction network that generates clothing templates in a unified space. Experiments are conducted on VTON-HD to evaluate the performance of the proposed approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The idea of extracting human feature and clothing feature in a cycle framework is not novel, i.e., a similar work CycleVTON: A Cycle Mapping Framework for Parser-Free Virtual Try-On [Du et al., AAAI 2024]. The paper did not cite or discuss the CycleVTON paper [Du et al.]. A comparison against CycleVTON [Du et al.] should be provided since the ideas of the two papers are similar.\n\nThe technical contribution is limited. The virtual try-on part (ReferenceNet and denoising UNet) is borrowed from Animate Anyone [Hu et al.], and the main contribution is the idea of a clothing extraction network, which, however, has appeared in a previously published paper CycleVTON [Du et al.]. The technical contributions should highlight the differences against existing works.\n \nNo discussion on the failure cases (such as particular clothing types and poses). What are the limitations of the method?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Overall, CycleVTON represents an innovative approach to virtual try-on, balancing accuracy, flexibility, and robustness, with areas for improvement in handling complex clothing variations. But the cycle-consistent idea is not new, and please provide a detailed discussion on the paper: https://openaccess.thecvf.com/content/CVPR2021/papers/Ge_Disentangled_Cycle_Consistency_for_Highly-Realistic_Virtual_Try-On_CVPR_2021_paper.pdf"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The cycle-consistency strategy greatly enhances the model's ability to preserve clothing textures and structures, resulting in more realistic try-on outcomes.\n\n2. The dual-network design enables various applications, such as clothing extraction and swapping between persons.\n\n3. By utilizing clear frontal views for supervision, CycleVTON enhances adaptability to clothing deformations and variations in human poses, improving both supervision and robustness.\n\n4. It demonstrates superior performance, as the model surpasses state-of-the-art (SOTA) methods on the VITON-HD benchmark, excelling in both qualitative and quantitative measures, and highlighting its effectiveness in virtual try-on."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a cycle-consistent and diffusion-based framework for try-on. It consists of two networks: a regular try-on network and a clothing extraction network. The latter standardizes the clothing into a frontal view, allowing for a more realistic and accurate representation. Its consistent strategy optimizes the alignment between generated try-on images and original human images, as well as between extracted clothing and its accurate frontal view. This framework enables traditional virtual try-ons and adaptable applications such as clothing extraction and swapping. Experiments on the VITON-HD benchmark confirm CycleVTON’s superior performance compared to existing models. However, challenges remain in handling significant deformations and changes in clothing texture or orientation. Furthermore, the cycle-consistent idea is not new, and please refer to the https://openaccess.thecvf.com/content/CVPR2021/papers/Ge_Disentangled_Cycle_Consistency_for_Highly-Realistic_Virtual_Try-On_CVPR_2021_paper.pdf"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Significant clothing deformations or angle changes lead to artifacts such as wrinkling or stretching, which limit the model's ability to accurately reproduce the original textures and patterns.\n\n2. It depends on the standardized front views for optimal performance, which may limit its effectiveness with certain types of clothing.\n\n3. The dual-network cycle-consistent approach may raise computational costs and complexity, potentially affecting its use in real-time or resource-limited environments.\n\n4. The cycle idea is very close to this paper, so the novelty needs to be clarified.\nhttps://openaccess.thecvf.com/content/CVPR2021/papers/Ge_Disentangled_Cycle_Consistency_for_Highly-Realistic_Virtual_Try-On_CVPR_2021_paper.pdf"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. L258: Does the term \"frontal-view posture information\" refer to a canonical pose, or can it be any pose that is frontal? Additionally, is it correct to assume that the generated clothing image will be in this pose?\n\n2. Regarding the clothing extraction model, the input includes a photo of the clothed human, an image of the clothing itself, and the pose information. If the image of the clothing is already available, what is the purpose of using this model to extract the clothing again? Please clarify if there is a misunderstanding in my interpretation.\n\n3. Is there any limitations to the types of poses the model can handle?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The cycle consistency optimization strategy enables the two models to be trained jointly, providing the capability for both virtual try-on and virtual take-off of clothing.\n\n2. The input for the clothing image is not limited to a frontal view; the model can accept variations in the clothing image, which makes it more practical for real-world applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a 2D virtual try-on network capable of: 1) rendering an image of a human wearing a given piece of clothing based on an input human image and clothing image, and 2) extracting the image of clothing from a clothed human image. Existing methods have been utilized to extract information from the human image in task 1), such as pose, human parsing, and body mask. A cycle consistency optimization strategy is introduced to jointly optimize the two models for tasks 1) and 2), where the ground truth of model 1) can serve as input for model 2), and vice versa. This approach simplifies supervision and data acquisition while achieving a better representation of the overall appearance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The explanation of the methodology could benefit from clearer clarification, as it is difficult to understand which phrase refers to which \"image\" in the pipeline. For instance, in lines 261-267, adding notations and labeling components in Figure 2 would help avoid ambiguity. Readers may struggle to differentiate between the frontal-view image and the distorted image and add labels (e.g., A, B, C) to each image in Figure 2 and use these labels consistently in the text description will be helpful. Additionally, in Figure 2, using different clothing items for the try-on and take-off procedures would help reduce confusion, as presenting the same blue t-shirt makes the process harder to follow.\n\n2. The experiments were conducted on a dataset with professional fashion images, but in real-world virtual try-on applications, \"in the wild\" images are typically used. However, there is no experiment demonstrating how the proposed method performs on in-the-wild images. Could this suggest that the model may be overfitting to the fashion dataset and might struggle to adapt to more diverse, real-world images? the authors could conduct additional experiments on a dataset of non-professional, real-world images to demonstrate the model's generalization capabilities.\n\n3. The poses in the presented dataset appear to involve relatively limited movement. It would be beneficial to include examples or conduct additional experiments with more extreme or dynamic poses to demonstrate the model's robustness. \n\n4. L112 typo: ‘arping’ →’Warping’"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024cyclevton,\ntitle={Cycle{VTON}: Improving Diffusion-Based Virtual Try-On with Cycle-Consistent Training},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zEUDoD9cU9},\nnote={under review}\n}"
},
"abstract": {
"value": "We present CycleVTON, a cycle-consistent diffusion-based virtual try-on framework. Unlike existing methods that rely on a single try-on network, our model consists of two conjugated networks. In addition to the regular try-on network, we design a clothing extraction network that extracts the clothing worn by the person and standardizes it into a front-facing format. These two networks are symmetrical, enabling alignment between the generated dressed human and real images of dressed human, as well as between the extracted clothing and its front-facing ground truth. This cycle-consistent optimization strategy allows for enhanced retention of clothing textures and structures, ensuring a more realistic and accurate clothing generation in virtual try-on scenarios. Moreover, the conjugated network structure not only supports traditional virtual try-on but also allows flexible clothing extraction and clothing exchange between different individuals. The experiments on VITON-HD demonstrate the effectiveness of our approach."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"virtual try-on",
"diffusion",
"cycle-consistency"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d6f430bab3aa7094ff65ffc55a4f38a94857a30c.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "CycleVTON: Improving Diffusion-Based Virtual Try-On with Cycle-Consistent Training"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zEm5nXxiXU | AIDBench: A benchmark for evaluating the authorship identification capability of large language models | main | Active | large language models;privacy;authorship identification | datasets and benchmarks | 3;3;5 | 4;4;3 | 2;2;2 | 2;2;2 | 3;3;3 | 3.666667 | 3.666667 | 2 | 2 | 3 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "•\tIs it likely that the LLMs have been trained on much of the data of AIDBench? How much does this contribute to their performance?\n\n•\tIt’s okay not to have the formulas for standard metrics such as precision and recall. \n\n•\tPutting the random guess metrics from Table 3 in Table 4 / 5 would’ve made reading those tables much easier. \n\n•\tWill the dataset be released?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "•\tThe research question is interesting, namely whether LLMs can perform authorship identification tasks. \n\n•\tThe authors collect a new benchmark that could be useful for the community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce AIDBench, a new benchmark to evaluate the ability of LLMs to perform authorship identification. The benchmark is made up of five domains and two evaluation settings: one-to-one author identification (authorship verification), and one-to-many author identification (authorship retrieval). Multiple LLMs are prompted, both with and without a topic-controlled prompt, and a RAG-based approach is proposed in cases where the length of the candidate texts exceed context length of the LLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "•\tIt’s not clear whether a new benchmark was needed. There are many authorship verification and authorship retrieval datasets that could’ve been used to evaluate the attribution abilities of LLMs. This would’ve also made comparisons against established methods easier. \n* Authorship Verification (One-to-One) – https://pan.webis.de/clef23/pan23-web/author-identification.html (essays, emails, interviews, and speech transcriptions)\n - https://pan.webis.de/clef22/pan22-web/author-identification.html\n - https://pan.webis.de/clef21/pan21-web/author-identification.html\n\n* Authorship Retrieval (One-to-Many) – \n - Test dataset from - https://arxiv.org/abs/2105.07263\n\n•\tIt would’ve been better to evaluate with metrics that account for class imbalance, such as the F1 score. This would sharpen the results in Table 2. \n\n•\tThe RAG-based approach relies on semantic embeddings for it search. Given the nature of the task, it would’ve been more natural to rely upon stylistic embeddings such as the following:\n- https://aclanthology.org/2022.repl4nlp-1.26/\n- https://aclanthology.org/2021.emnlp-main.70/\n- https://arxiv.org/abs/2410.12757\n\n•\tAlthough AIDBench contains 5 domains, it seems that only two domains were evaluated on (in the main text), namely the Guardian and Research Papers. \n\n•\tIn the One-to-Many scenario, it’s not clear whether one can attribute a research paper to a single author, since they’re collaborative pieces of writing after all. I am not sure what to make of the One-to-Many results in this scenario. \n\n•\tThere are no baselines compared against. Some of the papers linked above, as well as previous submissions to the PAN CLEF Author Verification challenge would’ve been good baselines. \n\n•\tI found the description of the way the one-to-many subsets were created to be confusing. Lines 365-369."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In the one-to-many identification setting, the authors randomly sample a number of authors and compile all of their writings into a set. Does this approach sufficiently simulate real-world conditions? Could this method inadvertently bias the results by creating sets where all texts are from the same author, and if so, how could this be mitigated?\n\nIn section 2.4, there is a discussion about using cosine similarity for authorship identification. Should the paper provide more rigorous discussion on whether high cosine similarity actually correlates with authorship? Could high similarity merely indicate shared topics or content rather than distinct writing styles, and if so, how could this be addressed?\n\nDoes the paper sufficiently address the potential limitations posed by potential data leakage? What strategies could the authors adopt to mitigate or acknowledge this potential bias in their results?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The research shows that LLMs, particularly when enhanced by the RAG method, can outperform random chance in identifying authorship, demonstrating potential privacy risks in de-anonymizing texts in various systems.\n- The paper introduces a benchmark that includes a variety of datasets from different domains, allowing for a thorough evaluation of LLMs in authorship identification.\n- The authors address LLM limitations with long input by introducing a RAG-based method, which improves large-scale authorship identification tasks with large number of candidate authors and text.\n- It highlights the underexplored privacy risks posed by LLMs, bringing attention to the potential de-anonymization of authors, particularly in sensitive environments like peer reviews."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces the AIDBench benchmark that includes a diverse range of datasets to systematically test LLMs’ authorship identification capabilities. The authors test LLMs using two tasks. To enhance LLM performance in authorship identification when texts are too lengthy, the paper introduces a RAG-based method to improve accuracy. This method involves first filtering candidate texts and retrieving the most relevant chunks to keep within the LLM’s context window limits."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper's methodology for one-to-many identification may have a potential problem. By randomly sampling a number of authors and placing all their writings into a set, there is a risk that all texts in the set come from the same author. This introduces a bias and undermines the validity of the experiment.\n- The paper uses identifying anonymous reviewers as one of the motivations, yet the dataset used for analysis consists of research papers rather than actual academic reviews. Consider modify the motivation part, as the two types of text differ substantially in terms of length, tone, and style. Reviews are typically shorter and more opinion-based, while research papers are formal and content-rich.\n- In Section 2.4, the paper doesn't clearly discuss whether high cosine similarity is a reliable indicator of authorship. Cosine similarity typically measures the overlap in content or topic rather than writing style. High similarity scores could simply reflect the fact that two texts discuss similar subjects rather than being written by the same author. \n- The paper should acknowledge the potential limitation of the benchmark dataset, particularly the likelihood that the data used in this study were also used to train LLMs.\n- Section 3.1 notes that the results in Table 2 lead to a different conclusion from prior work. However, this discrepancy may be due to the fact that the previous work employed a different evaluation metric (accuracy) and a different dataset."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The manuscript mentions that embedding models tend to focus more on semantic meanings than on linguistic characteristics. However, there might be instances where texts from the same author share similar linguistic styles but differ significantly in semantic content. How to ensure that the top-k candidates cover texts that belong to the target author in all scenarios?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors introduce the RAG approach to address the challenge of one-to-many authorship identification, which could handle large-scale text collections and improve the efficacy of identifying multiple texts by a single author across extensive datasets.\n2. The experiments conducted across various text datasets . By demonstrating the model's performance in different scenarios, the authors show the adaptability of the RAG method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new benchmark, AIDBench, to show the privacy risks posed by large language models (LLMs) in potentially compromising the anonymity of authors across various text formats. The Retrieval-Augmented Generation (RAG)-based method is proposed to help authorship identification for large-scale texts exceeding typical model context windows."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The abstract suggests that the aim of this study is to explore the privacy risks associated with the use of LLMs for recognizing the authorship of anonymous texts. However, the experimental section seems primarily focused on validating the authorship recognition capabilities of these models and proposing new method to enhance their efficiency. There might appear to be a gap as the experiments do not adequately assess the outlined privacy risks, nor do they propose potential mitigation strategies.\n2. While the the RAG method is proper for one-to-many authorship identification, providing a comparison with baseline models, particularly under conditions where text lengths do not exceed the model's context window, would enable a better evaluation of the RAG method’s effectiveness.\n3. The selection and filtering of datasets, such as the \"50 papers filter 10 papers\" scenario, might be crucial in evaluating the model's performance but are insufficiently described. A more detailed explanation of how these settings were chosen and optimized are expected.\n4. The results section lacks the further analysis of why certain models perform differently under various tests. For instance, the Claude-3.5-sonnet model significantly outperforms others in the task involving 5 authors and 50 papers. It would be insightful if the authors could discuss potential reasons for this model's superior performance and possible factors leading to failures in others."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce AIDBench, a benchmark for evaluating large language models' ability to identify the authorship of anonymous texts, highlighting new privacy risks"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024aidbench,\ntitle={{AIDB}ench: A benchmark for evaluating the authorship identification capability of large language models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zEm5nXxiXU},\nnote={under review}\n}"
},
"abstract": {
"value": "As large language models (LLMs) rapidly advance and integrate into daily life, the privacy risks they pose are attracting increasing attention. We focus on a specific privacy risk where LLMs may help identify the authorship of anonymous texts, which challenges the effectiveness of anonymity in real-world systems such as anonymous peer review systems. To investigate these risks, we present AIDBench, a new benchmark that incorporates several author identification datasets, including emails, blogs, reviews, articles, and research papers. AIDBench utilizes two evaluation methods: one-to-one authorship identification, which determines whether two texts are from the same author; and one-to-many authorship identification, which, given a query text and a list of candidate texts, identifies the candidate most likely written by the same author as the query text. We also introduce a Retrieval-Augmented Generation (RAG)-based method to enhance the large-scale authorship identification capabilities of LLMs, particularly when input lengths exceed the models' context windows, thereby establishing a new baseline for authorship identification using LLMs. Our experiments with AIDBench demonstrate that LLMs can correctly guess authorship at rates well above random chance, revealing new privacy risks posed by these powerful models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"large language models",
"privacy",
"authorship identification"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8cab422fe32dd1a1ffce73245de25ce69257eaac.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "AIDBench: A benchmark for evaluating the authorship identification capability of large language models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zFfZEQHUiv | Towards Pose-Free Dynamic Neural Fields: Leveraging Geometric Foundation Models | main | Active | Neural Rendering;Pose-free;Gaussian Splatting;Dynamic View Synthesis | applications to computer vision, audio, language, and other modalities | 3;3;5;5 | 5;5;4;3 | 2;3;2;2 | 2;2;2;2 | 3;2;4;2 | 4 | 4.25 | 2.25 | 2 | 2.75 | -0.904534 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I'm wondering about the setup of the ground truth data and novel view data in the proposed dataset:\n1. only 1-3 objects are dynamic in the synthesis space yet all the other 10-20 objects and backgrounds are still. Is it a reasonable ratio? if the still objects (including the simple background) are less challenging, will the error from the dynamic object be overwhelmed? Especially when the input camera poses go around the scene like a scanning, the still objects will get much benefit from this camera movement. It is recommended to justify this ratio of dynamic to static objects, perhaps based on real-world scenarios or existing datasets. Another idea is to conduct an ablation study varying this ratio to show its impact on the model's performance.\n\n2. In the appendix, it is described that the monocular input ground truth is the 100 frames from the first camera, and the novel view ground truth is the 100 cameras at time 0. Is the novel view evaluation only tested on time 0? Won't introduce bias? Please clarify if it has been considered to evaluate novel views at different time steps. Or at least discuss potential biases this evaluation method might introduce and how they might address or mitigate these biases."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper presents a significant advancement by proposing a batchwise optimization method for DUSt3R, enabling it to be used effectively for dynamic Gaussian Splatting (DGS) without relying on Structure from Motion (SfM) pipelines like COLMAP. This innovation is particularly valuable as it removes the dependency on pre-calibrated camera data, allowing the model to perform self-calibration even in scenes with extensive object and camera motion. By introducing this self-calibrating approach, the paper expands the application range of DGS, addressing a major gap in pose-free dynamic view synthesis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents SC-4DGS (Self-Calibrating 4D Gaussian Splatting), a novel approach for pose-free dynamic view synthesis from monocular videos. Current dynamic neural field models rely heavily on Structure from Motion (SfM) and static scene assumptions, which limit their applicability in real-world scenarios with significant motion. SC-4DGS addresses these limitations by jointly optimizing camera poses and dynamic Gaussian representations without requiring pre-calibrated camera data or static scenes.\n\nThe approach leverages DUSt3R, a geometric foundation model, to provide initial pose and point cloud estimations. It introduces batch-wise optimization and an extended motion representation tailored to DUSt3R’s capabilities, allowing the model to handle dense frames efficiently. SC-4DGS also incorporates several regularization terms to improve geometric accuracy in rendering.\n\nTo evaluate SC-4DGS, the authors introduce Kubric-MRig, a new benchmark dataset designed to test calibration and rendering performance in dynamic scenes with extensive object and camera motion. Experimental results show that SC-4DGS outperforms previous pose-free dynamic neural fields and achieves competitive results against state-of-the-art pose-free 3D neural fields."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Clarity of Figures and Tables: \nThe visual presentation of some figures and tables could be significantly improved for readability and clarity: Figure 1 is not illustrative and hard to follow even if combining the main content. At least it can add a legend or annotations that explain each variable clearly, making it challenging for readers to follow without detailed reference to the text. Adding a legend or in-figure labels could improve comprehension. Figure 4 would benefit from a horizontal arrangement of camera rotations, as it would make the sequence and rotation dynamics easier to interpret at a glance. Addressing these issues would make the paper more accessible, particularly in sections introducing new methods and experimental setups.\n\nExcessive Detail in Preliminary and Method Sections (Sections 3.1 and 3.3):\nSection 3.1 delves too deeply into the basics of 3D Gaussian Splatting, including eight detailed equations that do not play a critical role in subsequent sections. While introductory information is helpful, streamlining this part to focus only on the essentials would allow readers to concentrate on the novel contributions. Similarly, Section 3.3 introduces multiple variables in equations that are only referenced once, which adds cognitive load without enhancing understanding. Reducing or simplifying these equations, or moving some details to an appendix, could maintain the paper’s technical rigor while improving readability.\n\nDataset Design Limitations: \nThe Kubric-MRig dataset, while useful, is overly simplified in certain respects: The background lacks detail and depth variation, as it is primarily a simple ground plane. This simplification limits the model's ability to generalize to more complex real-world scenes where background intricacies affect depth and spatial perception. 
The dataset's objects are all synthetic geometric shapes or scanned objects with basic forms, which fall short of representing realistic, complex shapes such as human bodies, animals, or vehicles. Incorporating more diverse objects with varied textures and structures would enhance the dataset’s utility as a benchmark for dynamic view synthesis in practical applications. For example, including urban environments with buildings, natural scenes with vegetation, or indoor settings with furniture. For objects, adding articulated models of humans or animals, or complex mechanical objects like vehicles.\n\nLimited Novelty: \nWhile the batch-wise optimization approach for DUSt3R to enable its use in dynamic Gaussian Splatting is a valuable engineering contribution, the novelty is relatively incremental: The main contribution is an engineering improvement rather than a theoretical innovation, which may not meet the high standards of groundbreaking novelty expected at ICLR. \nOn the other hand, to strengthen the impact, the authors could discuss broader implications of this method, such as potential extensions or applications in other areas, or propose future directions that could build on this optimization. For example, it can be tested on other types of 3D reconstruction models including dynamic NeRFs which also rely on COLMAP, or if it has potential applications in fields like robotics or augmented reality."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1. The batchwise optimization makes joint optimization of pose and dynamic rendering possible, but is it capable of correcting pose estimation errors if DUSt3R provides a suboptimal initial pose estimation? In other words, how robust is the system to inaccuracies in the initial poses provided by DUSt3R?\nQ2. If SC-4DGS shares similar failure conditions with most SfM-based methods, it seems like the approach is merely combining a two-step process into one without addressing the inherent issue of pose estimation errors. In scenarios where ground truth poses are not available, what specific conditions would lead to failures in pose estimation and rendering for SfM-based methods, but would still allow SC-4DGS to correctly render dynamic scenes and estimate poses?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper presents a fully integrated pipeline that allows for pose-free dynamic Gaussian splatting rendering.\n\nBased on my understanding, this is more of a pipeline paper rather than a structure-based improvement. However, the batch-wise optimization introduces novelty by addressing the memory and computational limitations of DUSt3R, making the method scalable for dynamic scenes.\n\nA new dataset that addresses limitations that previous benchmark could not assess"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces SC-4DGS, a pose-free optimization pipeline for dynamic view synthesis by combining DUSt3R for initial self-calibration, DynMF for dynamic motion representation, and a novel batchwise optimization strategy. The method claims to eliminate the need for structure-from-motion and enables the joint optimization of camera poses and dynamic scene representations, achieving high-quality rendering for complex dynamic scenes in a monocular setting. It also introduces a new dataset, Kubric-MRig, to evaluate both camera calibration and novel view synthesis performance in dynamic scenes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The proposed method heavily depends on prior approaches such as DUSt3R for both pose estimation and dynamic rendering. As a result, any failure in DUSt3R directly causes the failure of the proposed method. Moreover, the paper lacks some critical comparative experiments. While it repeatedly claims that structure-from-motion (SfM) methods, like Colmap, often fail, leading to inaccurate pose estimation and rendering, it would be beneficial to include experiments that clearly show where SfM combined with standard 4DGS falls short and where SC-4DGS excels. This would more effectively demonstrate the advantages of the proposed approach.\n\nAdditionally, the paper feels somewhat incomplete and lacks polish in its writing. For example, in the ablation study on pose initialization strategies, the following statement is problematic: \"We explore four batch sampling strategies when initializing poses via DUSt3R: naive, sequential (SQ), strided batch (SB), and ours… SB uses the strided batch technique for optimization.\" This sentence is uninformative, as it merely reiterates the meaning of the abbreviation without adding value. Furthermore, the following sentence refers to \"Appendix 4.4\" for more details, but upon checking, there is no such appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1.Based on my understanding, RoDyNeRF uses an explicit neural voxel radiance field in the section 3. The paper’s description of it as an implicit representation is inaccurate and could lead to misunderstandings.\n2.In Table 1, the terms “wide viewpoints” and “large motion” are used but lack clear quantitative measures. Providing specific metrics or thresholds for these terms, such as angular range for viewpoints or displacement magnitude for motion, would help readers better understand how your dataset outperform than others.\n3.The preliminary section in 3.1 is extensive. If this content is not the authors’ own contribution, a concise overview would be more effective, allowing readers to focus on the novel aspects of the paper.\n4. DUSt3R requires multi-view images for point cloud reconstruction, similar to COLMAP. What scenarios does DUSt3R handle that COLMAP cannot? Identifying and explaining these differences would clarify DUSt3R’s advantages and justify its use over COLMAP. The author could provide more results to show DUSt3R outperform COLMAP.\n5.If my understanding is incorrect, please let me know. Most objects in the dataset are rigid objects, meaning they do not undergo mesh deformations like humans or animals do during motion, which makes the dataset less challenging. The authors could include some non-rigid objects in the dataset or discussing why you don't do that in the paper."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is among the first to achieve pose-free 4D Gaussian Splatting, which provides valuable insights for future research in dynamic scene reconstruction.\n2.The paper provides highly detailed mathematical formulations, which effectively build the theoretical foundation of SC-4DGS. Also, this thorough mathematical foundation enhances clarity, helping readers understand both the model structure and the optimization processes involved.\n3.The introduction of the Kubric-MRig dataset fills a gap in DVS benchmarking by incorporating extensive camera and object motion alongside ground truth data for calibration and novel view synthesis, providing a robust dataset for dynamic scene evaluation.\n4.The SC-4DGS method demonstrates large improvements over previous dynamic neural field approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the limitations of traditional dynamic view synthesis (DVS) from monocular videos, which relies on Structure from Motion (SfM) and thus requires stationary scenes. To overcome this, the authors propose SC-4DGS, a pose-free optimization pipeline for dynamic Gaussian Splatting (3DGS) that eliminates the need for SfM through self-calibration using geometric priors from DUSt3R. SC-4DGS introduces batchwise optimization and an extended motion representation to jointly optimize camera poses and scene representations in dynamic settings. Additionally, the paper introduces Kubric-MRig, a benchmark dataset with photorealistic scenes featuring complex camera and object movements for evaluating calibration and rendering quality. Experiments show that SC-4DGS outperforms existing pose-free 4D neural fields and performs competitively with 3D neural fields, proving effective for complex, real-world dynamic scenes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The SC-4DGS model lacks originality, as it largely combines the existing DYNMF and DUSt3R models. The primary change appears to be the replacement of COLMAP with DUSt3R for point cloud generation, without significant structural improvements. Demonstrating unique contributions or optimizations beyond this combination would strengthen the paper’s impact.\n2. Although the paper highlights the high computational complexity of DUSt3R, it lacks specific training time comparisons. Quantitative results showing the time reduction achieved through the paper’s optimizations would help illustrate the efficiency of SC-4DGS.\n3.The experiments mainly compare SC-4DGS with RoDyNeRF under conditions without ground-truth (GT) camera poses, and the results do not surpass methods using GT. This limited comparison, with only RoDyNeRF as a baseline in the absence of GT, raises concerns about the reliability of the findings.\n4.The paper states that most methods use COLMAP to serve as GT camera poses and point cloud; however, COLMAP estimates are not true GT values but approximations. Since SC-4DGS replaces COLMAP with DUSt3R for point cloud and pose estimation, it’s essential to demonstrate the advantages of this change. An experiment comparing COLMAP’s and DUSt3R’s effectiveness in scenarios where COLMAP fails but DUSt3R succeeds would substantiate SC-4DGS’s advantage.\n5.Although SC-4DGS is designed for monocular video inputs, the Kubric-MRig dataset features wide variation among its 100 cameras, and rapid camera switching may lead the monocular sequence to resemble stereo video. This could create inconsistencies when using the dataset to evaluate the performance of a strictly monocular input model. Adjustments to the dataset or a careful clarification of how it aligns with the paper’s objectives would address these concerns."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Since not mentioned in the paper, did you use the G.T. camera intrinsic? Or optimizing the camera intrinsic? Please make it clear in the paper. \n2. The proposed method seems to highly rely on DUSt3R which is an existing work for static scenes. But in the method section, the author claimed they used DUSt3R for camera pose initialization, I agree that DUSt3R can be a good initialization in some small or regular motion scenes, but I am wondering whether DUSt3R can work well in some large movement datasets such as DAVIS[1], iPhone,...? I am expecting the authors can conduct more experiments on DAVIS and iPhone to show the effectiveness and robustness of their method.\n3. Can the authors provide some motion mask samples generated by them? Since I think the motion mask estimation of RoDynRF is not that robust and effective.\n\n[1] Pont-Tuset, Jordi, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. \"The 2017 davis challenge on video object segmentation.\" arXiv preprint arXiv:1704.00675 (2017)."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors incorporate DUSt3R for a wonderful point cloud and camera pose initialization. \n2. The authors proposed a new dataset that can benefit the research in this field.\n3. The author provides experiments on both dynamic and static scenes."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is built on the existing work DUSt3R. Firstly, the authors implemented DUSt3R with their new way of batch-wise alignment for the point clouds and camera pose initialization. After the canonicalization of points, with the benefit of monocular depth estimation from DepthAnything and the motion mask estimation from RoDynRF, they introduce more regularization loss to jointly optimize the camera poses, and scene representations. In addition, they claimed to propose a more challenging dataset for DVS. However, I keep my doubts about the statement that the proposed dataset is challenging. Besides, their heavy reliance on the initialization of DUSt3R makes me doubt their contribution."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. From my perspective, this paper lacks comparisons with proper baselines. For example, there are some accepted works like casualSAM[1], LEAP-VO[2], ParticlesfM[3], ..., which I think should be compared with. Or can the authors provide some reasons for why lack of them?\n2. I think the authors should include the comparisons with COLMAP, since although COLMAP is somehow theoretically designed for static scenes, I believe it does not completely fail on most of the datasets. The exhaustive matching step can help COLMAP get rid of some (dynamic) outliers, even though some noise exists.\n2. I do not agree that in table 1, the authors claimed that the iPhone dataset does not contain large motion (e.g. 'spin', 'apple', 'space-out', 'pillow', 'teddy', ...).\n3. From my perspective, the authors introduce a lot of regularization terms, however, for dynamic scene experiments, the authors only conducted experiments on one public dataset, which I think is not enough.\n4. The authors claimed that COLMAP is very time-consuming, but I do not see any time comparisons with COLMAP or the existing baselines.\n\n\n[1] Zhang, Zhoutong, Forrester Cole, Zhengqi Li, Michael Rubinstein, Noah Snavely, and William T. Freeman. \"Structure and motion from casual videos.\" In European Conference on Computer Vision, pp. 20-37. Cham: Springer Nature Switzerland, 2022.\n\n[2] Chen, Weirong, Le Chen, Rui Wang, and Marc Pollefeys. \"LEAP-VO: Long-term Effective Any Point Tracking for Visual Odometry.\" In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 19844-19853. 2024. \n\n[3] Zhao, Wang, Shaohui Liu, Hengkai Guo, Wenping Wang, and Yong-Jin Liu. \"Particlesfm: Exploiting dense point trajectories for localizing moving cameras in the wild.\" In European Conference on Computer Vision, pp. 523-542. Cham: Springer Nature Switzerland, 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Pose-free 4D Gaussian Splatting for Causaully Captured Videos"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Pose-Free Dynamic Neural Fields: Leveraging Geometric Foundation Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zFfZEQHUiv},\nnote={under review}\n}"
},
"abstract": {
"value": "Dynamic view synthesis (DVS) from monocular videos has remarkably advanced in recent years, achieving high-fidelity rendering with reduced computational costs. Despite these advancements, the optimization of dynamic neural fields still relies on traditional structure from motion (SfM), requiring that all objects remain stationary during scene capture. To address this limitation, we present \\textbf{SC-4DGS}, a pose-free optimization pipeline for dynamic Gaussian Splatting (GS) from monocular videos, which eliminates the need for SfM through self-calibration. Specifically, we jointly optimize dynamic Gaussian representations and camera poses by utilizing DUSt3R, enabling accurate calibration and rendering.\nFurthermore, we introduce a comprehensive benchmark, \\textbf{Kubric-MRig}, that includes extensive camera and object motions along with simultaneous multi-view captures. \nUnlike previous benchmarks for DVS, where ground truths for camera information are absent due to the difficulty of capturing multiple viewpoints simultaneously, it facilitates evaluating both calibration and rendering quality in dynamic scenes.\nExperimental results demonstrate that the proposed method outperforms previous pose-free dynamic neural fields and achieves competitive performance compared to existing pose-free 3D neural fields."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neural Rendering",
"Pose-free",
"Gaussian Splatting",
"Dynamic View Synthesis"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c5f53331d12d457972a0c3ce3da4321f29f7c250.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Pose-Free Dynamic Neural Fields: Leveraging Geometric Foundation Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zG2vcC1l1f | KEA: Keeping Exploration Alive by Proactively Coordinating Exploration Strategies in Curiosity-driven Exploration | main | Active | Reinforcement Learning;Curiosity-based Exploration;Sparse Reward;Soft Actor-Critic | reinforcement learning | 3;3;5;5 | 4;4;4;4 | 3;2;3;2 | 2;1;2;2 | 2;3;2;3 | 4 | 4 | 2.5 | 1.75 | 2.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Some additional questions and suggestions:\n\n-In figure 3, the SAC line seems to be flat? I am surprised this task cannot be learned at all by SAC, and that the best method only reaches a ~60% success rate. Perhaps the degree of sparsity is not clear to me from the task description. Perhaps that sparsity could be elaborated on somehow?\n\n-I suggest adjusting page spacing so that the critical algorithms 3 and 4 are on the same page, since they are connected and key to this paper's contribution\n\n-Figure 4 feels like supplemental material to me, since it's basically just validating hyperparameter choices.\n\n-The caption on figure 6 should say what the shaded regions represent.\n\n-Also in figure 6, it looks like the algorithms haven't clearly converged on most of the three tasks. 500k environment steps is not a very large number, perhaps it would be worthwhile to run these experiments for the commonly-used 1 million steps to make sure there's separation at convergence?\n\n-Why is it valuable to vary update to data ratios so much? Is there a reason this is a key hyperparameter for this algorithm? If so that should be explained in the paper. It's not clear how this relates to explore-exploit tradeoffs\n\n-For that matter, why is sigma (which appears to be a key hyperparameter) not varied? It seems like performance should be sensitive to that parameter since it controls algorithm switching (the core contribution)\n\n-Further, when and how much does KEA actually switch between the policies? This is the core phenomenon of this paper, so surely there should be some confirmation it's actually switching regularly and in a structured way, no?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper is generally well written, and tackles a valid problem in RL, how to gracefully handle the explore-exploit tradeoff. The core results are clear, and the resulting algorithm seems to outperform the baseline by a significant amount on the evaluation task suite. The approach is, to my knowledge, original, though other methods have tackled this problem before in various ways."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Keeping Exploration Alive (KEA), an algorithm that switches between exploration policies depending on the value of the intrinsic reward in order to reduce sub-optimal performance due to explore-exploit tradeoffs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The core issue I have with this paper is that there's very little exploration of the proposed algorithm beyond claims of superior performance. The core concept is simple (not a bad thing), but there's no real consideration given to what makes it good or useful. Seeing as the core contribution of this work is very simple, it should be straightforward to do deeper analysis and benchmarking, but this paper doesn't do much of that, and largely just concludes \"it works.\"\n\nAs a result, I can't help but feel this paper is just sort of insubstantial and doesn't contribute that much to the field. It proposes an idea that is novel, but straightforward as an extension of past work, and doesn't do anything with the idea beyond show that it outperforms the baseline. If accepted as is, I'm not sure how much this will actually move the topic of exploration forward.\n\nAs such, I'm inclined to recommend rejection, but would like to see an expanded version of this work submitted to a future conference, because I do like the core idea of dynamically switching between explore and exploit policies for off-policy RL."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "-How sensitive is the model to the switching threshold parameter? \n\n- Was this threshold hand-tuned or optimized in some manner?\n\n- What is the computational impact of using two agents in terms of memory and processing requirements?\n\n- Does the model perform well in highly dynamic environments with frequently changing goals?\n\n- Can the method applied to other RL algorithms, beyond SAC?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The method is easy to understand and contributes to alleviating an important issue in the field of reinforcement learning. \n\n- Proposes a unique combination of SAC and a co-behavior agent with a dynamic switching mechanism.\n\n- Shows substantial performance gains in benchmarks like 2D navigation tasks and robot control.\n\n- Paper well written and easy to follow"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces KEA (Keeping Exploration Alive), a framework designed to enhance exploration efficiency in reinforcement learning, specifically for sparse reward environments. The core contribution of the paper is a novel method that combines the Soft Actor-Critic (SAC) approach with a co-behavior agent, designed to improve exploration by proactively coordinating multiple strategies. This coordination is facilitated by a dynamic switching mechanism, which alternates control between the SAC agent, which includes a curiosity-driven exploration method, and the co-behavior agent based on the novelty of the state being explored. This mechanism enables KEA to maintain high stochasticity in high-novelty regions and avoid premature convergence, thereby improving both learning efficiency and robustness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper should discuss how the switching mechanism threshold is tuned and its effect on the performance. This appears to be a key mechanism but somehow ablation studies on this component are limited. \n\n- The evaluation lacks comparison with papers that address a similar issue, such as [Never give up: Learning directed exploration strategies, Badia et al., 2020]. I would suggest adding stronger baselines to validate the proposed method.\n\n- The paper’s proposed method, KEA, is designed specifically with SAC in mind, leveraging SAC’s ability to handle exploration-exploitation trade-offs in off-policy settings. However, the authors do not discuss how KEA might generalize to other off-policy algorithms. \n\n- Finally, one of my main concerns comes from the switching mechanism, which utilizes a fixed threshold to decide when to switch between the SAC agent with curiosity-driven exploration and the co-behavior agent. While this approach appears effective in simulations, using a fixed threshold might lack the flexibility needed for different environments or tasks, especially those with varying reward sparsity or novelty patterns. This inflexibility could limit KEA’s adaptability, potentially leading to less effective exploration in more complex or dynamic scenarios"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In Figure 2 (right-upper part), when the intrinsic reward is higher than a threshold, KEA switches to the co-behavior agent $\\pi^B$. While this region is novel (i.e., high intrinsic reward), meaning the agent has not been well trained here. Why does the non-selected agent $\\pi^{SAC}$ show an uneven distribution? Generally, for unfamiliar samples (those that have been rarely trained), the policy tends to output a relatively uniform distribution.\n\n2. In Section 3.2, how is UTD set for both $\\pi^{SAC}$ and $\\pi^B$ respectively? In the UTD experiment, how is the batch size set? Since batch size has an impact on UTD performance, more details would be helpful.\n\n3. The reference paper [4] presents a framework similar to KEA, where an additional agent is trained, and a switching control mechanism is designed. I encourage the authors to discuss this relevant work and make comparisons.\n\n[4] Mguni, David, et al. \"Learning to shape rewards using a game of two partners.\" Proceedings of the AAAI Conference on Artificial Intelligence. 2023.\n\nI would like to increase the score if these concerns are addressed."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The approach presented in this paper is straightforward and easy to implement. It effectively combines two exploration strategies (curiosity-driven exploration and stochastic policy sampling). Experimental results demonstrate improved performance compared to approaches that rely on a single exploration strategy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a switching mechanism to coordinate exploration behaviors from two agents, an SAC agent with curiosity-based exploration strategy, and a co-behavior agent with only stochastic policy to draw actions. The proposed algorithm used the novelty intrinsic reward to choose which agent to output action by comparing with a threshold. Therefore, the proposed algorithm can actively coordinate two different exploration strategies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The underlying assumption for the switching criterion is not intuitive. In Equation (4) and the following explanation (from around Line 239 to Line 245), it is stated that for low intrinsic reward regions (i.e., low novelty), KEA switches to curiosity-based exploration to encourage the agent to explore novel areas. Conversely, for high novelty regions, KEA switches to a more stochastic sampling policy. However, it would make sense if it was the other way around, as high intrinsic rewards usually indicate that the area is novel and worth exploring further, making it more logical to apply a strong exploration strategy. Conversely, low intrinsic reward regions (low novelty) might be better suited for stochastic sampling. I recommend that this assumption be supported with more quantitative analysis or additional comparative experiments.\n\n2. The paper conflates curiosity-based exploration and novelty-based intrinsic reward exploration. They are generally considered two distinct approaches for encouraging exploration. While this doesn't undermine the core idea of the paper, clearer distinctions between the two should be made, and the terminology used should be more precise. Including more related representative works would also help.\n\n3. The experimental section can be improved. First, I suggest that the authors include additional baselines that focus specifically on exploration, beyond just the backbone algorithms. Since KEA’s core idea is to combine two exploration strategies, only comparing it against SAC, RND, and NovelD seems more like an ablation study (with only one agent in KEA involved). Representative SOTA works like [1][2][3] would offer better context for evaluating KEA’s performance.\nAdditionally, for KEA itself, it would be valuable to explore other factors, such as how different values for the switching criterion (hyperparameter $\\delta$) affect the model's switching behavior, how many times were $\\pi^{SAC}$ and $\\pi^B$ activated in training, etc.\n\n4. The writing and expression can be improved, for example: (1) Lines 199-203 and Lines 128-130 are repeated verbatim, and (2) paragraphs like Lines 308-315 and Lines 404-411 could be better presented in table format for clearer understanding.\n\n[1] Devidze, Rati, Parameswaran Kamalaruban, and Adish Singla. \"Exploration-guided reward shaping for reinforcement learning under sparse rewards.\" Advances in Neural Information Processing Systems (2022).\n\n[2] Trott, Alexander, et al. \"Keeping your distance: Solving sparse reward tasks using self-balancing shaped rewards.\" Advances in Neural Information Processing Systems (2019).\n\n[3] Hong, Zhang-Wei, et al. \"Diversity-driven exploration strategy for deep reinforcement learning.\" Advances in neural information processing systems (2018)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In Fig. 1, I didn't understand why \"high intrinsic rewards can cause premature exploitation of novel regions\". Could you further elaborate this sentence?\n2. The statement of \"Soft Actor-Critic (SAC) is known to be sensitive to the scale of rewards\" should also be properly justified either with a small experiment or some relevant literatures.\n3. How is the co-behavior policy actually trained?\n4. Does high stochasticity always help with collecting diverse data from the environment? Methods such as novelD may encourage agent to try new actions to reach new states. This would consequently create more diverse data. But after introducing the high-variance co-behavior policy, the agent may fall back to random policy and thus, collect repeated transitions.\n5. Could you justify why RND is used for further analysis not NovelD?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper addresses an important research question. Applying off-policy methods to tasks with sparse rewards is always tricky and existing curiosity-based methods are not working out of the box.\n2. The paper is well motivated via a good visualization. The interplay between policy entropy and state novelty is interesting to look into.\n3. The proposed algorithm is simple to implement with a minimal extra computing budget. The switching mechanism is clearly written.\n4. The ablation study of various UTD ratios is also interesting. \n5. Relevant literature is well summarised."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Reinforcement Learning tasks with sparse rewards are always hard to solve. Curiosity-based approaches are derived to facilitate the exploration. However, Soft Actor-Critic (SAC) does not perform well even with curiosity-based exploration. To address this issue, this paper introduces a new exploration strategy KEA (Keeping Exploration Alive), which combines entropy-based exploration and curiosity-based exploration strategies by introducing a simple switching mechanism between two policies with different variance. This mechanism decides which policy to deploy based on the intrinsic rewards.\n\nFor experiments, the authors conducted a simple 2D discrete navigation task and analysed the learned policy’s entropy and novelty. Results show that their method can improve performance or learning speed for NovelD and RND. They also tried the methods on a set of sparse continuous control tasks. KEA outperforms baseline algorithms. In an ablation study of different UTD (Update-to-Data) ratios, KEA is more consistent than other baseline methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The scope of the paper is rather limited as the authors only tackle Soft Actor-Critic (SAC) in their paper. This paper can be extended to other off-policy algorithms as they all share the issues of premature convergence of the exploration and the influence from different UTD ratios.\n2. The experiments are also not very convincing. First, the proposed 2D navigation task is too simple and the performance improvement is also not significant. Second, NovelD and RND both are used as baseline algorithms and NovelD has achieved better performance compared to RND. Yet, the latter experiments are all about RND. Thirdly, in the continuous tasks, the three tasks are not trained for enough time. The performance improvement is also limited.\n3. The presentation/clarity of the paper should also be improved. For example, I didn't fully get Fig. 1's layout in the beginning. Some sentences are also written in a vague way without further explanation (See questions section). The training of the co-behavior policy is not mathematically defined."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "KEA improves exploration in sparse reward environments by proactively coordinating exploration strategies when combining SAC with curiosity-based methods, maintaining exploration-exploitation balance, and substantially improving learning efficiency."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024kea,\ntitle={{KEA}: Keeping Exploration Alive by Proactively Coordinating Exploration Strategies in Curiosity-driven Exploration},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zG2vcC1l1f},\nnote={under review}\n}"
},
"abstract": {
"value": "In continuous control tasks, Soft Actor-Critic (SAC) has achieved notable success by balancing exploration and exploitation. However, SAC struggles in sparse reward environments, where infrequent rewards hinder efficient exploration. While curiosity-driven exploration methods help address this issue by encouraging the agent to explore novel states, they introduce challenges, such as the difficulty of setting an optimal reward scale and managing the interaction between curiosity-based exploration and SAC’s stochastic policy. These complexities often lead to inefficient exploration or premature convergence and make balancing exploration-exploitation challenging. In this paper, we propose KEA (Keeping Exploration Alive) to tackle the inefficiencies in balancing the exploration-exploitation trade-off when combining SAC with curiosity-based methods. KEA introduces an additional co-behavior agent that works alongside SAC and a switching mechanism to facilitate proactive coordination between exploration strategies from the co-behavior agent and the SAC agent with curiosity-based exploration. This coordination allows the agent to maintain stochasticity in high-novelty regions, preventing premature convergence and enhancing exploration efficiency. We first analyze the difficulty of balancing exploration-exploitation when combining SAC with curiosity-based methods in a 2D grid environment. We then evaluate KEA on sparse reward control tasks from the DeepMind Control Suite and compare against two state-of-the-art curiosity-based exploration baselines — Random Network Distillation (RND) and NovelD. KEA improves episodic rewards by up to 119% over RND and 28% over NovelD, significantly improving learning efficiency and robustness in sparse reward environments."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement Learning",
"Curiosity-based Exploration",
"Sparse Reward",
"Soft Actor-Critic"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b1b6117e136b5d1e4be59749d158a39856a55871.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "KEA: Keeping Exploration Alive by Proactively Coordinating Exploration Strategies in Curiosity-driven Exploration"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zG459X3Xge | VisRAG: Vision-based Retrieval-augmented Generation on Multi-modality Documents | main | Active | Retrieval-augmented Generation;Vision-language Models | generative models | 3;5;6;6 | 5;3;4;4 | 2;3;3;3 | 2;2;3;3 | 3;3;4;3 | 5 | 4 | 2.75 | 2.5 | 3.25 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Significance of position-weighted mean pooling: How would the model perform with these designs? Would the proposed position-weighted mean pooling produce significantly better results?\n2. Effects of concatenated multiple images: Would this affect VLM performance, particularly when handling a larger number of retrieved images (e.g., more than five)?\n3. Efficiency of VLM-based retrieval: Would using the VLM significantly increase retrieval time? How would the number of retrieved images affect retrieval time?\n4. Clarity on accuracy measurement: How is accuracy measured? Is an additional LLM used to evaluate the textual responses?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. [Originality] The proposed method is promising. With the VLMs showing increasing capabilities in understanding images, encoding images with VLMs is a natural improvement from traditional text-based retrieval methods.\n2. [Quality] The paper presents relatively comprehensive experiments, demonstrating advantages in both retrieval and generation capabilities of the proposed methods.\n3. [Significance] The proposed method clearly outperforms various baselines.\n4. [Clarity] The paper is generally well-written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes VisRAG, a multimodal retrieval-augmented generation pipeline. Different from traditional RAG methods, which rely on text parsing to retrieve information from visual content, this paper utilizes a VLM-based retriever and generator to navigate information relevant to the query and generate responses. The VisRAG design consists of two main stages: (1) Retrieval: Given a query, the VLM retrieves a set of relevant images from the dataset leveraging the cosine similarity between the query embedding and image embeddings; (2) Generation: the VLM uses a combination of retrieved images and the query to produce responses. VisRAG is compared with a series of baselines and show advantages in both retrieval and generation capabilities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major concerns:\n1. In the paper, when retrieving relevant images, the authors adopt position-weighted mean pooling over the last-layer VLM hidden states. This choice seems heuristic-based. Although the paper points out that VLMs are with causal attention, potentially emphasizing weights on later tokens, after multiple VLM layers, we could expect information to propagate across most tokens. In fact, previous works often use the leading token or simply learn a linear layer from all tokens. How would the model perform with these designs? Would the proposed position-weighted mean pooling produce significantly better results?\n2. When using multiple images to generate responses, one method proposed in the paper is image concatenation, which is performed horizontally. This could change the resolution of the final image. Would this affect VLM performance, particularly when handling a larger number of retrieved images (e.g., more than five)?\n3. The paper does not report retrieval efficiency compared to text-based methods. Would using the VLM significantly increase retrieval time? How would the number of retrieved images affect retrieval time?\n\nMinor comments:\n1. When measuring the performance of generation, how is accuracy measured? To me, this is particularly unclear for synthetic data, where ground truth answers and queries are generated by GPT-4. Is an additional LLM used to evaluate the textual responses?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See Weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Improve text-only RAG by establishing a vision-language model (VLM)-based RAG pipeline.\n2. The experiment is thorough under the designed scenario.\n3. The authors have promised to make the data and code open source."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces VisRAG to address the limitations of text-only RAG systems by incorporating vision-language models (VLMs). VisRAG enables the processing of documents as images. By training the VisRAG retriever with open-source and synthetic data and exploring various generation methods, experimental results show VisRAG's superiority in both retrieval and generation tasks. The authors also plan to make their code and data publicly available."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of novelty: For retrieval, it uses the dual-encoder-style and just changes them to VLM embeddings. The position-weighted mean pooling is also off-the-shelf. For generation, they only try a few straightforward strategies.\n2. Lack of design for document understanding: There is a lack of design considerations specifically for document understanding. The proposed method is not tailored for document understanding scenarios. Alternatively, why not validate the method on non-document (common image) datasets?\n3. Confusion regarding the application: Why does a person always need the assistance of document images when asking general questions? For example, in Line 884, Why would a person directly ask a question like 'Where were the two magnesium containing papers made at?' without reference to any document? If he already has this document as a reference, why does he still need to do a retrieval process at first?\n4. Why only experiment with text-only or image-only features, not image+text(+layout)?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. In the comparison process, Colpali and VisRAG-Ret should be fine-tuned based on the same data, rather than using the pre-trained model to compare directly. Please add more comparison results.\n2. The article needs to provide more comparison results with the current best models, such as GPT o1, Qwen and other leading models that can directly input image for VQA task."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "In this paper, the authors propose an image-based RAG method named VisRAG, and construct a new dataset to verify the relevant effects. This paper does a lot of comparative experiments to verify its innovation. A large number of state-of-the-art methods have been combined and proven to be effective."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper focus on the research of vision-language model (VLM)-based RAG\nPipeline and propose a novel method named VisRAG. It broadens the external data processed by RAG from text to image. To achieve the goal, the authors introduce two vision-optimized modules: VisRAG-Ret and VisRAG-Gen. This VisRAG-Ret directly utilizes the images without extracted textual content and establish the map between the query and the documents in the latent space. The VisRAG-Gen leverage the help of the existing LLMs and VLMs for generating the answers. The image processing technology is integrated to realize the efficient generation of multiple inputs. Moreover, a new dataset is built combining a vision question answering (VQA) dataset and synthetic data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The method in this paper is mainly based on the retrieval of image data, which is basically consistent with the text retrieval framework, and lacks innovation. In addition, similar to the comparative model Colpali's innovation, the VisRAG is fine-tuned with a new dataset and a new VLM, which is not sufficient to express its innovation in the multimodality area."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See \"weaknesses\""
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper provides a clear and detailed description of the VisRAG framework., which makes it easy to understand.\n2. The experimental results on document-related VQA tasks are promising, suggesting that the proposed method could be a practical solution."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes VisRAG, a model that leverages vision-language models (VLMs) for retrieval-augmented generation in multi-modal documents. The framework comprises two components: VisRAG-Ret for retrieval and VisRAG-Gen for generation, utilizing a data construction method that combines visual question answering (VQA) datasets with synthetic data. Experiments demonstrate that VisRAG outperforms traditional RAG in both retrieval and generation tasks, exhibiting improved training data efficiency and generalization capabilities. It achieves a significant relative increase in end-to-end accuracy, highlighting its potential to replace text-based RAG for multi-modal documents."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The rationale behind the proposed VisRAG-Ret is unclear. If no OCR information is included, how can the target image be retrieved, especially for answers that require precise text content? From my perspective, a pooling-based representation feature does not offer sufficient information.\n\n2. The experimental setup is not sufficiently convincing, indicating that the VQA task alone cannot fully demonstrate the method's effectiveness in the retrieval task. In other words, employing such a complex pipeline to address the VQA task is akin to using a sledgehammer to crack a nut.\n\n3. The methods discussed are insufficient. To the best of my knowledge, there are several document-specific MLLMs, such as mlug-docowl, ureader, and text monkey, among others. While these works may not be directly related to RAG, they should at least be mentioned and analysized.\n\n4. In my view, the comparison is unfair, as both Minicpm and Siglip can not only perform VQA tasks but also handle other tasks related to document image understanding and content perception."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024visrag,\ntitle={Vis{RAG}: Vision-based Retrieval-augmented Generation on Multi-modality Documents},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zG459X3Xge},\nnote={under review}\n}"
},
"abstract": {
"value": "Retrieval-augmented generation (RAG) is an effective technique that enables large language models (LLMs) to utilize external knowledge sources for generation. However, current RAG systems are solely based on text, rendering it impossible to utilize vision information like layout and images that play crucial roles in real-world multi-modality documents. In this paper, we introduce VisRAG, which tackles this issue by establishing a vision-language model (VLM)-based RAG pipeline. In this pipeline, instead of first parsing the document to obtain text, the document is directly embedded using a VLM as an image and then retrieved to enhance the generation of a VLM. Compared to traditional text-based RAG, VisRAG maximizes the retention and utilization of the data information in the original documents, eliminating the information loss introduced during the parsing process. We collect both open-source and synthetic data to train the retriever in VisRAG and explore a variety of generation methods. Experiments demonstrate that VisRAG outperforms traditional RAG in both the retrieval and generation stages, achieving a 25–39% end-to-end performance gain over traditional textbased RAG pipeline. Further analysis reveals that VisRAG is effective in utilizing training data and demonstrates strong generalization capability, positioning it as a promising solution for RAG on multi-modality documents. Our code and data will be made publicly available."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Retrieval-augmented Generation",
"Vision-language Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c07a4d6b9ca632b81e1e9e93876976a97cdc1c2a.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "VisRAG: Vision-based Retrieval-augmented Generation on Multi-modality Documents"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zGb4WgCW5i | Intervening Anchor Token: Decoding Strategy in Alleviating Hallucinations for MLLMs | main | Active | Multimodal large language models; Hallucination | foundation or frontier models, including LLMs | 5;6;8;8 | 3;3;4;3 | 3;4;4;3 | 3;3;4;3 | 2;3;3;3 | 6.75 | 3.25 | 3.5 | 3.25 | 2.75 | 0.555556 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. in Table 1: the baselines with max token 512 generate very high hallucination (>30%). Is it aligned with user's experience? At least I feel Gemini and ChatGPT's numbers should be much better than that.\n\n2. I feel the proposed method can be used for any LLM based method. Why does the title claim it only for multimodal LLMs?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Simple method which is very easy to use\n2. Good insights from eigenspectrum variance\n3. Constantly improve the decoding methods of InstructBLIP, MiniGPT-4, LLaVA-1.5, and Shikra."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the role of anchor tokens in causing hallucinations in LLMs, and proposed Dynamic Token Propagation Mechanism (TAME) based on eigenspectrum variance of the attention weight. The results show that TAM can be integrated with existing decoding method including Beam/VCD/ICD/SID and improve the performance consistently on InstructBLIP, MiniGPT-4, LLaVA-1.5, and Shikra."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I hope the paper could explore additional approaches to reduce hallucinations, such as Retrieval-Augmented Generation (RAG), Reinforcement Learning from Human Feedback (RLHF), improvements in factuality metrics, and high-quality, data-based instruction tuning.\n\n2. The paper currently uses the a few simple eval sets like MS COCO. I would suggest incorporating more challenging benchmarks for a more rigorous assessment.\n\n3. The writing could benefit from refinement. For instance, many citations are not in the correct format.\n\n4. The paper only reports metrics for hallucination. But it is not clear the new predictions do better or worse in other metrics. Sometimes less hallucination may lead to less details or not as friendly to users to read."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weakness."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1.\tThis paper provides a solid theoretical framework by analyzing the relationship between LVLM hallucinations and attention patterns.\n2.\tThis paper introduces a novel plug-and-play decoding strategy that dynamically adjusts the eigenspectrum variance of attention weights to mitigate hallucinations without adding inference time.\n3.\tExtensive experiments show that TAME improves hallucination metrics across multiple MLLMs.\n4.\tTAME can be integrated into various decoding strategies without requiring additional training or data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the causes of hallucinations in LVLMs through the attention weight matrix of anchor tokens. Then this paper demonstrates the propagation pattern of anchor tokens by the eigenspectrum of the attention weight matrix. The authors further propose a versatile plug-and-play decoding strategy named Dynamic Token Propagation Mechanism (TAME) to reduce the over-propagation of anchor tokens through dynamically intervening in the eigenspectrum variance. Extensive experiments show that TAME improves hallucination metrics across multiple MLLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tAlthough this paper provides an additional theoretical framework, the impact of anchor tokens on the LVLM hallucination was first proposed by OPERA [1].\n2.\tTAME primarily addresses object hallucinations but may not be as effective for other hallucination types, such as incorrect attributes or relations (e.g., limited performance improvement on the MME benchmark). Experiments on more types of hallucination should be conducted.\n3.\tIt would be interesting to study applying TAME to some layers rather than all layers through experiments.\n\n[1] OPERA: Alleviating Hallucination in Multi-Modal Large Language Models via Over-Trust Penalty and Retrospection-Allocation"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The motivations are strong, with in-depth analysis and derivation of the mechanisms behind hallucinations in MLLMs.\n2. The proposed method, TAME, is a simple and effective plug-and-play decoding strategy.\n3. The experiments are thorough, validating both the causes of hallucinations mentioned in the paper and the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "MLLMs encounter hallucination issues, where the generated text does not match the provided visual content. Previous methods have attempted to mitigate hallucinations by designing special decoding strategies, such as penalizing summary tokens, but they lack an analysis of the relationship between the model’s hallucinations and the summary mechanism. In this paper, a study and analysis of the causes of hallucinations in MLLMs are presented. A general decoding strategy, TAME, is proposed to reduce the excessive propagation of anchor tokens by dynamically intervening in the variance of feature spectra. Experiments demonstrate the correlation between feature spectra and hallucinations in MLLMs, as well as the effectiveness of TAME."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The introduction mentions summary tokens but does not define them, leaving unclear the distinction between summary tokens and anchor tokens.\n2. There are some typos in the text that need to be checked:\n 1. Line 301: \"Conversly\" -> \"Conversely\"\n 2. Line 333: \"7,3\" -> \"7.3\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the qualitative results, it appears that the proposed method tends to make the model’s output shorter, which is expected, as the goal is to reduce hallucinated content. However, from another perspective, could this lead to insufficient perception of the image by the model, potentially missing some details? If so, how could we measure and balance this trade-off?\n\n2. The example in Figure 6 is somewhat unclear, as it’s difficult for reviewers to determine which parts of the image or which specific patches correspond to the enhanced visual tokens. Could the authors provide a visualization of the relevant visual tokens mapped onto the image?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. The paper is well-written and clear motivated. The authors begin by introducing hallucinations and related work, gradually diving deeper by focusing on the phenomena of token information flow and information aggregation, ultimately leading to the proposal of TAME. \n\n2. This paper is built on a detailed and clear theoretical foundation. Although the proposed method appears simple, the step-by-step theoretical derivation strongly supports it. \n\n3. The proposed method is simple-yet-effective, which obviously surpasses all baselines on CHAIR, POPE and GPT-4 assisted hallucination evaluation. Meanwhile, the method introduces no additional computation overhead, which is superior to existing contrastive decoding based methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new decoding method to address hallucination issues in multimodal large language models (MLLMs). The authors identify that excessive propagation of “anchor tokens”, influenced by polarized variance in the attention mechanism’s eigenspectrum, contributes to hallucinations by sidelining crucial visual information. To mitigate this, the paper introduces the Dynamic Token Propagation Mechanism (TAME), a plug-and-play decoding strategy that adjusts the eigenspectrum variance in the attention weights, limiting over-propagation without adding to inference time. Extensive experiments demonstrate that TAME effectively reduces hallucinations across multiple MLLMs and achieves great improvements over existing methods like Greedy Decoding and Visual Contrastive Decoding (VCD). The method is simple-yet-effective and easy to implement."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some of the color schemes in the figures, such as Figures 4 and 5, are prone to causing confusion. It is recommended to use colors with higher contrast and more distinct differentiation.\n\n2. The paper begins its analysis with the phenomena of token information flow and token information aggregation, gradually developing a method to mitigate hallucinations. However, it does not yet discuss the underlying causes or triggers of these phenomena, only relying on empirical analysis. Nevertheless, the analytical approach used in the paper is still worth learning from.\n\n3. The paper provides limited explanation of the proposed method. Although the method is simple (just one single formula), a smoother transition from the theoretical derivation is needed, along with a clear description of its application scenarios."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024intervening,\ntitle={Intervening Anchor Token: Decoding Strategy in Alleviating Hallucinations for {MLLM}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zGb4WgCW5i},\nnote={under review}\n}"
},
"abstract": {
"value": "Multimodal large language models (MLLMs) offer a powerful mechanism for interpreting visual information. However, they often suffer from hallucinations, which impede the real-world usage of these models. Existing methods attempt to alleviate this issue by designing special decoding strategies that penalize the summary tokens. However, these methods lack analysis of the relationship between hallucination and summarization mechanism of LLMs. Interestingly, we find that penalizing summary tokens is not necessary: merely intervening the query-key parameters variance, without costing extra inference time, still alleviates hallucinations. Specifically, we explore the causes of hallucinations by analyzing localized self-attention patterns called ``anchor\" tokens and define the attention localization degree of the model as token propagation probabilities. Our analysis reveals that over-propagation of anchor tokens occurs when the distribution of eigenvalues of the query and key matrices has a non-zero mean and a polarized variance, leading to excessive dependence on anchor tokens while neglecting vision information and describes the image content with hallucination. Based on the observation, we propose a versatile plug-and-play decoding strategy, Dynamic Token Propagation Mechanism (TAME), to alleviate excessive propagation by dynamically intervening the eigenspectrum variance of the attention weight, thereby alleviating hallucinations without relying on complex decoding strategies. Extensive experiments reveal a correlation between the eigenspectrum and hallucinations across various MLLMs, and show that TAME reduces the percentage of hallucinated objects."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multimodal large language models; Hallucination"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7262be39349b4cb875590a81dab3fe80a0578c09.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Intervening Anchor Token: Decoding Strategy in Alleviating Hallucinations for MLLMs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zGej22CBnS | Exact Byte-Level Probabilities from Tokenized Language Models for FIM-Tasks and Model Ensembles | main | Active | Language models;Tokenization;Probability;Sampling | foundation or frontier models, including LLMs | 5;6;6;8 | 4;4;4;3 | 3;3;3;3 | 2;3;3;3 | 2;3;3;3 | 6.25 | 3.75 | 3 | 2.75 | 2.75 | -0.927173 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness above. In addition, \n\n1. Do authors plan to open-source the implementation?\n\n2. There is also a line of work similar to token healing and seems working well in both code and text. Could the authors study it and see whether the token alignment method could mitigate the issue especially in single-model completion tasks? \nToken Alignment via Character Matching for Subword Completion https://aclanthology.org/2024.findings-acl.929.pdf"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors attempted to address a critical problem on tokenization for LLMs. They are the first to introduce tokenization bias and its effects on model behavior. They present a theory of it and introeduced efficient implementation of the algorithm.\n\nThe authors also studied the practical impact of the proposed solution through both code generation task (FIM, speficially) and model emsambles. Results indicate that this training-free approach is working well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper reveals a fundamental issue in language models regarding tokenization bias - where tokenized and byte-level models, despite being statistically equivalent, produce different predictive distributions. The authors introduce the Byte-Token Representation Lemma, providing a framework to convert tokenized language models into statistically equivalent token-free ones without additional training. They demonstrate practical applications in fill-in-the-middle tasks and model ensembles, achieving significant improvements: 18% on FIM coding benchmarks and up to 3.7% on ensemble tasks. The paper provides both theoretical foundations and practical implementations through efficient algorithms for cover encoding search and next-byte sampling."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the current results are great, it'd be better if authors can also cover tasks other than FIM that suffer from this issue, e.g. complete a story in natural language or generic code completion task that doesn't require FIM.\n\nThe authors discussed computational overhead but doesn't thoroughly analyze the latency impact especially in a practical setting. It'd be better if authors can study these as part of the experiments authors conducted.\n\nThe interaction between the proposed byte-level prediction method and popular sampling techniques (like top-p, top-k) have not been explored enough, especially in the ensemble context."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* How is the proposed method compatible with other tokenization algorithms besides BPE and MPE?\n* What do the right two sub-figures mean in Figure 6?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* This paper defines the tokenization bias phenomenon, which could cause performance issues in practical scenarios. \n* They evaluated the proposed method on FIM code completion task and mode ensemble task. \n* The proposed byte-level prediction method considers both the accuracy and efficiency in recovering the unbiased byte-level distribution."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the impact of tokenization on the performance of language models, particularly highlighting a phenomenon called \"tokenization bias.\" This bias comes from the discrepancies between the predictive distributions of tokenized LMs and their statistically equivalent byte-level distribution when predicting the next byte or token. In this paper, they introduce the Byte-Token Representation Lemma (BTR Lemma) to map between the token and byte domain, which allows the computation of exact byte-level probabilities without requiring extra modification of the model. This method identifies \"cover encodings\" for a given byte string which are all possible valid tokenizations of a string that \"optimally\" contain the string in the prompt. Experiments on FIM code completion task and model ensemble task show that the proposed method improves model performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* This method is very similar to the ACL 2024 paper 'Token Alignment via Character Matching for Subword Completion' which addresses the same issue and proposes more evaluation scenarios. However, in this paper, they didn’t compare with the token alignment method. \n\n* Based on the results shown in Figure 6, the improvement of the proposed method on the FIM evaluation tasks in sampling settings (pass@10) over the token-level with token healing method is very minor. For example, from 84.0 to 84.1. Why is that? \n\n* The evaluation scenarios are limited. Only two types of tasks are covered. How’s the performance of the method on general completion tasks without the FIM setting? Does the tokenization bias also have an impact on natural language completion tasks?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I am curious what do authors think about the generalizability of their methods to non-deterministic tokenization or in applications that involve mixed token vocabularies ? Could some experimental results be shown to further strengthen the paper ? \n\nHow do authors explain the not-so-strong gains with ensebling (Table 1 and Table 2). It was not clear how the models are being ensembled together? I would have also liked to see other combination strategies as a baseline e.g. consensus voting or ranking over diverse samples from different LMs. Are these experimental results available ? \n\nIn order to truly appreciate the need for this research, its important to know just how much diverseity there exists in tokenization vocabulary across different LLMs today. To that end, I would like to see some sort of distribution characterizing the popularity of tokenization and distribution of vocabulary sizes across various LLMs. If the world is already coverging to one tokenization and the vocabulary of tokens homogenizes over time, then what practical advantages does this paper bring ? We should debate whether or not this is true and I would like to see authors providing their rebuttal."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "What I really liked about this paper is that it very systematically investigates the phenomenon of tokenization bias. It develops a byte level prediction method and overcomes the bias. The applicability of this method and the findings in the paper have implications in areas such as coding where fill in the middle is crucial as well as in model ensembling. The idea of combining ensembles of foundation models at the logit layer is very appealing. Back in the day when there were no tokens and LMs were being built on whole words, ensembling using to be the norm in any practical LM based applications, such as speech recognition. With varying tokenization powering different LLMs today, the ability to freely combine any LLM with each other became restrictive. This paper proposes byte level efficient sampling algorithms and in conjuction with the byte-token representation lemma and the associated theoretical foundation, now allows the community to confidently combine predictions from any LLMs. Tokenization allows the LMs to overcome closed vocabulary limitation and from my perspective, this paper further allows us to overcome distinct vocabulary challenges associated with model ensembling."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a thorough investigation into the phenomenon of tokenization bias in language models, proposing a byte-level prediction method to mitigate this bias. The approach has promising implications, especially for tasks like fill-in-the-middle (FIM) in coding and for model ensembling. The idea of combining language models with distinct tokenizations at the logit level recalls older ensemble methods, commonly used before tokenization became mainstream in modern LLMs. The proposed method, underpinned by the Byte-Token Representation Lemma and efficient sampling algorithms, allows for effective ensemble models by bypassing tokenization restrictions, facilitating seamless integration across diverse language models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the results are promising for fill in the middle (in coding) tasks, I would have liked to see experimentation on other tasks to ascertain this method's generalizability to a wider variety of tasks, espically those that require context senstitive token predictions. Computer use, a new usecase demonstrated by companies such as Anthropic, could be a good use case that the authors could consider as an example. \n\nAn area that authors could delve further into is more analysis on the tokenization bias in multi step or long context tasks. \n\nIn order to truly appreciate the need for this research, its important to know just how much diversity there exists in tokenization vocabulary across different LLMs today."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In the caption for Figure 2, should the probability of sampling “A” on the LHS be $(1 - \\alpha)$? It states the probability is $\\alpha$.\n\nI would be willing to change my opinion based on the responses to the weaknesses!"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The characterization of the tokenization bias phenomenon, with examples / and the synthetic example with a 3rd order Markov chain, is very interesting and useful to the community\n- The paper is well-written and studies an important problem"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Tokenization is a common pre-processing step, which shortens input sequences by mapping multiple bytes to discrete tokens. \nPrior work finds: Even for unlimited data and compute, tokenized models can achieve better cross-entropy loss vs. untokenized models\nOne hypothesis for why tokenization helps is that it allows models to handle longer contexts at less compute\nMain problem: there are certain combinations that cannot be represented in token space, limiting the model’s abilities. This is harmful esp. for prompts that end mid-token.\nApproach: this work presents a way to predict when tokenization is getting in the way and converts tokenized LMs into statistically equivalent byte-level models"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The code infilling setting is an interesting example of where this work could be useful. However, it is not clear why the prefix would stop in the middle of a token/word or why new tokens would arise at inference time. Why are the <PRE>, <MID>, and <SUF> tokens needed? Why is the study restricted to two prompting formats/are others possible? Since tokenization schemes are lossless, it seems like any prefix should be representable with the set of available tokens at training time. Further explanation of the evaluation setting and why it is realistic would be helpful.\n- Several prior methods can ensemble the predictions from models that do not share a vocabulary, or ensemble multiple predictions from a single model (https://arxiv.org/abs/2203.11171, https://arxiv.org/abs/2210.02441, https://arxiv.org/abs/2307.11031). It is not clear whether the ensembling method proposed in this paper would outperform these methods . The motivation that byte-representations help ensembling is not fully examined due to the lack of these comparisons.\n- It would be useful to provide decoding efficiency metrics corresponding to each decoding approach in Figure 6 since efficiency is a motivation in Section 4."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We describe an algorithmic process to turn any tokenized LLM into its statistically equivalent byte-level LLM. Applications; FIM task + model ensembles."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024exact,\ntitle={Exact Byte-Level Probabilities from Tokenized Language Models for {FIM}-Tasks and Model Ensembles},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zGej22CBnS},\nnote={under review}\n}"
},
"abstract": {
"value": "Tokenization is associated with many poorly understood shortcomings in language models (LMs), yet remains an important component for long sequence scaling purposes. This work studies how tokenization impacts model performance by analyzing and comparing the stochastic behavior of tokenized models with their byte-level, or token-free, counterparts. We discover that, even when the two models are statistically equivalent, their predictive distributions over the next byte can be substantially different, a phenomenon we term as ``tokenization bias''. To fully characterize this phenomenon, we introduce the Byte-Token Representation Lemma, a framework that establishes a mapping between the learned token distribution and its equivalent byte-level distribution. From this result, we develop a next-byte sampling algorithm that eliminates tokenization bias without requiring further training or optimization. In other words, this enables zero-shot conversion of tokenized LMs into statistically equivalent token-free ones. We demonstrate its broad applicability with two use cases: fill-in-the-middle (FIM) tasks and model ensembles. In FIM tasks where input prompts may terminate mid-token, leading to out-of-distribution tokenization, our method mitigates performance degradation and achieves an approximately 18\\% improvement in FIM coding benchmarks, consistently outperforming the standard token healing fix. For model ensembles where each model employs a distinct vocabulary, our approach enables seamless integration, resulting in improved performance (up to 3.7\\%) over individual models across various standard baselines in reasoning, knowledge, and coding."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Language models",
"Tokenization",
"Probability",
"Sampling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/4c8d5ca9ac45632e40720c72bcc911ca29f7b78e.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Exact Byte-Level Probabilities from Tokenized Language Models for FIM-Tasks and Model Ensembles"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zGvwENuzPU | Bias-Augmented Consistency Training Reduces Biased Reasoning in Chain-of-Thought | main | Active | Chain-of-thought prompting;Explainability;Generalization;Reasoning;Bias | alignment, fairness, safety, privacy, and societal considerations | 3;3;5;6 | 3;3;4;3 | 2;3;3;3 | 2;2;2;3 | 3;3;3;4 | 4.25 | 3.25 | 2.75 | 2.25 | 3.25 | 0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could you explain the mechanism behind cross-bias generalization, e.g., why does training on sycophancy bias help reduce pattern matching biases, or have you identified common features among biases that show better generalization?\n\nThe paper demonstrates BCT works but doesn't fully explain why. Could you provide analysis of what the model is actually learning during BCT?\n\nRegarding paraphrase requirements, what is the minimum number of paraphrases needed for effective BCT? What is the relationship between number of paraphrases and performance?\n\nIs it possible to experiment with some methods of balancing bias reduction with instruction following?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper addresses an important problem in AI systems - the tendency of language models to generate biased reasoning without acknowledging the influence of biasing features. The empirical results show some promise, with reported reductions in certain types of biases. The experimental setup is generally well-documented, facilitating reproducibility, and the authors are commendably transparent about some of the method's limitations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Bias-Augmented Consistency Training (BCT), presenting a novel unsupervised approach to reducing biased reasoning in large language models. The work reframes the challenge of biased reasoning as a consistency problem, training models to maintain coherent reasoning patterns across prompts with and without biasing features. Through extensive evaluation the authors demonstrate BCT's effectiveness in trained bias and held-out biases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the presented approach to bias mitigation is interesting, several weaknesses limit its potential contribution. \n\n- The paper lacks a clear theoretical explanation for the effectiveness of BCT, making it difficult to predict its applicability to different biases. While this is understandably a common challenge for LLMs, the resource-intensive nature of BCT, requiring numerous paraphrases for each bias type, raises concerns about its scalability and practicality. Additionally, the paper does not adequately address the potential challenges in generating high-quality paraphrases at scale.\n\n- The inconsistent performance of BCT across different bias types raises questions about its reliability and generalizability. Furthermore, the observed degradation in instruction-following capabilities suggests potential unintended consequences, highlighting the need for a more thorough investigation of potential negative impacts. Perhaps its worthwhile to be more comprehensive in more instruction-following tasks.\n\n- The experimental evaluation is limited in scope, focusing primarily on multiple-choice questions and bias reduction metrics. This narrow focus raises concerns about the generalizability of the findings to open-ended tasks and real-world applications. The paper's claim of successful generalization to unseen biases lacks a convincing analysis of the underlying mechanisms.\n\n- The paper does not adequately position its contribution within the broader context of existing debiasing approaches. While BCT demonstrates some empirical success, its limitations and potential drawbacks raise questions about its overall significance and potential advantages over alternative methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Are there results for other LLMs beyond GPT-3.5T?\n- Can you formalize what the BCT objective is?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Originality: The paper addresses an important problem of doing biased/unbiased reasoning answers to questions that are potentially biased. \n- Quality/Clarity: The paper is clearly written and the structure is simple and clear to follow. \n- Significance: The paper provides some results that show that, under standard bias metrics, the proposed approach could help generate unbiased answers in the context of biased questions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method for fine-tuning of large language models by using the concept of biased-consistency training (BCT). The aim is to reduce biased reasoning in chain-of-thought by incorporating a suite testing nine forms of biased reasoning under the (BCT) unsupervised fine-tuning method. The method is tested on question answering tasks run with GPT-3.5T."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- From Figure 2, I understand the main contribution of the paper is the BCT fine-tuning objective. However, there is no comprehensive explanation of what it entails, the assumptions, limitations of the proposed approach. Furthermore, Section 2 which is supposed to be the methodology part delves more around existing approaches, whilst leaving a single small paragraph for the explanation of the method.\n- The experiments are limited to GPT-3.5T. Therefore, there is no demostration of how generalizable the proposed method is to LLMs in general.\n- Beyond Figure 3, there is no other place in the experiments where the results of the proposed method in question answering tasks are clearly explained, with clear examples that show how this proposed method produces unbiased/biased answers."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- What is the BCT training objective? From the description in Section 2.2, is it simply adding both biased and unbiased responses when fine-tuning the model, or the objective actually changed?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The studied problem is important: faithfulness in CoT reasoning is relatively under-explored and it's important to ensure the models to produce unbiased reasoning to users.\n\n- The results on generalization to other types of biased reasoning is very interesting, and demonstrates the usefulness of the proposed method.\n\n- The authors provided fairly comprehensive analysis on models' behavior when trained with BCT, especially the observation where BCT reduces coherent biased reasoning is kind of interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new method called bias-augmented consistency training to improve faithfulness in CoT Reasoning. Specifically, the paper shows that training on certain bias types can help reduce biases in other types of biased reasoning behaviors."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Novelty: overall the proposed method is similar to existing literature on reducing bias in ML/NLP models with data augmentation or counterfactual augmentation, hence the novelty of the work is a bit lacking. The authors should provide more discussion on how this work is different from existing literature.\n\n- For the generalization behavior, currently the main experiments are done on Sycophancy examples and generalization is shown on other bias types. It would be more useful to show if this trend holds or not (and to what extent) regardless of which bias type the model is being trained on. Some of the experiments are provided in Appendix E but the results are not very detailed. In Figure 5, why would multi-bias training perform on-par and sometimes even worse than single-bias? More discussion on this would be useful.\n\nMinor:\n- Many of the important results are included in the appendix, it would be more useful to re-organize the experiment section a bit and show all the major results in the main text. For example, the main results are only shown over one model: gpt-3.5-turbo-0613. The authors provided some additional experiments on LLaMa3-8B in the appendix, but it would be more useful to bring those experiments to the main text to show that the method generalizes across models. Also, is there a study on what model sizes are required for BCT to be effective?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. How does the instruction-following data influence the effectiveness of BCT?\n2. Will BCT affect the model's calibration?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduced bias-augmented consistency training is simple yet effective. Its unsupervised nature suggests broad applicability and scalability.\n2. The authors presents strong empirical results showing generalization across bias types and tasks.\n3. The paper is very well-written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces bias-augmented consistency training (BCT), an unsupervised fine-tuning method to reduce biased reasoning in chain-of-thought (CoT) language model outputs. The authors demonstrate that BCT significantly reduces biased reasoning by training models to give consistent reasoning across prompts with and without biasing features. The key contribution is showing that training on a single bias type generalizes well to reducing other forms of biased reasoning and to new tasks, without requiring labeled data. The authors also validate that BCT minimally impacts model performance on standard tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. My main concern with the proposed BCT is its potential negative impact on few-shot performance or, more broadly, in-context learning. While Appendix I shows some evidence for this, I'm not fully convinced, as Figure 9 only includes TruthfulQA results. I recommend including few-shot and few-shot CoT results on additional benchmarks, such as MMLU, to provide a more comprehensive assessment.\n2. Section 5.2 demonstrates that BCT reduces coherent biased reasoning, highlighting a trade-off between CoT faithfulness and sensitivity to biases. This important finding warrants further discussion and analysis.\n3. Some experimental design choices lack clear justification. For instance, the rationale behind measuring task and bias generalization simultaneously, as well as the selection of training data mixture proportions, could be better explained."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce an unsupervised consistency training method that effectively reduces unfaithful biased reasoning in language models, even on held-out forms of bias."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024biasaugmented,\ntitle={Bias-Augmented Consistency Training Reduces Biased Reasoning in Chain-of-Thought},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zGvwENuzPU},\nnote={under review}\n}"
},
"abstract": {
"value": "While chain-of-thought prompting (CoT) has the potential to improve the explainability of language model reasoning, it can systematically misrepresent the factors influencing models' behavior--for example, rationalizing answers in line with a user's opinion without mentioning this bias. To mitigate this biased reasoning problem, we introduce bias-augmented consistency training (BCT), an unsupervised fine-tuning scheme that trains models to give consistent reasoning across prompts with and without biasing features. We construct a suite testing nine forms of biased reasoning on seven question-answering tasks, and find that applying BCT to GPT-3.5-Turbo with one bias reduces the rate of biased reasoning by 86% on held-out tasks. Moreover, this model generalizes to other forms of bias, reducing biased reasoning on held-out biases by an average of 37%. As BCT generalizes to held-out biases and does not require gold labels, this method may hold promise for reducing biased reasoning from as-of-yet unknown biases and on tasks where ground truth reasoning is unavailable."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Chain-of-thought prompting",
"Explainability",
"Generalization",
"Reasoning",
"Bias"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9ffabdefe6da242b2a44477ade75d4bec4533f7d.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bias-Augmented Consistency Training Reduces Biased Reasoning in Chain-of-Thought"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zGzs5SIwT8 | A Skewness-Based Criterion for Addressing Heteroscedastic Noise in Causal Discovery | main | Active | Causal Discovery;Heteroscedastic Noise;Score Matching | causal reasoning | 3;5;6;6;10 | 2;4;3;4;4 | 3;2;3;4;3 | 2;2;3;3;4 | 3;3;4;3;4 | 6 | 3.4 | 3 | 2.8 | 3.4 | 0.657794 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is the algorithm scalable? It would be valuable to discuss if this approach can handle high-dimensional datasets effectively and whether its complexity scales polynomially or exponentially with the number of variables. \n\n2. Can you discuss the relation between the proposed method with other types of causal discovery algorithms, such as (1) constraints-based algorithms such as PC algorithm etc., and (2) continuous optimization based algorithms such as the one in https://arxiv.org/abs/1803.01422, which provides scalability? \n\n3. What are the effects of small sample sizes on SkewScore’s performance? It would be great if authors can discuss the convergence guarantee of the algorithm in terms of the size of samples. \n\n4. Can the proposed algorithm provide confidence intervals or uncertainty quantification in measuring the skewness or determining causal directionality?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. An example in Section 5 provides a clear, practical illustration of the algorithm, connecting theoretical concepts with application. \n\n2. This paper provides a well-structured and self-contained summary of prior works in causal discovery algorithms based on functional causal models, allowing readers to clearly follow the evolution for learning causal graphs. As a reviewer, this summary enables me to track the advancements in the field and understand the recent progress. \n\n3. The core idea behind the proposed method is simple and intuitive, based on the asymmetry of score skewness to identify causal directions. This approach is conceptually accessible and computationally efficient solutions. \n\n4. The experiments provide strong empirical evidences of the paper’s contributions, with a wide range of scenarios that demonstrate the robustness of the proposed framework in heteroscedastic settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a novel causal discovery algorithm that leverages skewness to determine causal direction when heteroscedastic noise (HN) is present."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper provides correctness guarantees only for the two-variable case, where skewness in the score function can reliably distinguish causal directions. However, for settings with more than two variables, there is no formal guarantee of correctness for Algorithm 1. \n\n2. Current empirical results seem insufficient in showcasing its performance in high-dimensional scenarios. In other words, it remains unclear whether SkewScore scales effectively as the number of variables grows significantly. \n\n3. While the authors claim robustness in the presence of latent confounders, this robustness appears to be limited to simple bivariate cases. In multivariate settings, SkewScore can still produce incorrect causal graphs when latent confounders are present. For instance, in the causal structure $X_1 \\rightarrow X_2 \\rightarrow X_3$ with a latent confounder $L$ influencing both $X_1$ and $X_3$ ($X_1 \\leftarrow L \\rightarrow X_3$), the method would infer the structure $X_1 \\rightarrow X_2 \\rightarrow X_3$ along with a spurious direct link $X_1 \\rightarrow X_3$. This incorrect edge arises because the latent confounder $L$ induces a conditional dependence between $X_1$ and $X_3$, which SkewScore interprets as a direct causal link. As a result, while the method can handle some latent confounding, it does not offer sufficient mechanisms to detect and mitigate confounding in more complex structures, limiting its application in practical scenarios where latent variables are common."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Is it possible to provide identification results such as 'the skew score is larger under the wrong causal direction' without symmetric noise assumption?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The notation is well-organized and the paper is easy to follow overall."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a causal discovery method for a class of restricted structural causal models with heteroscedastic symmetric noise. The identification of the causal graph is established using a score that quantifies the skewness of the score function. A causal discovery algorithm is then proposed based on the skewness score."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Fact 2 is incorrect. Let X follow univariate exponential distribution with parameter $\\lambda$, then $E[\\frac{d \\text{log} p(X)} { dx}] = -\\lambda$. Its proof is also incorrect. \n\n2. The proof of Theorem 1 heavily relies on the assumption of symmetric noise. Without this symmetry, skewness no longer provides useful implications for the causal direction, as the proof of Theorem 1 breaks down. \n\n3. Definition 1 needs detailed discussions. As stated, \"this measure captures the asymmetry...,\" it would be helpful to demonstrate that it is zero when the distribution is symmetric."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How is Theorem 1 applied to establish the causal direction between variables $X$ and $Y$ in the experiments? Equations (4) and (5) represent the population mean, while in experiments, we can only obtain the sample mean.\n2. The performance of SkewScore in terms of accuracy, as shown in Figure 3, is not consistently superior to other methods. Specifically, SkewScore outperforms others in the case of GP-sig Student's t in Figure 3(a), and in the cases of GP-sig Gaussian and GP-sig Student's t in Figure 3(b). What are the computation times associated with the different methods?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduction of the SkewScore, which is leveraged to identify causal structures without requiring the extraction of exogenous noise.\n2. A theoretical extension of the criterion to multivariate cases, along with empirical validations showing its robustness, particularly in scenarios involving latent confounders."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a novel approach to uncovering causal relationships in data with heteroscedastic noise, which challenges traditional causal discovery models. The authors propose the use of a skewness-based criterion derived from the score of data distributions to identify causal directions, establishing that this measurement is zero in the causal direction but non-zero in the anti-causal direction."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "See questions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "### On Proposition 5\nCould you elaborate on why, “when $\\sigma(x)$ is not constant (i.e., in the heteroscedastic model), $c \\neq 0$ in most cases”? More broadly, could you comment on the condition $c=0$? This characterization could be crucial as it defines a class of distributions $p(u, x)$.\n\nProposition 5 considers $f$ in a finite-dimensional linear space, specifically polynomials. Could neural networks with hidden layers be considered also (since Example 7 doesn’t)?\n\nIt’s interesting that $f$ is required to lie in a “large finite-dimensional space.” I would assume that if $f$ were in an infinite-dimensional functional space, the space should instead be “small.” Could you share your thoughts on this apparent contrast?\n\n### Additional Comments\nCould similar theoretical results be proven for standard skewness?\n\nAssumption 4: Typically, “regularity” refers to the properties of a well-behaved functional space. In Assumption 4, condition 1) is indeed a regularity condition, but it’s unclear if condition 2) qualifies as such. Additionally, could you explain why condition 2) is considered mild?\n\nMinor: There is a duplicate bibliography entry for Mityagin (2015)."
},
"rating": {
"value": 10
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper provides a neat and effective solution to a critical problem. The connection between score skewness and causal direction is both theoretically grounded and practically effective.\n\nThe extensions to multivariate cases and the potential applicability under hidden confounding are valuable contributions.\n\nThe theoretical analysis appears rigorous (though I haven’t verified every detail).\n\nThe paper is clearly written, with helpful elements like the two properties of the score function, the motivation of score skewness by analogy to standard skewness, and well-designed figures."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method to identify causal directions in heteroscedastic symmetric noise models (HSNMs) using the skewness of the score function, which is the gradient of the log density. The skewness considered here is analogous to standard skewness but is distinct, offering computational advantages. Importantly, the paper proves that the score's skewness is zero only in the causal direction. This idea is later generalized to the multivariate case. The proposed method also shows robustness in the presence of hidden confounders, and a theoretical analysis is provided for an \"additive\" confounder in the bivariate case. The method demonstrates strong empirical performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The generality of Assumption 1, which defines the applicable HSNMs, could be explored further, although the paper presents sufficient results for a conference submission.\n\nThe method's applicability under hidden confounding might be limited to special confounding structures."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Which ER graph model is used in the experiments, $G(d, p)$ or $G(d, m)$? \n2. Can you explain in more detail why $c\\neq0$ in general in Remark 6?\n3. What is the runtime for the order learning and independent tests separately?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The method is novel and can be potentially applied to causal discovery with latent confounders.\n2. The theoretical justification of Assumption 1 seems sound, and the experiments showed that the proposed method works particularly well on sparse HSNMs.\n3. The author extensively evaluated the method when the assumptions are violated, e.g. non-symmetric noise distribution, and latent confounder in the multivariate setting, and proved its effectiveness and robustness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the author proposed a novel causal learning algorithm for heteroscedastic symmetric noise models (HSNMs), where the noise term of a variable $X_i$ can be parameterized by $\\sigma(pa(X_i))N_{X_i}$ with symmetric noise $N_{X_i}$. Specifically, the author showed that the skewness of the score function (SkewScore) in the $X_i$-coordinate is zero if and only if $X_i$ is a leaf node under certain assumptions. Therefore, the topological order can be identified by iteratively removing the variables with the least SkewScore. Conditional independence tests are then applied to determine the edges. Its validity for the HSNM with latent confounder is also discussed. Extensive experiments showed that the proposed method is effective on HSNMs and is robust to assumption violations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The author mainly used ER1 to evaluate the methods on HSNMs without latent confounders, which is relatively sparse.\n2. I think the justification of Equation (6) was a bit missing. Proposition 5 justified that Assumption 1 is mild, and therefore Equation (5) in the bivariate setting holds. However, there is no such explanation for Equation (6). Can you provide some concrete examples where Equation (6) is reduced to some known results, or simplified by specific function and noise classes?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024a,\ntitle={A Skewness-Based Criterion for Addressing Heteroscedastic Noise in Causal Discovery},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zGzs5SIwT8},\nnote={under review}\n}"
},
"abstract": {
"value": "Real-world data often violates the equal-variance assumption (homoscedasticity), making it essential to account for heteroscedastic noise in causal discovery. In this work, we explore heteroscedastic symmetric noise models (HSNMs), where the effect $Y$ is modeled as $Y = f(X) + \\sigma(X)N$, with $X$ as the cause and $N$ as independent noise following a symmetric distribution. We introduce a novel criterion for identifying HSNMs based on the skewness of the score (i.e., the gradient of the log density) of the data distribution. This criterion establishes a computationally tractable measurement that is zero in the causal direction but nonzero in the anticausal direction, enabling the causal direction discovery. We extend this skewness-based criterion to the multivariate setting and propose \\texttt{SkewScore}, an algorithm that handles heteroscedastic noise without requiring the extraction of exogenous noise. We also conduct a case study on the robustness of \\texttt{SkewScore} in a bivariate model with a latent confounder, providing theoretical insights into its performance. Empirical studies further validate the effectiveness of the proposed method."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Causal Discovery",
"Heteroscedastic Noise",
"Score Matching"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/09f74a4c1ee9476c978d3330a568927cec8ff225.pdf"
},
"presentation": null,
"primary_area": {
"value": "causal reasoning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "A Skewness-Based Criterion for Addressing Heteroscedastic Noise in Causal Discovery"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zHeHIIFQVF | Train once and generalize: Zero-shot quantum state preparation with RL | main | Active | Quantum State Preparation;Deep Reinforcement Learning;Zero-shot Inference;Off-the-shelf Algorithms;Generalization | applications to physical sciences (physics, chemistry, biology, etc.) | 3;3;3;5;6;6 | 4;4;5;5;4;3 | 2;2;1;2;3;3 | 1;2;2;2;2;3 | 2;2;1;2;2;3 | 4.333333 | 4.166667 | 2.166667 | 2 | 2 | -0.411765 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why is retraining not required? This needs further explanation and study.\n2. Why was the 9-qubit agent not trained to convergence?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Upto 60% shorter circuit depths are obtained.\n2. Generalized to more than 2 qubits without re-training."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a zero-shot quantum state preparation approach. RL has been used.\nA moving goal post reward function has been designed. Training is performed on less than 0.0001% of the\nstate space and then generalized."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Proof of Prop. 2 of convergence is not completely convincing. Conditions \nunder which it is valid need rigorous statement.\n2. No guarantee that the potential function will be strictly convex. No consideration\nis given to this.\n3. It is not clear why retraining is not required. How general is this is not stated."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- see above.\n\n- HSH gate in the gateset:\n - What is the rationale behind including HSH gate in the gateset $G$ (action space)? Because in principle the agent should learn the symmetry on $S$ w.r.t. to $X$ and $Z$ operations by itself. I believe this inductive bias is introduced during the learning process while the whole point of using RL is not to have any kind of bias.\n\n - Also HSH gate has three single qubit gates. In Table 1, do you count it as one gate for all the methods? Do you use the same gateset for all the methods for a fair comparison?\n\n - Can you provide an ablation study with and without HSH gate to see how the number of gates differ between these two settings for your method?\n\n- Why is there no circuit size numbers for $9$-qubit DRL (local gates) in Table 1?\n\n- In Fig. 3(d), $n=9$, can you please explain the behaviour of the circuit size for depth $t=1, 2$ signficantly larger for your method compared to other two baselines?\n\n- How does the algorithm perform for the QSP task when the target states which are complex and physically relevant but are not classically simulable?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The paper does a reasonable job introducing the quantum state preparation task and, from the presented difficulties of the domain, manages to derive its adaptations to standard RL employed to tackle this domain. The adaptations presented are sound and straight-forward. The results give a nice guideline for future research in that direction."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors employ reinforcement learning (RL) to the task of quantum state preparation (QSP) on physically relevant states. Additionally, their method enables zero-shot preparation for any target state when the system size is fixed. \nNumerical results for systems with up to $9$ qubits show that their method generates circuits up to $60$% shorter than the baseline methods evaluated in the study."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The presentation of the paper is somewhat convoluted and challenging to follow; clearer descriptions would be helpful.\n   - The contributions could be better highlighted, such as the introduction of the moving goalpost reward (MGR) for the QSP task.\n   - The methodology and results, presented through plots, are introduced quite late in the paper. Providing clearer scenario descriptions of the experiments would improve readability.\n   - A clearer contrast with related work would be helpful.\n   - Figure legends could be enlarged for improved readability.\n   - To aid understanding of the MGR function, a figure illustrating its workings would be valuable.\n\n- The paper appears to misinterpret the QSP definition by assuming access to the target state $|\\psi\\rangle$. Typically, in QSP, one has access only to a succinct (classical) description of the target state, not the circuit that implements it. However, in this approach, the authors start each episode with $|\\psi\\rangle$ as the initial state and then find a circuit to prepare $|\\mathbf{0}\\rangle^{\\otimes n}$ for zero-shot state preparation. This approach may not be physically feasible in a lab; if the circuit for $|\\psi\\rangle$ is known, the rationale for using RL (or any method) to find a circuit that prepares $|\\mathbf{0}\\rangle^{\\otimes n}$ and then inverts it is unclear. It might be argued that the given $|\\psi\\rangle$ circuit is too deep or not NISQ-friendly, necessitating a more compact architecture, though this would still be costly in a real lab setting.\n\n- Insufficient comparison with relevant state-of-the-art methods for the quantum state preparation task:\n  - RL methods: [1-4]\n  - ML methods: [5,6]\n  - SAT-based methods: [7,8]\n\n- The algorithm's performance under common noise models, such as state preparation and measurement (SPAM), bit flip, phase flip, depolarizing noise, and thermal relaxation is not evaluated. These noise sources are standard in quantum computing.\n\n- Several claims in the paper lack theoretical or numerical support. Ablation studies providing numerical evidence would help support these claims:\n - Line 306-308: \"However, the cumulative reward obtained ..... terminating the episode.\"\n - Line 803-806: \"Finally, for the linear-connectivity agents, .... faster with this term.\"\n\n- The paper lacks open-source code and a reproducibility statement.\n\n\n[1] Fosel, Thomas, et al. \"Quantum circuit optimization with deep reinforcement learning\", arXiv:2103.07585 (2021)\\\n[2] Patel, Yash J., et al. \"Curriculum reinforcement learning for quantum architecture search under hardware errors\", ICLR (2024)\\\n[3] Zen, Remmy, et al. \"Quantum Circuit Discovery for Fault-Tolerant Logical State Preparation with Reinforcement Learning\", arXiv:2402.17761 (2024)\\\n[4] Kremer, David, et al. \"Practical and efficient quantum circuit synthesis and transpiling with Reinforcement Learning\", arXiv:2405.13196 (2024)\\\n[5] Wang, Hanrui, et al. \"Quantumnas: Noise-adaptive search for robust quantum circuits.\" 2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA)\\\n[6] Wu, Wenjie, et al. \"Quantumdarts: differentiable quantum architecture search for variational quantum algorithms\", ICML (2023)\\\n[7] Peham, Tom, et al. \"Depth-Optimal Synthesis of Clifford Circuits with SAT Solvers\", QCE (2023)\\\n[8] Schneider, Sarah, et al. \"A SAT Encoding for Optimal Clifford Circuit Synthesis\", ASPDAC (2023)"
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The questions are included in Weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The authors introduce a novel reward function that is inspired by the intrinsic nature of the QSP problem, integrating it into the RL framework to enhance performance in terms of circuit size reduction compared to existing algorithms.\n2. The proposed algorithm demonstrates zero-shot generalization, significantly decreasing computational complexity compared to traditional RL algorithms, which require retraining for each new target state.\n3. The authors provide a theoretical analysis of the generalization capabilities of the proposed algorithm."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study presents a zero-shot reinforcement learning (RL) algorithm that incorporates a novel reward function specifically designed for quantum state preparation (QSP). The authors concentrate on stabilizer state preparation tasks involving up to 9 qubits. Notably, the proposed algorithm demonstrates strong generalization capabilities on unseen data, utilizing a training dataset that is 0.001% smaller than the total state space. Furthermore, the quantum circuits generated by this algorithm achieve a circuit size reduction of over 60% compared to existing methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. As someone who is not well-versed in RL algorithms, I found it somewhat challenging to follow the overall algorithms framework. The authors present the general RL algorithms separately before introducing their specific formulation for QSP tasks along with the novel reward function. It would enhance clarity if the authors summarized the algorithm in a table format.\n2. The authors primarily use the total number of gates in the circuit, including both single-qubit and two-qubit gates, as the main evaluation metric for circuit size. However, two-qubit gates are more challenging to implement in practical devices and are more susceptible to noise. It would be beneficial to separately compare the counts of single-qubit and two-qubit gates in the circuits generated by the proposed algorithm against those produced by existing algorithms.\n3. Typos: Line 256: \"a the target\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The term \"Zero-shot\" is ambiguous. Typically, \"zero-shot\" refers to training without class labels. In the manuscript, the authors seem to be referring to the unified target trick, which should be clarified.\n\n2. In the introduction, the authors claim that their RL method is applicable to \"arbitrary states.\" However, the states studied are specifically stabilizer states. The authors should either correct this claim or provide results for additional categories of states.\n\n3. How are the experimental results related to the theoretical lower bound of generalization? The authors should consider displaying the success probability achieved during their experiments.\n\n4, What are the definitions of $q$ and $N$ in Lines 517-518? Additionally, the last equation should be numbered for clarity."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The problem studied is highly relevant to the quantum computing community. The manuscript presented a RL algorithm to efficiently prepare stabilizer state, which is valuable for future research.\n\n2. The numerical results is promising compared to conventional algiorthm, even with connectivity constraints, showing the potential utility on near-term quantum computers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This manuscript studied the problem of quantum state preparation (QSP). While theoretical lower bounds for the required operations have been established, efficiently approximating quantum states on near-term quantum computers remains an open question. The authors proposed a reinforcement learning (RL) algorithm for QSP, focusing on the preparation of stabilizer states— a set of states of significant practical interest—using Clifford gates. The authors introduced a unified target state by replacing $|\\psi\\rangle$ with $|0\\rangle$ and approximating backwards, referred as \"Zero-shot\" in the manuscript. Additionally, the authors proposed a moving-goalpost reward (MGR) function that aligns the maximum cumulative reward with the highest final fidelity. On the experimental side, the authors conducted experiments with up to 9 qubits, with and without connectivity restrictions, achieving better performance in terms of gate count compared to referenced algorithms. Theoretically, they proved a loose lower bound on the probability of generalization success."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The manuscript lacks a discussion of relevant literature. The \"Zero-shot\" trick is not new; many existing works have adopted it in reinforcement learning for quantum circuit synthesis and compiling, with QSP being only a subset [1,2]. \n\n[1] Zhang, Yuan-Hang, et al. \"Topological quantum compiling with reinforcement learning.\" Physical Review Letters 125.17 (2020): 170501.\n[2] Qiuhao, Chen, et al. \"Efficient and practical quantum compiler towards multi-qubit systems with deep reinforcement learning.\" Quantum Science and Technology (2024).\n\n2. The claim in the abstract that \"To our knowledge, this is the first work to prepare arbitrary stabilizer states on more than two qubits without re-training\" is questionable. A recent work [3] also proposed a reinforcement learning algorithm for Clifford circuit synthesis.\n\n[3] Kremer, David, et al. \"Practical and efficient quantum circuit synthesis and transpiling with Reinforcement Learning.\" arXiv preprint arXiv:2405.13196 (2024).\n\n3. The authors established a loose lower bound for the probability of generalization success (as indicated in the last equation of the main text). In Table 2, the probability decreases with qubit size, raising concerns about the generalizability of the proposed algorithm. Additionally, in Line 517, the authors stated, \"We did not train the 9-qubit agent to convergence...,\" which raises further concerns about the trainability of the proposed RL algorithm."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the \"Weaknesses\" above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This manuscript introduces a novel novel reward function with provable guarantees, which adds a level of theoretical robustness to the approach.\n- The proposed method's success on stabilizer states, which are crucial for quantum error correction and other quantum information processes, suggests its applicability.\n- The paper demonstrates that their method generates circuits that are up to 60% shorter than those produced by existing algorithms in some cases, which is an improvement in efficiency."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The manuscript presents a deep reinforcement learning approach for quantum state preparation (QSP) . The authors design a novel reward function with guarantees that significantly scales beyond previous works, allowing for generalization to unseen states. They demonstrate their method on stabilizer states up to nine qubits, achieving state-of-the-art performance by preparing target states with varying degrees of entanglement."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- I believe the term \"zero-shot\" in the title of this paper is misleading.Zero-shot learning (ZSL) is a problem setup in deep learning where, at test time, a learner observes samples from classes which were **not** observed during training, and needs to predict the class that they belong to. However, in the context of this paper, training is conducted on a set $\\mathcal S$ of $n$-qubit states. From my perspective, this setup aligns more with traditional supervised learning rather than what is commonly referred to as \"zero-shot\" learning.\n- The authors claim that \"The key idea that allows us to obtain zero-shot inference is the following: a sequence of actions starting in state |ψ⟩ leading to the state |0⟩ yields the inverse of a circuit preparing |ψ⟩ starting from |0⟩\". However, I believe this concept is not original to this paper. For example, a similar idea was proposed in [1]. Yet, the authors have failed to cite it.\n- I doubt the authors overstated their results. It's important to distinguish between the preparation of arbitrary quantum states and the preparation of arbitrary stabilizer states. The latter is a more specialized and arguably less challenging problem compared to the general case of arbitrary state preparation. Stabilizer states, due to their specific properties and structure, may be more amenable to efficient preparation methods, which might not directly translate to the broader and more complex task of preparing any arbitrary quantum state.\n\n[1] Huang, Hsin-Yuan, et al. \"Learning shallow quantum circuits.\" *Proceedings of the 56th Annual ACM Symposium on Theory of Computing*. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- This max { } function notation is confusing to me, L323 (e.g.) uses max{a : b}, L324 uses max{a | b}? How is this supposed to be read and understood?\n- Why was n=9 picked as the benchmark and not something higher, since stabilizer circuits can be so efficiently computed? L517 mentions that the he 9-qubit agent was not trained to convergence, why is that?\n- How were the observation and action choices implemented in detail?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The ideas of this paper are presented understandably and mostly concisely, the format is clean and the structure is logical. The background is explained very extensively. While I think the application of RL for stabilizer states has fundamental issues, see below, I find the concept of reverse construction for a fixed end-state an interesting idea. I also think the extra effort for the study of entangling entropy and the attempt at a generalization bound, albeit quite loosely estimated, deserve praise and should be more common in empirical QRL papers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work utilizes a reinforcement learning PPO agent to solve the quantum state preparation problem. The key idea involves a potential based reward that is based on goalposts - earlier fidelity penalties are weighted less to encourage exploration and the preparation of partially suboptimal but intermediately necessary states. Training is conducted on stabilizer circuits, which can be completely classically simulated and only involve the H, CNOT and phase gate variants, the placement of which are used as actions. To reduce the set of possible end-target states the RL agent trains to reverse the state preparation, starting from the target state towards the |0> state. After training a one-shot model is evaluated on a study of random target states and a set of states relevant for quantum error correction is shown to yield circuits preparing valid states with fewer gates than analytical models. An analysis on the entangling entropy is given empirically and a upper bound is approximated via Monte-Carlo estimations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Nonetheless, I think this paper has three major issues:\n\n- While fundamental explanations are generally good, the first 5 1/2 pages are simply explanation of fundamental QC / RL basics (excluding Figure 2). The first original section starts at the bottom of page 6, which is not efficient use of space. Much of Chapter 2 could be formulated much more concise or moved to an appendix. In general the information and result density is rather light for a A* publication with 10 pages. On the other hand the actual RL algorithm realization, and the training details should be made much more clear. From what I understand the observation is the whole stabilizer tableau (flattened probably?) and the actions are discrete choices over the set of all applicable gate-actions - or gate-target pairs for two-qubit gates. I am not clear if the addition of g_i in L343 is included somehow, or how often the actions are chosen. Once for each qubit for every layer? Some of the training details like hyperparameters are in the appendix, but are critical information for a self-contained paper and should be moved to the main-paper. (E.g. the training episodes, the max-T curriculum for increasing n, the fidelity relaxation and the fact that the otherwise-case of the reward was not even used as described in the paper.)\n- The goalpost potential-reward as part of the contribution feels quite over-engineered and I’d argue this work is lacking a critical evaluation in the RL context. Similarly, while the reverse generation thought is interesting, without a comparison to the forward-generation training (zero to target) I find it hard to judge any significance. A study of the generation with a simpler fidelity based reward as is found in the literature (even the ones cited here, Kölle et al., Gabor et al., Wu et al.) would have helped, as would a test with a simpler, fixed step-based regularizer instead of the goalpost. A direct comparison of ‘backward’ vs. ‘forward’ generation training would also help support the claims made here.\n- Finally, I think the RL application for stabilizer circuits in particular has fundamental issues. While they are indeed used in QEC, the point and the reason for that is, that they are completely and very efficiently classically simulated. (The abstract of Aaronson & Gottesman mentions thousands of qubits.) In other words, any efficiency gain w.r.t. circuit size, generalization ability or else, could also be classically brute-forced. Without application to circuits with unsimulatable quantum-effects (including rotation gates, e.g.) efficient one-shot generalization is an interesting insight, but lacking overall impact in terms of QC relevancy.\n---\nMinor Issues:\n- L256 Typo: parameters: [a] the target …\n- Inconsistent use of ‘Fig.’ and ‘Figure’ references."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Reinforcement learning framework for zero-shot quantum state preparation."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024train,\ntitle={Train once and generalize: Zero-shot quantum state preparation with {RL}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zHeHIIFQVF},\nnote={under review}\n}"
},
"abstract": {
"value": "Quantum state preparation is an essential cornerstone of quantum information science and quantum algorithms. Notwithstanding worst-case hardness results, designing efficient and scalable methods for approximate state preparation on near-term quantum devices remains a significant challenge. In this work, we present a deep reinforcement learning approach to quantum state preparation that allows for the zero-shot preparation of any state at a fixed system size. We scale significantly beyond previous works by designing a novel reward function with provable guarantees. In our experiments on stabilizer states up to nine qubits, we achieve generalization to unseen states by training on less than $10^{-3}$\\% of the state space. We prepare target states with varying degrees of entanglement content and obtain insight into the quantum dynamics generated by our trained agent. Benchmarking shows our model produces stabilizer circuits up to $60$\\% shorter than existing algorithms, setting a new state of the art. To our knowledge, this is the first work to prepare arbitrary stabilizer states on more than two qubits without re-training."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Quantum State Preparation",
"Deep Reinforcement Learning",
"Zero-shot Inference",
"Off-the-shelf Algorithms",
"Generalization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/52b4326078a6c1e06c5148280b1d8e049997093b.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/114d5149e0368650799a76966c9d81f6b95be3c5.zip"
},
"title": {
"value": "Train once and generalize: Zero-shot quantum state preparation with RL"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zHf7hOfeer | Zero-cost Proxy for Adversarial Robustness Evaluation | main | Active | Neural architecture search;adversarial robustness;zero-cost proxy | other topics in machine learning (i.e., none of the above) | 5;6;6 | 3;3;4 | 3;3;3 | 3;4;3 | 2;3;2 | 5.666667 | 3.333333 | 3 | 3.333333 | 2.333333 | 0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- In the preliminatries, 3.1., the formulation is bit awkward; if the function $f_{\\theta_t}$ is a function $\\mathbb{R}^d \\rightarrow \\mathbb{R}$, then the output will be a single scalar, but in the equations the l2-norm calculation is applied … I also checked the reference paper (Xu et al., (2021)), but it is slightly different. \n\n- The overall experiments on Kendall’s tau seems to be not high (Fig.2,Fig.4). (Relatively high, but not significant in absolute terms). Can the authors explain this in further details?\n\n- The cost analysis is done with the GPU search day metric. How is the cost calculated? Under which GPU?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is easy to read through, well presented.\n- The paper proposes relatively strong method for the proxy of searching adversarially robust architectures.\n- The paper includes numerous validations including performance under white-box, black-box attacks, datasets, numerous datasets, and ablation studies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper targets a lightweight proxy to assess the adversarial robustness of Neural Architecture Search (NAS) networks. The proposed proxy is represented as the product of two terms (Eq. 4). The first term, based on the intuition that adversarial and natural accuracy are reversely correlated, is introduced to consider adversarial accuracy and is experimentally validated. The second term is proposed through a theoretical approximation using the Neural Tangent Kernel. The proposed proxy is experimentally validated."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. To read through, I felt that the paper has to focus on two points: \n- Is the proposed method superior in terms of cost-efficiency? (zero-cost)\n- Is the proposed method superior in terms of robust accuracy?\n- For now, RQ1 and RQ2 look a bit similar (lines 255-256), before looking at the experimental results.\n\n2. I feel the cost-efficiency related explanation is slightly short. The authors could include detailed explanations for (lines 183-185), such as the reason the authors chose to iterate samples over generating adversarial samples itself and why it is efficient."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "While the contribution in terms of speed reduction is significant for the proposed method, my concern is the increase in adversarial robustness as compared to the baselines is very minimal in the majority of the cases."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The reduction in the search cost is very significant. \n- The paper is well-written with typos. \n- From Table 3, the transferability results are good. \n- A novel dataset will be released by the authors, which will help advance research in the robust NAS direction. \n- Comparison with the latest state-of-the-art methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a zero-shot proxy that requires no training and depends only on the initial neural network weights to find the robust architecture. Using this zero-shot proxy reduces the neural architecture search speed drastically. Experiments are performed on multiple datasets with varying resolution and number of classes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the search cost is reduced significantly, the increase in the adversarial robustness is minimal (less than 1% in most cases for Table 1). A similar observation can be derived from the results in Table 4. \n- Minor typos: \"8/2550\" instead of \"8/255\" on line 301, \"We\" instead of \"we\" on line 63."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to Weaknesses Section"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+ The preliminaries on the Neural Tangent Kernel (NTK) and input loss landscape are clearly presented, making the methodology easy to understand. All symbols and equations are well-defined. \n\n+ A robust theoretical justification for the proposed zero-shot proxy is provided by establishing the corresponding loss as an upper bound for adversarial loss.\n\n+ The efficiency of the search process using their technique is clearly demonstrated in the experimental section.\n\n+ The authors introduce a novel dataset called Tiny-RobustBench, which may prove valuable to the research community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a zero-shot proxy applied directly to the initial weights of a deep neural network (DNN), serving as an upper bound for adversarial loss. This approach enhances the adversarial robustness of the network without requiring adversarial examples. Theoretical justification for the zero-shot proxy is provided based on the Neural Tangent Kernel (NTK) and the input loss landscape. Experimental results demonstrate a 20x speedup over state-of-the-art robust neural architecture search (NAS) methods, with no loss in performance against black-box and white-box attacks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- In the theoretical analysis section, $\\lambda_{\\min}(\\hat{\\Theta_{\\theta_o}})$ is approximated based on empirical observations to $-\\lambda_{\\min}(\\Theta_{\\theta_o})$, which weakens the theoretical foundation of the work. I wonder if the authors could establish the relationship between the two NTKs in strict mathematical terms, such as showing that the approximation holds true within a specific margin of error with a certain probability. Currently, the analysis relies on an empirically driven approximation, which undermines its theoretical robustness.\n- The performance of the black-box and white-box attacks is demonstrated using a single dataset (CIFAR-10) in Tables 1 and 2. The authors should consider providing performance comparisons across multiple datasets for the chosen baselines in both tables. Additionally, Table 2 should clearly present a comparison of search efficiency.\n- I am curious about the rationale for selecting only specific models (ResNet-18 and PDARTS) for comparison in Table 3. Would it be possible to include models from Table 1 to evaluate their transferability in Table 3?\n- There is inadequate justification for why their approach exhibits better transferability in Table 3. It would be beneficial to provide justification grounded in their theoretical contributions, particularly highlighting the unique advantages that their proposed techniques offer to enhance transferability.\n- The authors should discuss the assumptions made in their analysis and their validity within the context of the study. For instance, when transforming from Equation 1 to Equation 3, it would be helpful to clarify the assumptions involved and how they apply in their setting. Specifically, I am uncertain whether the assumption of infinite-width DNN parameters is valid; if it is not, what implications does that have for this paper? I encourage the authors to include these details, even if they are standard, as doing so would enhance the paper's readability and facilitate the assessment process."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024zerocost,\ntitle={Zero-cost Proxy for Adversarial Robustness Evaluation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zHf7hOfeer},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep neural networks (DNNs) easily cause security issues due to the lack of adversarial robustness. An emerging research topic for this problem is to design adversarially robust architectures via neural architecture search (NAS), i.e., robust NAS. However, robust NAS needs to train numerous DNNs for robustness estimation, making the search process prohibitively expensive. In this paper, we propose a zero-cost proxy to evaluate the adversarial robustness without training. Specifically, the proposed zero-cost proxy formulates the upper bound of adversarial loss, which can directly reflect the adversarial robustness. The formulation involves only the initialized weights of DNNs, thus the training process is no longer needed. Moreover, we theoretically justify the validity of the proposed proxy based on the theory of neural tangent kernel and input loss landscape. Experimental results show that the proposed zero-cost proxy can bring more than $20\\times$ speedup compared with the state-of-the-art robust NAS methods, while the searched architecture has superior robustness and transferability under white-box and black-box attacks. Furthermore, compared with the state-of-the-art zero-cost proxies, the calculation of the proposed method has the strongest correlation with adversarial robustness. Our source code is available at https://anonymous.4open.science/r/ZCP-05B6."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Neural architecture search",
"adversarial robustness",
"zero-cost proxy"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/6617f2a0a717046cf4003582bdfec08791e443dc.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Zero-cost Proxy for Adversarial Robustness Evaluation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zIPFFhowuM | Proof Search Augmented Language Models | main | Active | reasoning;transformers;neural theorem proving;neural network architectures;differentiable algorithms | neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.) | 5;5;6;6 | 4;3;4;4 | 2;3;3;2 | 2;2;3;3 | 2;3;3;3 | 5.5 | 3.75 | 2.5 | 2.5 | 2.75 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Is there any harder QA benchmark that involves logic or multi-step reasoning suitable to evaluate the proposed method?\n\n- How do current LLMs w/wo CoT perform in the SimpleLogic dataset?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed method is effective. The experiments show the proposed method trained with rule labels can obtain nearly 100% accuracy on the OOD splits, which largely improves the previous methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper focuses on multi-step reasoning tasks that require a model to predict whether a statement is true given unification rules and facts in natural language. The authors first define several rule templates that contain different numbers of unification terms, then train a Transformer and cross-attention module to fill the term slots with entities and their features from a sentence. Lastly, the extracted rule is input to a neural theorem prover to obtain the proof and the truth prediction. The author uses three types of supervision: label supervision, proof supervision, and rule supervision. Experiments show training with rule supervision can obtain 96.7% accuracy on the OOD test split."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The evaluation dataset and the compared method are too simple. Since the paper claims current TLMs have limits in reasoning, it should compare with recent SOTA LLMs and show their incapability in a complicated benchmark.\n\n- The comparison is unfair. The proposed method with proof and label supervision achieves inferior performance to baseline TLM and only obtains nearly 100% accuracy with rule-level supervision. A more appropriate baseline should also have these labels. For example, an LLM trained to generate text in \"fun :- happy kind\" format and predict the label with CoT."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How does the approach work on tasks other than SimpleLogic?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper presents multiple training objectives and studies which impact performance.\n- An efficient and hardware-aware algorithm has been proposed for proof search.\n- The authors show evaluations of the SimpleLogic dataset and generalization capabilities."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce Proof Search Augmented Language Models (PSALMs), a differentiable proof search module combined with a transformer. The authors propose an efficient hardware-aware method for proof search (at further depths than prior works) and pruning and performing ablations to identify the strengths of granular rule supervision."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There are limited empirical evaluations other than SimpleLogic.\n- More investigation could be conducted into the scaling of the proof search."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Could you explain the reasons why loss L_{rule} is minimized much smaller than the other two? It seems to me it shows that many entries in matrix T are zeroes.\n\n- As a follow-up question, since the loss terms can be minimized to a different scale, should different weights improve the performance when combining them such as the results in Tables 1 and 2?\n\n- Section 3.1 states that \"Encoding rules independently prevents the TLM from “shortcutting” the NTP,\" could you provide corresponding empirical evidence and quantify the impact?\n\n- Figure 3 shows the range of proof score is different from the case on the left and the case on the right. Could you explain how the larger range for L_{rule} affect its generalization and separation between positive and negative ones?\n\n- Could you provide the key reasons underlying the big differences between the last two rows of Table 1 in terms of OOD accuracy and OOD soundness?\n\n- I assume the vanilla TLM model is the DeBerta model with 435M parameters. However, there are manyother larger LLMs developed. Could you provide a baseline or baselines using state of the art LLMs?\n\n- Could you explain how the proposed method overcomes the exploding computational cost compared to other methods? As far as I could understand, the pruning and parallelization do not change the nature of exponential growth.\n\n- Could you comment on the complexity of the proposed system with respect to the depth? In addition to depths 5-6, could you provide results for depths 7, 10, and 20?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well written and the key components are motivated and explained well. The experimental results seem to be systematic and convincing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a PSALM model that combines a transformer-based language model with a proof search system. Compared to the NTP introduced by Rocktaschel & Riedel in 2017, it uses pruning and parallel execution to improve scaling and throughputs. Using a new loss term, the system provides significant improvements over a vanilla model for (limited) out-of-distrbution generalization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The novelty introduced in the paper seems limited. Pruning is well known; while it improves scaling, in the worst case, the complexity remains the same. Also the improvements over the vanilla model do not reflect the improvements over the state of the art transformer models.\n\nThe experimental results are not fully explained. For example, the row of PSALM L_{rule} in Table 1is not explained and no comments are provided in the paper even though it is a key result."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why are language models such as Llama not used in the experiments?\n2. If language models are used, do we still need the complex procedure to make it work?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper proposes a new method to improve the model reasoning abilities by semantics proof search. The experimental results on the SimpleLogic task are nice."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Summary:\n\nThis paper applies an encoder-only model to represent the rule and statement in the proof. Then, the Neural Theorem Prover (NTP) utilizes the representation to perform a backward-chaning search of proofs and sort them.\n\nContributions:\n\nThis paper proposes a new method to improve the model reasoning abilities by semantics proof search. The experimental results on the SimpleLogic task are nice."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The experiment section is insufficient. First, only one dataset named SimpleLogic is used in the experiment. Second, the author only uses one model in the experiment. Last but not least, there are some work aimed to improve the proof reasoning abilities. However, I have not seen the gap between those work and the method proposed by the authors.\n\n2. The terminology in this article is not to the standard. For instance, we typically refer \"transformer\" to encoder-decoder architecture models instead of encoder-only models."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We add differentiable proof search to transformers to improve generalization across problem distributions"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024proof,\ntitle={Proof Search Augmented Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zIPFFhowuM},\nnote={under review}\n}"
},
"abstract": {
"value": "Transformer language models (TLMs) exhibit an impressively general range of capabilities. A growing body of work aims to harness these models for complex reasoning problems expressed in natural language. However, recent theoretical and empirical results have revealed limits to the algorithmic generalization of TLM reasoning. Transformers trained to solve deduction problems from one distribution fail to solve instances of the same problem class drawn from other distributions. We propose to improve the systematic reasoning capabilities of TLMs via a differentiable proof search module, yielding proof-search augmented language models (PSALMs).\nIn a PSALM, a Transformer is responsible for predicting rule and statement representations for a neural theorem prover (NTP). The NTP performs a backward-chaining search over proofs, scoring them based on a soft unification operation. Our principal challenge is to train models to reason without also learning spurious features.\nOur results show that rule-level supervision allows PSALMs to successfully generalize across problem distributions in deduction tasks where vanilla transformers fail to learn systematic behavior. We also find we only need label supervision to adapt PSALMs to more natural text."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"reasoning",
"transformers",
"neural theorem proving",
"neural network architectures",
"differentiable algorithms"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a40e2dcd942299ee758c8eb9676946dfe2867784.pdf"
},
"presentation": null,
"primary_area": {
"value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Proof Search Augmented Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zIqLQVBxdd | The Latent Road to Atoms: Backmapping Coarse-grained Protein Structures with Latent Diffusion | main | Active | Protein Structure Reconstruction;Latent Diffusion;Discrete Protein Representations | applications to physical sciences (physics, chemistry, biology, etc.) | 3;3;5;5 | 4;4;3;4 | 1;3;3;3 | 2;2;3;2 | 1;1;3;3 | 4 | 3.75 | 2.5 | 2.25 | 2 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "A different paper was used to cite GenZProt in Section 5.1, instead of (Yang & Gomez-Bombarelli, 2023)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The model shows good structural accuracy and chemical validity.\nThe usage of latent diffusion for backmapping is new. Discretization of the local geometry for backmapping has been previously explored (Chennakesavalu & Rotskoff, 2024) using rotamer libraries, but the learnable discretization (via VQ-VAE) proposed by this paper is original and reasonable.\n\n(Chennakesavalu & Rotskoff, 2024) Data-efficient generation of protein conformational ensembles with backbone-to-side chain transformers, J. Phys. Chem. B 2024, 128, 9, 2114–2123"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new framework for backmapping, called Latent Diffusion Backmapping (LDB). This method uses a VQ-VAE to discretize local all-atom structures into a codebook latent space. The diffusion model is then trained on this latent space distribution, so that during inference, the latent variables are generated conditioned on the CG structure and then mapped back to all-atom structures via the VQ-VAE decoder."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While I think the modeling choices like discretization and latent diffusion make sense and are meaningful, the evidence of their advantages is not convincing, primarily due to the lack of evaluation on diversity or distribution matching between the ground truth and sampled conformations.\nIn GenZProt paper, diversity and the Earth Mover’s Distance (EMD) scores are reported in Appendix A, and there are qualitative analysis of diversity/distribution matching in Figure 8. In DiAMoNDBack paper, the diversity score is reported with the comparison with GenZProt in Table 1 and 2, and there are both quantitative (in JSD) and qualitative analysis of the generated distributions in Figure 5.\nSince the paper is tacking the ‘backmapping’ problem, which aims to reconstruct the fine-grained conformational ensemble conditioned on the CG structure, rather than protein side chain packing (PSCP), which might suffice to find a single probable fine-grained structure, some evaluation on the generated ensemble’s diversity and distribution matching is required.\nMoreover, the paper claims “Diffusion leverages stochastic noise, which allows for exploration across diverse conformations while maintaining structure validity. This is evident in the RMSD and Clash metrics, where our diffusion-based model consistently achieves better results. (line 485-505)“. The structure validity claim is well-supported by RMSD, GED, Clash, Interaction, GDR scores, but there is no evidence for the conformational diversity claim.\nI have a concern that the discretized latent space would be bad for generating diverse structures despite being better at generating chemically valid structures. So it would be great if the authors can show that the discretization does not harm the diversity. If the authors provide diversity and distribution matching evaluations, I am willing to consider increasing the rating."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "* \"Residues with fewer than 13 heavy atoms are excluded.\" Most amino acids have fewer than 13 heavy atoms. Could the authors clarify?\n* How are the internal coordinates defined exactly? Why are there 13 bond lengths, bond angles, and dihedral angles?\n* \"The network processes three inputs: node coordinates, atom types, and an initial noise vector.\" Why are atom types still involved at the latent diffusion stage?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The proposed latent diffusion approach has not been previously developed for the backmapping task. The approach is reasonable and appears to be effective.\n* By encoding the all-atom coordinates into node-level latents in a graph, the dimensionality of the generative modeling task is reduced.\n* The encoding and decoding of the latents from internal coordinates helps ensures that decoded structures are chemically valid."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper develops a model for backmapping coarse-grained protein structure. The model architecture consists of two parts (1) a VQ-VAE is trained to map all-atom structures into a discrete latent associated with each amino acid residue (2) a GNN is trained to generate these discrete latents from the coarse grained structure via diffusion. The method is assessed on PED, ATLAS, and PDB structures and is shown to outperform recent methods GenZProt and DiamondBack."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Originality**\n* Although the approach is reasonable and well-motivated, it amounts to a relatively straightforward application of currently popular ideas. Latent diffusion and discrete tokenization have been well explored in many related contexts, including protein structure. There are not any aspects of the model design that seem particularly surprising or insightful.\n* To improve on this axis, the paper should present nonobvious and/or nontrivial conceptual development that motivates the methodology.\n\n**Quality**\n* The experimental validations are brief and are of average quality. Given that the method is generative, i.e., produces a backmapped distribution, it is surprising that there are no distributional accuracy metrics. Chemical validity is also a rather low bar that can be passed by any heuristic reconstruction + relaxation. Some other metrics introduced by the authors are not well justified or explored.\n* To improve on this axis, the paper should present evaluations that go beyond than existing works in this space. Some suggestions include, e.g., energies, distributional similarities, or metrics used by the PDB in validating atomic models. Since there is a smaller body of ML work on this task, case studies or similar experiments would better make the case that the improvements are meaningful (and not just incremental).\n\n**Clarity**\n* The paper's structure is a little odd, placing a lot of emphasis on architectural and training details but deferring dataset and evaluation details to the appendix. I would recommend swapping this because the task is relatively unfamiliar to the ML audience.\n* The paper contains many overly verbose \"filler phrases\" that repeat similar points and do not add much to the exposition. 
Some examples of what I mean (there are many, many more):\n\n> Such dynamics lead to distinctive conformational variations, which contribute to the diverse functions\nof proteins and are of great significance for maintaining normal features in vital organisms. (What are \"distinctive variations\", \"normal features\", or \"vital organisms\"?)\n\n> Despite challenges posed by the scarcity and uneven distribution of protein conformation data, we\nemployed a Vector Quantized Variational Autoencoder. (What is an \"uneven distribution\"? Why does scarcity contraindicate a VQ-VAE, and why did you proceed with one anyways?)\n \nThese are just rhetorical questions --- my point is that the authors do not need to say these things if there is not a clear reason to say them. In general my sense is that the introduction could be trimmed by an entire page without loss of meaning.\n\n* To improve on this axis, the paper should be significantly rewritten for concision and clarity, and should focus on explaining things less likely to be familiar to the audience.\n\n**Significance**\n\nIn sum, although the work does technically advance the state of the art on this task, in my opinion the significance of the results and novelty of the method do not quite meet the bar for ICLR."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "NA"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- LDB effectively addresses the challenges of limited exploration in conformational space and accurately reconstructs all-atom 3D protein structures from coarse-grained representations. \n- The use of discrete latent representations simplifies the diffusion process and improves overall efficiency, while also enhancing structural accuracy and chemical fidelity. \n- The evaluation of LDB on multiple diverse protein datasets demonstrates its state-of-the-art performance and versatility, highlighting its potential for practical applications in computational biology."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents Latent Diffusion Backmapping (LDB), a novel approach for backmapping coarse-grained protein structures. LDB combines discrete latent encoding with diffusion to address challenges in reconstructing atomistic conformations. Results on multiple datasets demonstrate its state-of-the-art performance and versatility."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The presentation of the method is not very clear. Feel free to correct me if there are any misunderstandings.\n\n- In line 243, an SE(3)-equivariant GNN is used to output bond lengths, angles, etc. Does this indicate that an equivariant network generate an invariant representation? In line 284, the input of ProteinMPNN is CG models. Why does a CG model contain the atom type? \n- In line 286, why do you add noise to the CG model? In my opinion, it serves as a condition into the network, like the BERT in text-to-image models. But in Stable Diffusion, we never add noise to text embeddings.\n- In line 295, how do you parameterize the epsilon-theta? Is it the ProteinMPNN?\n\nMore ablation studies are required. Since a main contribution of this study is latent discrete diffusion, it would be interesting to see what would happen if the diffusion models are trained in the coordinate space and how the vocab size and hidden size affect the result."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "My major questions are above. Additional questions.\n\n* Line 168. What is n_f? What are \"element types of the protein\"? Why are the amino acids real valued instead of integer or categorical?\n\n* Line 177. There are no details with how the internal coordinates are obtained. This is another reason why this work is not reproducible due to the lack of algorithmic details.\n\n* Line 185. The work claims to generate all-atom structures but then this states only the C-alpha coordinates are used. So which is it?\n\n* Line 253. The authors point to Jing et al 2022 to reconstruct full-atom structure but Jing et al 2022 is torsional diffusion for small molecule conformations. How does this relate to the protein structures here?\n\n* Line 273. Where is your evidence of 3D diffusion doing poorly? We know SOTA methods like AlphaFold3 [1] and RFdiffusion [2] work very well in 3D.\n\n* Why was AlphaFlow not compared to?\n\n[1] https://www.nature.com/articles/s41586-024-07487-w\n\n[2] https://www.nature.com/articles/s41586-023-06415-8"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "* Discrete latent diffusion approach is novel to modeling protein conformations.\n* LDB outperforms Genzprot and DiAMoNDBack on most metrics across the three datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Latent Diffusion Backmapping (LDB) is a generative model approach for modeling protein conformations. A discrete latent space is learned over all-atom protein structures with a VQ-VAE followed by a diffusion model to sample protein conformations. The work claims state-of-the-art (SOTA) results on three protein datasets which I do not agree with. LDB does not achieve SOTA on all metrics and the metrics are confusing as to whether they are relevant to evaluating protein ensembles."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have several major concerns.\n\n* After having read the paper twice (including the appendix), I still do not understand the method. The problem definition in section 3 is to sample from p(x|X,V) where x is the coordinates and (X, V) is the coarse grained (CG) representation. Then the method in section 4 proceeds to describe a VQ-VAE for compressing protein structures then a graph latent diffusion model to sample discrete latents. Which part is sampling x and which part is sampling (X, V)? The methods section is poorly written with many details left unanswered.\n\n* The paper claims several times that \"diffusion in high-dimensional spaces complicates the multi-step denoising process\" but provides no evidence of this. In fact, if the latent dimensionality is 36 then isn't this higher dimension than the 3 dimensional coordinates? I would have liked to see a direct comparison or some references but none are provided.\n\n* I do not understand the metrics. If the benchmarks are protein conformation datasets then there should be distributional metrics such as in AlphaFlow [1]. For instance, what does it mean for computing RMSD to PED00055 when it has 20-140? Do you take the min RMSD of the model's samples to the frames? Each metric described in the appendix lacks details on how to calculate them.\n\n* The paper in its current form is not reproducible. There just lacks too much details on the model, metrics, baselines and ablations (how was VAE-difusion and VQ-VAE-flow implemented?). Why was AlphaFlow not compared to?\n\n\n[1] https://arxiv.org/abs/2402.04845"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024the,\ntitle={The Latent Road to Atoms: Backmapping Coarse-grained Protein Structures with Latent Diffusion},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zIqLQVBxdd},\nnote={under review}\n}"
},
"abstract": {
"value": "Coarse-grained molecular dynamics simulations offer computational efficiency for exploring protein conformational ensembles and thermodynamic properties.\nThough coarse representations enable large-scale simulations across extended temporal and spatial ranges, the sacrifice of atomic-level details limits their utility in tasks such as ligand docking and protein-protein interaction prediction.\nBackmapping, the process of reconstructing all-atom structures from coarse-grained representations, is crucial for recovering these fine details.\nWhile recent machine learning methods have made strides in protein structure generation, challenges persist in reconstructing diverse atomistic conformations that maintain geometric accuracy and chemical validity.\nIn this paper, we present Latent Diffusion Backmapping (LDB), a novel approach leveraging denoising diffusion within latent space to address these challenges. \nBy combining discrete latent encoding with diffusion, LDB bypasses the need for equivariant and internal coordinate manipulation, significantly simplifying the training and sampling processes as well as facilitating better and wider exploration in configuration space. \nWe evaluate LDB’s state-of-the-art performance on three distinct protein datasets, demonstrating its ability to efficiently reconstruct structures with high structural accuracy and chemical validity.\nMoreover, LDB shows exceptional versatility in capturing diverse protein ensembles, highlighting its capability to explore intricate conformational spaces. \nOur results position LDB as a powerful and scalable approach for backmapping, effectively bridging the gap between CG simulations and atomic-level analyses in computational biology."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Protein Structure Reconstruction",
"Latent Diffusion",
"Discrete Protein Representations"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b2e524b09b9b83663885af44578dabf01f1b50de.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to physical sciences (physics, chemistry, biology, etc.)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "The Latent Road to Atoms: Backmapping Coarse-grained Protein Structures with Latent Diffusion"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zJbwrk1DHc | Hierarchical Classification via Diffusion on Manifolds | main | Active | hierarchical classification;graph diffusion | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;5;5 | 3;4;3;3 | 2;2;2;3 | 2;2;2;2 | 2;2;2;2 | 4 | 3.25 | 2.25 | 2 | 2 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Please refer to the Weaknesses:\n\nThe most important questions are regarding the hypothesis (W1-3), missing related work (W7), and the originality (W5).\n\n2. Here are additional questions:\n\n1) Isn’t the inference cost for graph-diffusion inference much higher compared to bottom-up or top-down approaches? \n\n2) Which metric is the most important in the evaluation (L317-323)?\n\n3) Could incorrect predictions also propagate through graph diffusion and negatively impact inference?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Hierarchical classification is a significant problem.\n- The inference method using graph diffusion is interesting. \n- Various ablation studies were provided to demonstrate the effectiveness of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a training and inference method to address hierarchical classification, based on the assumption of hierarchical manifolds. First, a graph diffusion-based approach is introduced, utilizing predictions at all levels, which differs from traditional bottom-up/top-down inference methods. Next, hierarchical multi-modal contrastive finetuning is proposed. The approach demonstrated good performance on the iNat18/21 dataset, and various ablation studies were conducted to validate the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Clarity**\n\n1. Figure 1 is difficult to understand. Please provide more explanation. Also, is it a visualization of actual features? If so, how was it created? If it’s intended as an intuitive example, not actual data, what evidence supports this specific representation? \n\n2. While the hypothesis of hierarchical manifolds serves as a key motivation for the proposed method (Lines 165-169 & Figure 1), it is difficult to find strong evidence supporting this hypothesis. Specifically, why do pretrained features lie on a hierarchical manifold? I believe the structure of the feature space is significantly influenced by the training process, including factors such as the choice of loss function, model architecture, and data labeling. These factors can lead to feature spaces that do not strictly conform to hierarchical manifolds. This is why some previous works focus on learning or embedding hierarchical structures within the latent space [1, 2]. Furthermore, hierarchical relationships in real-world data may not always be represented by nested structures. As such, this assumption appears somewhat oversimplified. If the intention was to convey that the data does not inherently lie on a hierarchical manifold but must be trained to conform to one, it would be beneficial to clarify this in the writing.\n\n3. Since the hypothesis regarding hierarchical manifolds is not clear, it is difficult to understand how graph-diffusion inference can be leveraged even in the absence of HMCF (Figure 1(a)). In other words, what is the relationship between graph-diffusion inference and hierarchical manifold? Isn’t it possible to conduct graph diffusion-based inference using the relationships among all labels, even without the discussion about the hierarchical manifold?\n\n4. Some sections of the writing appear unorganized. For example, in Line 84, the abbreviation \"HMCF\" is introduced before it is defined. 
Additionally, in Line 188,Is “14,036” referring to the number of test samples or the total number of hierarchical labels? Making these points clearer would enhance the conciseness of the writing.\n\n\n\n**Originality** \n\n5. The hierarchical contrastive learning method that utilizes labels at all levels has been proposed previously [3], so its novelty here is somewhat limited. It would be helpful to clarify how the proposed approach differs from [3]. \n\n**Quality** \n\n6. Relatedly, there are many additional experiments in the Appendix that are not referenced in the main text (except for Table 5). If these experiments are relevant and essential to this method, it would be helpful to mention them in the main text so that readers can find them. \n\n7. Some important works seem to be missing from the Related Work section. In addition to top-down and bottom-up approaches, there are hierarchical classification studies that use multi-branch architectures to predict all levels simultaneously based on the complete label hierarchy [4, 5]. A comparison with these approaches would be helpful. These studies aim to address the question raised in Lines 163–164: “Can the predictions across different levels in the category hierarchy mutually reinforce each other to improve overall accuracy?” and are driven by similar motivations.\n\n\n8. As a minor point, citations would be easier to read if placed in parentheses.\n\n[1] HIER: Metric Learning Beyond Class Labels via Hierarchical Regularization, (CVPR, 2023) \n[2] Learning Structured Representations by Embedding Class Hierarchy, (ICLR, 2023) \n[3] Use All The Labels: A Hierarchical Multi-Label Contrastive Learning Framework (CVPR, 2022) \n[4] Your “Flamingo” is My “Bird”: Fine-Grained, or Not (CVPR, 2021) \n[5] Label Relation Graphs Enhanced Hierarchical Residual Network for Hierarchical Multi-Granularity Classification (CVPR, 2022)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The paper uses a generic prompt structure (“a photo of a {class}”) for training the VLM with HMCF. Did you test alternative prompt formulations?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper introduces a novel perspective on hierarchical classification by leveraging manifold learning through a graph diffusion-based inference approach. The combination of hierarchical multi-modal contrastive fine-tuning (HMCF) and graph diffusion is a creative."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the challenge of classifying images within a hierarchical labels, which prioritizes correct coarse labels when fine labels are difficult to predict accurately. The authors argue that standard fine-tuning approaches, which typically optimize models on fine classes using cross-entropy loss, may not fully leverage hierarchical structures. They propose the use of a graph diffusion-based inference strategy. The approach adjusts posterior probabilities across hierarchical levels, which differs from traditional top-down and bottom-up methods by treating the hierarchical taxonomy as a graph."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper could improve by including a more comparison with alternative hierarchical inference strategies beyond traditional top-down and bottom-up methods. \n2. the method depends on vision language models to fully leverage the hierarchical structure.\n3. There is not enough details on the graph diffusion process, and it should give more intuitive explanation of the mechanism."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* What is the difference between differentiable diffusion (DD) and diffusion (D)? Since the explanation for DD is in section 3.2, the diffusion-only case needs to be explained. \n* Based on the results in Tables 1 and 2, DD hardly affects AP and metrics for hierarchical classification, although it improves leaf classification. Why is this the case? \n* Based on the results in Table 3, most of the improvements are due to adding MCF on leaf classes and a minor improvement by adding hierarchy. Can you explain why this is the case? Since there is a discussion in the paper that fine-tuning just on the leaf level is not effective. \n\nSuggestions:\n\n* Adding more discussion to connect the two methods would be great.\n* It would be nice to show detailed performance based on each class to see the method's effectiveness in cases where distinguishing the hierarchical classes is more challenging."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The paper is well written.\n* The idea of hierarchical classification is exciting and important, especially in the application.\n* The inference strategy based on graph diffusion is novel for this task."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a hierarchical multi-modal contrastive for fine-tuning pre-trained vision language models. The authors claim the method can capture hierarchy more efficiently than top-down and bottom-up approaches. Furthermore, they introduce a graph diffusion algorithm for inference motivated by a hierarchical manifold. In the end, they compare the performance of their inference and fine-tuning strategy across different tasks and datasets with other methods and show the superiority of their method compared to them."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The discussion from lines 171-176 is confusing and not accurate since it is about the importance of incorporating the hierarchical nature of data in training and inference. Yet one of the paper's main contributions is an inference method to improve performance.\n* The paper lacks cohesion. It proposes two methods that are hardly connected. One is an inference method based on graph diffusion, and the other is a fine-tuning method for VLMs based on contrastive loss. \n* The numbers in all Tables don't have confidence intervals, so it is hard to grasp how significant the differences are. The authors should include confidence intervals or standard deviations from multiple runs."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- [Methodology] It is not clear how sections 3.2 (inference), and 3.3 (fine-tuning) come together in practice. Are these both applied? Do you first finetune using HMCF, and then apply the graph diffusion? Please elaborate this, perhaps in a pseudo code, algorithmic view. \n- How do you justify this swing of outperformance between D and DD across performance eval tables? Wouldn't it be possible to have one approach (a hybrid) that works best in most/all scenarios? If not, when to use which method? Please elaborate on this."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper reads reasonably well, and the narrative is coherent. \n- The experimental setup in comprehensive. \n- The results look promising compared to the considered baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a combination of a contrastive fine-tuning technique and a graph diffusion inference strategy to improve the downstream performance of hierarchical classification models. This is investigated across two main benchmarks and several settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- [Narrative] Some key points are repeatedly mentioned over and over again throughout the paper (eg. top-down, bottom-up, why they work and don't and why one outperforms the other and even why the proposed approach is better). Overall, the paper can be simplified/shortened to go straight to the point. I understand these are core points to take home, but stating them so many times might not be helpful nor efficient. While on the same topic, several grammatical errors and typos can be found throughout the paper, please make sure you give it another good proof-read. \n- [Relevance] Most baselines and references upon which the problem definition and its significance are built date back to 2 to 5 years ago. Whether hierarchical classification is still such an important problem can be questioned in that light. My suggestion would be to definitely look for more recent literature and baselines that accentuate on the relevance and importance of the problem. \n- [Proof] I'd suggest adding a proof that walks the reader from eq. (2) to (4). It is not that straightforward. \n- [General] I feel using \"graph diffusion\" might be confusing as most recently there is a new branch in literature that focuses on using Diffusion models (in computer vision) over graphs. I'm not sure though how this could be addressed, please give it a thought."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hierarchical,\ntitle={Hierarchical Classification via Diffusion on Manifolds},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zJbwrk1DHc},\nnote={under review}\n}"
},
"abstract": {
"value": "Hierarchical classification, the problem of classifying images according to a predefined hierarchical taxonomy, has practical significance owing to the principle of ``making better mistakes'', i.e., better to predict correct coarse labels than incorrect fine labels. Yet, it is insufficiently studied in literature, presumably because simply finetuning a pretrained deep neural network using the cross-entropy loss on leaf classes already leads to good performance w.r.t not only the popular top-1 accuracy but also hierarchical metrics. Despite the empirical effectiveness of finetuning pretrained models, we argue that hierarchical classification could be better addressed by explicitly regularizing finetuning w.r.t the predefined hierarchical taxonomy. Intuitively, with a pretrained model, data lies in hierarchical manifolds in the feature space. Hence, we propose a hierarchical multimodal contrastive finetuning method to leverage taxonomic hierarchy to finetune a pretrained model for better hierarchical classification. Moreover, the hierarchical manifolds motivate a graph diffusion-based method to adjust posteriors at hierarchical levels altogether in inference. This distinguishes our method from the existing ones, including top-down approaches (using coarse-class predictions to adjust fine-class predictions) and bottom-up approaches (processing fine-class predictions towards coarse-label predictions). We validate our method on two large-scale datasets, iNat18 and iNat21. Extensive experiments demonstrate that our method significantly outperforms prior arts w.r.t both top-1 accuracy and established hierarchical metrics, thanks to our new multi-modal hierarchical contrastive training and graph-diffusion-based inference."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"hierarchical classification",
"graph diffusion"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5ce3a44c17a014dbee2eed3e2a62e05ee900cbab.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Hierarchical Classification via Diffusion on Manifolds"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zJfOyS1YLW | On-Policy Policy Gradient Reinforcement Learning Without On-Policy Sampling | main | Active | reinforcement learning;on-policy;policy gradient;data collection | reinforcement learning | 3;5;5;8 | 5;5;4;4 | 2;3;2;3 | 2;2;3;3 | 2;3;3;3 | 5.25 | 4.5 | 2.5 | 2.5 | 2.75 | -0.70014 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "What is the final choice of the batch size for PPO? Only candidates 1024, 2048, 4096, and 8192 are reported."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Strengths:\n1. Estimating the on-policy gradient requires on-policy samples. Because we do not have access to the true gradient, using the target policy to draw on-policy samples to estimate the true gradient has an estimation error. This error is caused by under-sampled or over-sampled data. Encouraging under-sampled on-policy samples reduces the sampling error and improves gradient estimation. This paper is the first to apply this idea to the PPO algorithm.\n2. This paper proposes an algorithm to learn the behavior policy to encourage data in PPO algorithms."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the problem that on-policy sampling may have sample error throughout the sampling process. Such sampling errors could be remedied by sampling using a different policy. This paper applied this idea to PPO by remediating the sampling error inside each minibatch. Specifically, this paper divides the minibatch of the PPO into more fine-grained nano batches and adjusts the sampling policy based on sampled nano batches to encourage under-sampled samples. This paper claims their results are better than PPO."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses:\n1. This paper adjusts the batch size of PPO to \"1024,2048,4096,8192\" which is much larger than the original batch size (64 samples) of PPO. This paper makes this change because they need to learn a behavior policy to encourage under-sampled data at each update step. In other words, they run a mini-PPO to encourage under-sampled data by finding a behavior policy. This drastic increase in batch size is not well-justified. In fact, it is confusing that their PPO algorithm still achieves the comparable performance of the original PPO with at least 16 times less update due to the batch size increase. I am not sure if the original PPO paper has such large room to change the batch size to 16 times larger while achieving the same performance under the same hyperparameters.\n2. This paper is not well-written. The length of this paper can be greatly reduced. Currently, this paper has too many unnecessary examples and sentences. The theoretical inside of this paper is also weak. I think a good improvement direction is to discuss the relationship between the sampling error and the number of samples and how this paper can remediate the sampling error from the theoretical perspective. Some new interesting findings might also be discovered during this process.\n3. This paper also introduces many fragile hyperparameters. For example, the learning rate for its proposed behavior policy ranges from 10^{-3} to 10^{-5} for an ADAM optimizer. Such a large difference in learning rate is not common for the ADAM optimizer. Moreover, they also require KL cutoff and regularizer coefficient inside the mini-PPO to encourage under-sampled data, which are two more dimensions of hyperparameters."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. As a follow-up insight, there may be cases where we can improve the sampling error in one state, but this will cause worse sampling errors down the road because we will be forced to go into specific states. Wouldn't it make more sense to push this sampling error into the reward instead, and form a suitable policy? if you think this is not an issue, it would be helpful if you can explain why.\n\n2. Something doesn't sit right with me regarding equation 6 and its motivation. If we are only aiming to improve the sampling, in the discrete case we wouldn't care about the distance from the target policy. If we are aiming to also improve control, then most algorithms prevent policy changing too much to improve stability in which case we don't need this additional term (for example in PPO). So is this specifically for the continuous case? It does not make a lot of sense to me, at least according to the examples for the discrete case. I would be happy if you could elaborate on this point. \n\n3. Why did you chose the form given in Equation 5? why not its log or difference? Some explanation, ideally guided by theory would be helpful (it is the importance sampling term, widely used in off-policy learning, so it should make some sense using it in some variation)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The sampling errors problem the authors tackle is interesting, there has not been sufficient discussion on this topic in the RL community. \n\n2. The presentation and writing is generally clear. \n\n3. The sampling errors problem can arise in many real-world setups. \n\n4. The proposes solution is simple in a good way."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors tackle the problem of sampling errors from insufficient data such that the empirical data has some discrepancy with desired policy. The authors propose a method to adaptively adjust the policy to resolve this discrepancy. They show their method works in several test cases including control and continuous action space."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The experiments and motivation the authors provided are not accommodating for the problem. When is this really a problem in the real-world? In most standard examples there is ample time to correct the sampling errors with more data and simulation (this happens with a not a lot of samples in practice). Perhaps in cases of non-stationarity of the system itself this \"quicker\" adaptation become crucial? or maybe for very large state\\action-spaces this is more of a problem. \n\nI propose the authors will try to distill where they're problem is really important and design experiments correspondingly, or at least motivate more strongly towards these cases.\n\n2. Even in a quick look, Equation (5) can be rewritten much simpler to be -max(g, 1-eps). I'm not sure forcing it to look like PPO helps with insight, but it is confusing and unnecessary. That is, unless you formulate it similarly to the advantage.\n\n3. Some questions were raised for me regarding the soundness of the proposed solution, see in the questions section."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I have only one question:\n\nIn Zhong et al., the policy is updated at every step in an online fashion: to me, this makes a lot of sense, since we want constantly to \"correct\" the sampling distribution. Ideally, one wants to employ the method you describe in Proposition 1. \n\nHowever, if I understand well, you propose first to use the target policy to collect the samples and, afterward, to generate a policy that \"corrects\" the previous behavior. However, such a policy is not well defined: it depends on how many samples will be drawn from such a policy. For example, suppose that the target policy is the distribution $\\pi(a=0|s) = 0.5$ and $p(a=1|s)=0.5$. Suppose then that the samples drawn from this distribution are:\n\n0, 0, 0, 0, 0.\n\nThe policy that you want to learn should then put much more probability density on 1 to compensate, but if we sample hundreds of samples with this new policy, then the dataset will be again \"unbalanced\". In my understanding, the idea of Zhong et al. to continuously update the policy is to avoid this issue.\n\nWhy did you propose this \"batch update\" rather than an \"online update\"?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Originality\n-------------\n\nThe idea proposed is novel. Although the method proposed until page 5 is from Zong's paper, and the application to policy gradient estimation seems somewhat trivial, I think it is still valuable and necessary.\n\nQuality\n----------\n\nThe quality of the paper is really good. The authors explain the problem and the main idea very well. The method is sounds and directly addresses the problem presented. The experiments are well-designed, explained, and commented on. The results are equipped with good statistical significance, and the appendix reports the hyperparameters necessary for reproducibility. \n\nClarity\n--------\n\nThe paper is well-written, and the overall idea is exposed very clearly.\n\nSignificance\n-----------------\n\nIt is hard to assess the significance of this paper: 1) it heavily relies on a previous publication, and 2) it does not seem that the method improves so much over the baselines. However, I think the core idea is significant as it can serve as a source of inspiration for other RL problems, as MC is a core technique in RL to estimate many different quantities."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper tackles the usual problem of the high-sample complexity of on-policy policy gradient algorithms by refining a very interesting idea proposed by Zhong et al. in 2022. In that paper, the authors observe that when the number of samples is scarce, the samples might resemble the on-policy distribution poorly, which would be equivalent, in a sense, to dealing with an off-policy distribution. In more simple terms, a low number of samples usually cause a high variance policy gradient estimation. \n\nZhong et al. observe that on-policy samples do not necessarily need to come from the on-policy distribution; for example, the samples\n\nx_i = 0, 1, 1, 0 \n\ncan come from p(x=1) = 0.5 and p(x=0)=0.5, but also from q(x=1) = 0.4 and q(x=0) = 0.6. \nSimilarly, a batch of samples that look very off-policy might actually have been generated from the on-policy distribution, for example\n\nx_i = 1, 1, 1, 1, \n\nmight have been generated from the policy $p(x=0) = 0.5$, $p(x=1) = 0.5$. \nMonte-Carlo estimation (i.e., $E[X] \\approx 1/n \\sum_i X_i$) has higher variance exactly due to the issue described above, when the number of samples $n$ is low, while the variance disappears for $n\\to\\infty$, making it a consistent estimator.\n\nThe variance is the main problem affecting MC estimators and, thus, policy gradient algorithms as well (which are all MC estimators, as they attempt to estimate a respected value using $1/n \\sum_i X_i$).\n\nThe variance of MC estimators can be ideally reduced if small batches of samples seem to better adhere to the target distribution. This can be achieved by breaking the i.i.d sampling assumption. For example, when the target distribution is $p(x=0) = 0.5$, $p(x=1) = 0.5$, One can choose a deterministic policy that alternates 0s and 1s. Such policy makes the MC estimator strictly more efficient. 
In this sense, **an off-policy distribution (that violates the i.i.d assumption) can generate better samples \"on-policy\" samples than the target distribution.**\n\nZhong et al. in 2022 propose to learn a policy that aims to lower the variance of the policy gradient estimators by generating samples that adhere better to the target policy distribution.\n\nThe contribution of this ICLR 2025 paper is to use the idea of Zhong et al. for on-policy policy gradient estimation (and policy improvement), while in the original publication, the idea was only proposed for policy evaluation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Contribution\n-----------------\n\nThe paper's contribution starts on page 5: the author focuses on presenting Zhong et al.'s paper there. The core contribution is the application of Zhon's idea for PG estimation and the use of PPO clipping and KL divergence to prevent too aggressive updates. \n\nWhile I think it is necessary that the authors devote that space to expose Zhong et al.'s idea, I think that it might not be clear to the reader that **that is not** the main core idea of the paper. \n\nClarity\n--------\n\nWhile the paper is well written overall, I do not always like the choice of words and how the problem is described. \nI find talking about on-policy \"data\" vs. \"sampling\" confusing. A sample is not per se on or off-policy. When we speak about \"on-policy\" and \"off-policy\" in estimation, we refer to the generating process (i.e., the distribution) that generates the samples. A sample X_i can be generated by an off-policy distribution $\\beta$ and simultaneously very unlikely w.r.t. $\\beta$ and very likely w.r.t. the target distribution $\\pi$, but that does not make the sample on-policy (or off-policy). Policy gradient estimation is simply off-policy when the distribution that is used to generate the samples is off-policy. \n\nPairwise, I find speaking of \"empirical policy\" as in Proposition 1 confusing. One thing is samples, and one thing is the distribution. What can be told, in general, is that if $n > m$, then $Var[1/n \\sum_i^n X_i] < Var[1/m \\sum_i^m X_i]$, where X_i are samples coming from the same distribution. Now, it would be possible to introduce a new distribution (as done in Proposition 1), and show that the variance of the batch of samples decreases even faster in $n$, when compared to the original distribution. \n\nOverall, I am conscious that much of the \"nomenclature\" is borrowed from Zhong et al., thus the authors might have preferred to keep consistency with that. 
\n\nMinor Comments\n-----------------------\n\nEquation 1 is unclear as it is written: s_0 comes from a different distribution than s_1, which comes from a different distribution than s_2, and so on. Writing $s \\sim d_\\pi$ gives the impression that all states are drawn from the same distribution. Usually, there are two ways to express the policy's return,\n\n1. \n\n$$\nJ(\\theta) = \\mathbb{E}_{s \\sim d^\\gamma_\\pi, a \\sim \\pi}\\left[r(s, a)\\right]\n$$\n\n where $d^\\gamma_\\pi$ is the discounted state-action visitation (see Nota and Thomas \"Is policy gradient a gradient?\" and Sutton et al. \"Policy gradient with function approximation\")\n\n2. \n\n$$\nJ(\\theta) = \\mathbb{E}\\left[\\sum_{t=0}^\\infty \\gamma^t r(s_t, a_t)\\right]\n$$\n\nwhere $s_0 \\sim d_0, a_t \\sim \\pi(\\cdot | s_t), s_{t+1} \\sim p(\\cdot | s_t, a_t)$.\n\nEquations 2 and 4 should be written with the discounted state visitation (see Nota and Thomas \"Is policy gradient a gradient?\" and Sutton et al. \"Policy gradient with function approximation\" :D )\n\nReferences\n---------------\n\nNota, C., & Thomas, P. S. (2019). Is the policy gradient a gradient?. arXiv preprint arXiv:1906.07073.\n\nSutton, R. S., McAllester, D., Singh, S., & Mansour, Y. (1999). Policy gradient methods for reinforcement learning with function approximation. Advances in neural information processing systems, 12. (**see page 3, definition of $d^\\pi$**)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "**[Q1]** What do the authors believe are the main benefits of PROPS relative to existing methods that address data or computational efficiency, including: \n- Methods that reuse data by making off-policy corrections (references in lines 122-124): These algorithms appear to be the most similar to PROPS, and some of them introduce similar levels of added complexity / runtime when compared to on-policy algorithms. However, they are not compared against in the experiments.\n- Off-policy RL algorithms: These methods have been shown to be extremely data efficient and can even be used to train complex tasks directly from real-world data (e.g., [1]). They have also been shown to significantly outperform on-policy methods on some of the MuJoCo tasks used in this paper.\n- Massively parallel on-policy implementations: When a simulator is available, it has been shown that massively parallel implementations of on-policy algorithms such as PPO can result in very efficient RL training in terms of wall-clock time (e.g., [2]). \n\n**[Q2] Experiments**\n- The use of different hyperparameter values across each algorithm makes the comparison of PROPS and PPO difficult to interpret. I appreciate the value in the best vs. best comparisons presented in the paper, but how does PROPS perform when using the same hyperparameter values as PPO (and only tunes the PROPS-specific hyperparameters)?\n- It is not obvious to me that reducing sampling error will necessarily lead to improved performance. Given the non-convex objective of policy gradient methods, noisy updates could even be helpful to avoid / escape local optima. Did you perform any experimental analysis on this connection (e.g., comparing PPO for the same number of total updates but varying batch sizes)? 
A comparison of PPO and PROPS for $b=1$ (with the same PPO hyperparameters) would also provide some insight into this question.\n- It seems that “on-policy sampling” refers to PPO-Buffer in the sampling error figures, which does not seem like a fair comparison because PPO-Buffer does not attempt to correct for the use of off-policy data at all. What is the sampling error of standard on-policy PPO (and how does it compare to PROPS for $b=1$ with the same PPO hyperparameters)? Are similar trends observed?\n\n**Minor:**\n- In (1), the combination of an expectation over the visitation distribution and a summation over time inside the expectation is a bit strange. When summing over time inside the expectation, the expectation is typically written w.r.t. trajectories rather than the visitation distribution.\n- Equation (5) can be written more directly as $\\min(-\\pi_{\\phi} / \\pi_{\\theta}, -(1-\\epsilon_{\\textnormal{PROPS}}))$. The more complicated structure currently being used is needed in PPO’s objective because advantages can be positive or negative, but this is not relevant in (5).\n- Why is IQM used as the performance metric? Typically the mean return is reported.\n- Typos: line 341: Samping -> Sampling, line 485: Figure 5b -> Figure 3b\n- Some of the legends appear inconsistent with other figures (Figure 12) / with the figure caption (Figure 15)\n\n---\n\n**References:**\n\n[1] Smith et al. Demonstrating a Walk in the Park: Learning to Walk in 20 Minutes With Model-Free Reinforcement Learning. In RSS 2023.\n\n[2] Rudin et al. Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning. In CoRL 2022."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- **[S1] Clear and well-written:** The paper is easy to read. The authors clearly present the interesting idea of on-policy data vs. on-policy sampling, as well as the proposed PROPS algorithm. \n- **[S2] Detailed, transparent presentation of experimental implementation:** The authors do a nice job of clearly describing the implementation details used in the experiments. This includes supporting analysis in the Appendix showing results across hyperparameter values."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Proximal Robust On-Policy Sampling (PROPS), an adaptive off-policy sampling method designed to reduce sampling error and improve data efficiency in on-policy policy gradient algorithms. The paper generalizes the adaptive off-policy sampling method ROS introduced in Zhong et al. (2022) by proposing regularization techniques to handle continuous action spaces, and incorporates PROPS into RL training with PPO. Experiments on GridWorld, other discrete-action tasks, and MuJoCo benchmarks compare PPO with PROPS sampling to standard on-policy PPO and a version of PPO that naively reuses recent data without off-policy corrections."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**[W1] Lack of novel theoretical analysis**\n- While Proposition 1 is valuable because it is not obvious that this sampling procedure converges to the correct state visitation distribution, it is a slight generalization of a result that already appears in Zhong et al. (2022). It is also restricted to finite states and actions, and is based on a different sampling procedure than what is proposed in Section 5. \n- Proposition 1 only analyzes what happens in the limit of infinite data, but the transient behavior is important in the context of policy gradient algorithms where a finite (and often limited) amount of data is collected between each policy update.\n- It seems that there would likely be an important interplay between the size of policy updates, the amount of data to reuse, and the size of behavior policy updates determined by the PROPS objective. Unfortunately, the paper does not perform any analysis to connect these components, and instead sets all hyperparameters using a hyperparameter sweep. \n\n**[W2] Experiments do not demonstrate convincing performance benefits**\n- The experimental results suggest that PROPS can achieve slightly better sampling error during RL training, but these improvements only lead to marginal performance benefits in continuous control MuJoCo tasks. \n- Experiments only compare against PPO and a version of PPO that naively reuses recent data (PPO-Buffer). There have been several works that improve data efficiency in a more principled way than PPO-Buffer that are similar to the goals of this work (see references in lines 122-124), but none of these are considered as baselines. 
Off-policy RL algorithms are also not included for comparison, which can significantly outperform on-policy or near on-policy methods in some of the tasks considered in this work.\n- Due to the regularization needed for stable performance, PROPS requires the behavior policy to remain close to the current policy (small $\\delta_{\\textnormal{PROPS}}$). The experimental results suggest that this prevents PROPS from reusing a significant amount of past data (as mentioned in lines 470-471 and 1181-1183), limiting the data efficiency that can be achieved.\n\n**[W3] Contribution is not clear relative to existing methods:** The paper does not clearly position its contribution relative to existing RL methods that also address data or computational efficiency. See [Q1] below."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce a non-i.i.d., off-policy sampling method to produce data that more closely matches the expected on-policy data distribution than on-policy sampling can produce, thus improving the data efficiency of on-policy policy gradient algorithms."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024onpolicy,\ntitle={On-Policy Policy Gradient Reinforcement Learning Without On-Policy Sampling},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zJfOyS1YLW},\nnote={under review}\n}"
},
"abstract": {
"value": "On-policy reinforcement learning RL algorithms perform policy updates using i.i.d. trajectories collected by the current policy. However, after observing only a finite number of trajectories, on-policy sampling may produce data that fails to match the expected on-policy data distribution. This sampling error leads to noisy updates and data inefficient on-policy learning. Recent work in the policy evaluation setting has shown that non-i.i.d., off-policy sampling can produce data with lower sampling error than on-policy sampling can produce~\\citep{zhong2022robust}. Motivated by this observation, we introduce an adaptive, off-policy sampling method to improve the data efficiency of on-policy policy gradient algorithms. Our method, Proximal Robust On-Policy Sampling (PROPS) reduces sampling error by collecting data with a behavior policy that increases the probability of sampling actions that are under-sampled with respect to the current policy. Rather than discarding data from old policies -- as is commonly done in on-policy algorithms -- PROPS uses data collection to adjust the distribution of previously collected data to be approximately on-policy. We empirically evaluate PROPS on both continuous-action MuJoCo benchmark tasks as well discrete-action tasks and demonstrate that (1) PROPS decreases sampling error throughout training and (2) improves the data efficiency of on-policy policy gradient algorithms. Our work improves the RL community’s understanding of a nuance in the on-policy vs off-policy dichotomy: on-policy learning requires on-policy data, not on-policy sampling."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"reinforcement learning",
"on-policy",
"policy gradient",
"data collection"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b0860f2fd30166c07622fe9adc01e1c035fdebcc.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/f97b624cc2f76b646ab962b7d189665661bc29fd.zip"
},
"title": {
"value": "On-Policy Policy Gradient Reinforcement Learning Without On-Policy Sampling"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zJjzNj6QUe | RocketEval: Efficient automated LLM evaluation via grading checklist | main | Active | automated evaluation;large language models;natural language processing | applications to computer vision, audio, language, and other modalities | 5;5;6;6 | 4;4;4;3 | 3;3;3;2 | 2;2;3;2 | 4;3;4;3 | 5.5 | 3.75 | 2.75 | 2.25 | 3.5 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In Table 3, human-human agreement seems very low. Does this saturate the performances above 60% to some extent?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The authors core objective is important and compellingly achieved: create a lightweight model judge with high performance. Their use of a dynamic checklist, and normalized score predictions both show notable improvements in scoring, from their experiments.\n* The authors unpack the issues with existing lightweight model judges, related to their analyses, position bias and uncertainty, that could be informative to other solutions as well.\n* The comprehensively compare a series of model judges in different settings to understand how they can best be optimized. Their quantitative analysis and ablations are particularly informative."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose RocketEval, a system comprised of API calls and lightweight models, performing as a model judge. The system is designed to be minimally computationally expensive while achieving comparable human agreement scores to GPT4o on MT-Bench and WildBench. Their subsequent ablations and analysis inform how the system can achieve these efficiency gains."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Their explanation for lightweight model “analysis” being weak could be framed more conservatively. If I understand correctly, these experiments more show that lightweight models find classifying GPT4o reviews into scores as an easier task than predicting the score themselves. It wouldn’t necessarily naturally follow that a GPT4o checklist would be the obvious solution, but rather a GPT4o reasoning.\n* It isn’t immediately apparent why GPT4o is so much more expensive than the RocketEval (which includes GPT4o calls). This should be more closely explained. Is it because the reasoning is super long? If so, it would be beneficial to see harder baselines (in terms of efficiency), e.g. prompting GPT4o to produce shorter CoT explanations before its score prediction."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Could authors explain their framework's relation and difference with paper [1] ?\n- RocketEval can enable lightweight models the ability to judge large models, is this similar to the idea of weak-to-strong supervision? Please explain the relation and difference between your paper's ideas and weak-to-strong [2].\n- Conducting experiments solely on a total of 1104 data from MT-BENCH and WILDBENCH is not convincing enough. Could authors conduct experiments on more datasets?\n\n[1] Ribeiro, Marco Tulio, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. \"Beyond accuracy: Behavioral testing of NLP models with CheckList.\" arXiv preprint arXiv:2005.04118 (2020).\n\n[2] Burns, Collin, Pavel Izmailov, Jan Hendrik Kirchner, Bowen Baker, Leo Gao, Leopold Aschenbrenner, Yining Chen et al. \"Weak-to-strong generalization: Eliciting strong capabilities with weak supervision.\" arXiv preprint arXiv:2312.09390 (2023)."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Originality: The paper presents a novel evaluation framework, RocketEval, which evaluates LLMs by grading checklist. This approach is distinct from existing methods like multiple-choice questions. However, I believe the idea of using checklist to evaluate NLP models was actually proposed by another paper [1] in 2020.\n- Quality: The analysis of the lightweight LLMs' abilities in section 2 provides valuable insights into the framework’s effectiveness.\n- Clarity: The paper is well-organized and clearly structured, with each section logically following the previous one.\n- Significance: The framework’s high agreement with human judgments and significant cost reduction make it a promising solution for large-scale LLM evaluations.\n\n\n[1] Ribeiro, Marco Tulio, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. \"Beyond accuracy: Behavioral testing of NLP models with CheckList.\" arXiv preprint arXiv:2005.04118 (2020)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes RocketEval, a novel framework for efficiently evaluating large language models (LLMs) using lightweight LLMs as judges. The framework addresses the limitations of existing automated evaluation methods, such as high costs and lack of interpretability, by using checklist grading. This paper trains lightweight LLMs to independently evaluate each checklist item, providing a multifaceted and unbiased judgment."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper primarily focuses on evaluating responses to queries, as the checklist is generated by the input query. However, LLMs are used in a wide range of applications, including text summarization, and translation, where evaluation metrics based on queries may not be efficient since the input query may be very long.\n- Since authors employ GPT-4o as the checklist creator, while the paper argues that checklist creation is a one-time process, the cost of using a powerful LLM like GPT-4o for this task could be significant, especially for large-scale evaluations. Exploring more efficient methods for checklist creation would be beneficial.\n- Typo: Line 215: We select responses from 12 test models and compare the benchmark results when using *GPT-4o* and *Claude-3.5-Sonnet* as judges as strong baselines."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Clear Motivation: The author clearly explains the motivation of the paper in the introduction. I particularly agree with the point raised about the shortcomings of \"Fine-Tuned Judge Models,\" as these models often lack understanding of complex instructions (although the author did not provide proof for this claim).\n- Overall Well-Written: The paper is generally well-written.\n- Relevant Problem: The paper raises a highly relevant problem that urgently needs to be addressed in the current landscape of LLM evaluation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces RocketEval, a lightweight and automated evaluation method for LLMs that addresses the high costs and privacy concerns of using powerful LLMs as judges. By reframing evaluation tasks into a Q&A format using instance-specific checklists, RocketEval improves the judgment accuracy of smaller models, overcoming issues like uncertainty and positional bias."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Regarding Section 2.4: While information entropy is a good metric, it is not very intuitive. I recommend that the author use self-consistency (sampling n times and calculating consistency) to demonstrate this uncertainty.\n- Limited Advantage Over GPT-4o-Mini: Although RocketEval shows a clear advantage over GPT-4o, its superiority over GPT-4o-mini is less significant. Additionally, the N=1000 setting is impractical, as it is rare for someone to test 1000 models simultaneously.\n- Missing Ablation Studies: The paper lacks ablation studies for two components: Independent Checklist Item Judgment and Normalized Probability.\n- Comparison with Other Models: I also suggest that the author provide comparisons with other Fine-Tuned Judge Models, such as the Prometheus series.\n- Minor Issues: In lines 289-290, the author mentions that reference responses and hand-crafted rubrics may not work well in all cases. Please provide examples to illustrate this. Figure 10 is confusing—why does the first row have three models, while the following subplots have only one model per row?\n\nOverall, I believe the problem and methods proposed in the paper are valuable, but there is room for improvement. I hope the author can address my concerns during the rebuttal stage."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "How does the method compare to other approaches, such as [1, 2], as well as fine-tuned judge LLMs?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- I enjoyed reading the paper, especially Section 2, as the problem is well-motivated and the charts effectively convey the key messages.\n- The idea is simple yet smart. By decomposing a single (potentially difficult) evaluation task into multiple simpler items in a checklist, lightweight LLMs can be leveraged to perform the evaluation. More importantly, checklist generation is largely a one-time effort, which could make LLM evaluation more economical in everyday LLM development environments.\n- The authors have thoroughly evaluated the proposed method from multiple perspectives, including agreement, cost, and a qualitative study."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose RocketEval to address the high costs, privacy concerns, and reproducibility challenges associated with using powerful LLMs like GPT-4o for evaluation purposes. RocketEval leverages lightweight LLMs combined with a multi-faceted checklist approach to improve judgment accuracy. Experimental results from benchmarks like MT-BENCH and WILDBENCH demonstrate that RocketEval achieves a high correlation with human evaluations, comparable to GPT-4o, while reducing costs by more than 50-fold."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Some aspects of the design lack scientific rigor. For instance, the estimation of “Normalized Probability” does not appear to be justified. Why would it provide an unbiased (or even plausible) probability? I suggest simply referring to it as a “score” instead of “probability”.\n- Direct evaluation using lightweight LLMs is critiqued for high uncertainty and position bias. However, the authors fail to provide a comparison in this regard during the evaluation. Could this issue be addressed by bias mitigation algorithms [1, 2] at an even lower cost? What is the unique advantage offered by the checklist-based method compared to those approaches?\n- The authors mentioned the potential benefits of using prefix caching for cost savings. However, this is not reflected in Section 4.2, even though this feature is already available through OpenAI.\n- In Section 4.2, the authors quote the cost from `vast.ai`, a bidding-based crowd-sourced GPU rental platform. The quoted price ($0.8/hr) is the minimum price for renting an A100 card according to `https://vast.ai/pricing` (why not use the median price?). This might be unfair for several reasons. First, it is unlikely that $0.8/hr represents the regular price. More importantly, OpenAI offers production-level stability, which `vast.ai` does not. A fair comparison would be to quote the price from a public cloud provider like AWS.\n\n[1] Large language models are not fair evaluators\n\n[2] Split and merge: Aligning position biases in large language model based evaluators"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "A simple, replicable,interpretable, and accurate automated evaluation method that uses lightweight LLMs as judges to efficiently assess various scenarios and questions."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024rocketeval,\ntitle={RocketEval: Efficient automated {LLM} evaluation via grading checklist},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zJjzNj6QUe},\nnote={under review}\n}"
},
"abstract": {
"value": "Evaluating large language models (LLMs) in diverse and challenging scenarios is essential to align them with human preferences. To mitigate the prohibitive costs associated with human evaluations, utilizing a powerful LLM as a judge has emerged as a favored approach. Nevertheless, this methodology encounters several challenges, including substantial expenses, concerns regarding privacy and security, and reproducibility. In this paper, we propose a straightforward, replicable, and accurate automated evaluation method by leveraging a lightweight LLM as the judge, named RocketEval. Initially, we identify that the performance disparity between lightweight and powerful LLMs in evaluation tasks primarily stems from their ability to conduct comprehensive analyses, which is not easily enhanced through techniques such as chain-of-thought reasoning. By reframing the evaluation task as a multi-faceted Q\\&A using an instance-specific checklist, we demonstrate that the limited judgment accuracy of lightweight LLMs is largely attributes to high uncertainty and positional bias. To address these challenges, we introduce an automated evaluation process grounded in checklist grading, which is designed to accommodate a variety of scenarios and questions. This process encompasses the creation of checklists, the grading of these checklists by lightweight LLMs, and the reweighting of checklist items to align with the supervised annotations. Our experiments carried out on the automated evaluation benchmarks, MT-Bench and WildBench datasets, reveal that RocketEval, when using $\\textit{Gemma-2-2B}$ as the judge, achieves a high correlation (0.965) with human preferences, which is comparable to $\\textit{GPT-4o}$. Moreover, RocketEval provides a cost reduction exceeding 50-fold for large-scale evaluation and comparison scenarios."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"automated evaluation",
"large language models",
"natural language processing"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/1253b5d0c183f9b66e90566f9e5cfdf3d1c6d306.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/4f6c277c069d7d67c2cadabc8c18e84a843f662f.zip"
},
"title": {
"value": "RocketEval: Efficient automated LLM evaluation via grading checklist"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zKFUNRH0hN | Leveraging Modality Tags for Enhanced Cross-Modal Video Retrieval | main | Active | Video Understanding | applications to computer vision, audio, language, and other modalities | 3;3;6;8 | 4;5;5;3 | 3;2;3;4 | 2;2;3;3 | 2;1;3;3 | 5 | 4.25 | 3 | 2.5 | 2.25 | -0.568535 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Have the authors thought about ways to handle observed instances of hallucinated tags? And currently, how are these affecting retrieval accuracy?\n2. Was a quantitative evaluation considered for the quality and relevance of extracted tags maybe like a comparison with human annotated tags?\n\nPlease also refer to weaknesses for other questions."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "1. The paper introduces an approach to use foundational models to extract modality-specific tags for both videos and text, this is a novel approach to enhance cross-model alignment.\n2. Extensive experiments are conducted on three diverse datasets which cover a wide range of video retrieval tasks, comparison of MAC-VR against a strong baseline with SOTA methods and different inference strategies are documented.\n3. The authors have documented extensive ablation studies for validating the contribution of each components such as the effect of different numbers of tags, foundation models and architectural choices on retrieval performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "1. The authors in the paper introduce Modality Auxiliary Concepts for Video Retrieval, a novel approach that aims at improving alignment between video and text for effective video retrieval.\n2. This work provides a framework for extracting and utilizing modality-specific tags using foundational models from video and text and an alignment loss is introduced to align modality-specific auxiliary concepts with visual and textual latent concepts. \n3. In the paper they discuss how the architecture is tested with various inference strategies achieving competitive results across three benchmark datasets and ablation studies further validate the impact of each component."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper does not cover the quality and the diversity of the tags extracted. An evaluation of tag relevance and coverage could enhance the contribution.\n2. The paper has limited analysis of the individual contributions of video tags and text tags within the MAC-VR framework. Understanding the contribution of modality-specific tags to overall alignment can maybe help to identify the potential areas for improvement."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. The novelty of the paper is insufficient. It only uses LLMs to obtain the tag information of text and video to enhance the performance of the model. In addition, based on the introduction of additional data and large model knowledge, our model still lags far behind the state-of-the-art methods including T-MASS as shown in Table 3.\n2. The performance of the proposed method in this paper lags significantly behind the state-of-the-art (SOTA) techniques. Upon examining Table 3, it is evident that the proposed MAC-VR method falls behind T-MASS in terms of R@5 and R@10 for the MSRVTT dataset. For DiDeMo, MAC-VR also underperforms T-MASS across R@1, R@5, R@10, and MeanR metrics. The disparity in performance is notably substantial. Also, for ActivityNet, MAC-VR also falls behind T-MASS at R@1 and R@5 by a large margin.\n3. The logic in L53 is strange. The authors explain the effectiveness of different inference strategies which seems to have nothing to do with the auxiliary concept loss function described later.\n4. Some important papers need to be referenced and compared, including:\n[1] Clip-vip: Adapting pretrained image-text model to video-language representation alignment. ICLR 2023\n[2] Uatvr: Uncertainty-adaptive text-video retrieval. ICCV 2023\n5. The writing of the paper is not professional and needs further improvement. The symbols of this paper should be consistent, such as QB in L157 vs \\textit{QB} in L158, maybe $\\in$in L140, et al.\n6. The second term in Eq. 5 should also be preceded by a weight parameter to control it, and the selection of its parameters should be verified through experiments.\n7. The authors do not define the K in Eq. 4. Besides, the K in Eq. 4 seems to be different from the K in L323, so I suggest the authors use different symbols to distinguish them."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper introduces Modality Auxiliary Concepts for video retrieval, a novel approach that leverages modality-specific tags to enhance video retrieval\n2. The authors propose to extract modality-specific tags from foundational VLMs and LLMs to augment the video and text modalities.\n3. The authors propose a new Alignment Loss to better align and distinguish these learnt latent concepts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Modality Auxiliary Concepts for video retrieval, a novel approach that leverages modality-specific tags to enhance video retrieval. However, the novelty of the paper is insufficient. It only uses a large model to obtain the tag information of text and video to enhance the performance of the model. In addition, based on the introduction of additional data and large model knowledge, our model still lags far behind the state-of-the-art methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The novelty of the paper is insufficient. It only uses LLMs to obtain the tag information of text and video to enhance the performance of the model. In addition, based on the introduction of additional data and large model knowledge, our model still lags far behind the state-of-the-art methods including T-MASS as shown in Table 3.\n2. The performance of the proposed method in this paper lags significantly behind the state-of-the-art (SOTA) techniques. Upon examining Table 3, it is evident that the proposed MAC-VR method falls behind T-MASS in terms of R@5 and R@10 for the MSRVTT dataset. For DiDeMo, MAC-VR also underperforms T-MASS across R@1, R@5, R@10, and MeanR metrics. The disparity in performance is notably substantial. Also, for ActivityNet, MAC-VR also falls behind T-MASS at R@1 and R@5 by a large margin.\n3. The logic in L53 is strange. The authors explain the effectiveness of different inference strategies which seems to have nothing to do with the auxiliary concept loss function described later.\n4. Some important papers need to be referenced and compared, including:\n[1] Clip-vip: Adapting pretrained image-text model to video-language representation alignment. ICLR 2023\n[2] Uatvr: Uncertainty-adaptive text-video retrieval. ICCV 2023\n5. The writing of the paper is not professional and needs further improvement. The symbols of this paper should be consistent, such as QB in L157 vs \\textit{QB} in L158, maybe $\\in$in L140, et al.\n6. The second term in Eq. 5 should also be preceded by a weight parameter to control it, and the selection of its parameters should be verified through experiments.\n7. The authors do not define the K in Eq. 4. Besides, the K in Eq. 4 seems to be different from the K in L323, so I suggest the authors use different symbols to distinguish them."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1、1. Please refer to Figure 3 and answer this question: During model testing, are the features $T_{i}$ generated by the Text encoder used as input to the T-CVE model?\n2、In line 362, it is mentioned that K=8. I believe that the choice of K is crucial, and I recommend an ablation study to investigate the impact of different values of K."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper proposes a method called Modality Auxiliary Concepts to enhance video retrieval performance, utilizing large models to generate Visual/Textual Tags that help align visual and textual concepts. The approach is also logically clear and well-structured."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method called Modality Auxiliary Concepts to enhance video retrieval performance, utilizing large models to generate Visual/Textual Tags that help align visual and textual concepts. I hold a positive view of this research."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1、In line 300, for the proposed alignment loss $L_{A}$, please provide the principle or the formula for further clarification.\n2、2. I suggest formatting the tables in the style of three-line tables for improved aesthetics. In line 472, $L_{L_{A}}$ should be changed to $L_{A}$."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "none"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "none"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper conducted thorough experiments to demonstrate the effectiveness of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper utilizes auxiliary concepts to improve the alignment of visual and textual latent concepts, enabling the distinction between each concept. It also introduces an Alignment Loss to strengthen the alignment between visual and textual latent concepts. Experimental results demonstrate the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper contains numerous typographical errors; please review it carefully. For example, on line 142, it reads \"into a shared ddimensional latent embedding\"; on line 236, it says \"Alignment Loss LA and\"; on line 314, it mentions \"Flikr videos\"; and on line 317, it states \"on the vall split\". Figure 3 has low resolution. There are also missing bold elements in Table 2. The authors are requested to carefully proofread their paper. The spelling in Figure 5 also contains errors, such as \"enjoyoing\". The capitalization of the table labels throughout the paper is not consistent.\n2.The LA loss proposed by the author is not clearly stated in the paper, and it seems to lack innovation.\n3. The method proposed by the authors leverages the capabilities of VLM and LLM to generate tags for auxiliary alignment. However, as the authors' visualization results and experimental data analysis suggest, this kind of annotation largely generates confusing information that is detrimental to semantic alignment. This does not sound reasonable. Moreover, the authors still heavily rely on the alignment capabilities of the latent space itself, and the experiments do not reflect the effect of using only tags. I speculate that the effect obtained by using only tags is not good. The authors could further demonstrate the rationality and effectiveness of the proposed method through additional experiments or other means. It is difficult to confirm the effectiveness of the method from the current experimental data because I have observed that some results on DiDeMo and ActivityNet Captions have actually declined. This may be due to overfitting to the first dataset, rather than an improvement brought about by the method itself."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024leveraging,\ntitle={Leveraging Modality Tags for Enhanced Cross-Modal Video Retrieval},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zKFUNRH0hN},\nnote={under review}\n}"
},
"abstract": {
"value": "Video retrieval requires aligning visual content with corresponding natural language descriptions. In this paper, we introduce Modality Auxiliary Concepts for Video Retrieval (MAC-VR), a novel approach that leverages modality-specific tags---automatically extracted from foundation models---to enhance video retrieval.\nPrevious works have proposed to emulate human reasoning by introducing latent concepts derived from the features of a video and its corresponding caption. Building on these efforts to align latent concepts across both modalities, we propose learning auxiliary concepts from modality-specific tags. \nWe introduce these auxiliary concepts to improve the alignment of visual and textual latent concepts, and so be able to distinguish each concept from the other.\nTo strengthen the alignment between visual and textual latent concepts—where a set of visual concepts matches a corresponding set of textual concepts—we introduce an Alignment Loss. This loss aligns the proposed auxiliary concepts with the modalities' latent concepts, enhancing the model's ability to accurately match videos with their appropriate captions. \nWe conduct extensive experiments on three diverse datasets: MSR-VTT, DiDeMo, and ActivityNet Captions. The experimental results consistently demonstrate that modality-specific tags significantly improve cross-modal alignment, achieving performance comparable to current state-of-the-art methods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Video Understanding"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5893040ac6e26a674c55010c980ff52c3aa1756f.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Leveraging Modality Tags for Enhanced Cross-Modal Video Retrieval"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zKlFXV87Pp | Distilling Auto-regressive Models into Few Steps 1: Image Generation | main | Active | image autoregressive models;parallel decoding;distillation | generative models | 5;5;6;8 | 3;4;4;4 | 3;2;3;3 | 3;3;3;3 | 3;2;3;4 | 6 | 3.75 | 2.75 | 3 | 3 | 0.471405 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Your method depends on DPM-Solver to generate the trajectories. Do you think this affects results in any way? What other dependencies does your method have that might affect results?\n2. Do the hyperparameters for DPM-Solver affect the results drastically? Why did you pick the current hyperparams?\n3. Do you think a larger amount of timesteps might be needed to get satisfactory results? Is this compatible with your method?\n4. Skip baselines seem conceptually weak. The one-step baseline is guaranteed to fail given results on toy examples. Are there no better baselines - for example progressive distillation, that is used for diffusion models?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The work presents a method that leverages deterministic flow matching to create training data (from an AR model) for a one-step image generation model. When trained on this data this model is a distilled version of the original AR model. The idea of using determinstic flow matching to create the data is novel and seems like a good and innovative candidate idea to achieve this. The paper evaluates the claims on class-to-image generation on ImageNet and compares to simple baselines, achieving acceptable FID scores, that are high but not too high."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The method maps autoregressive model output distributions to a Gaussian distribution through flow matching, creating a deterministic transformation. The goal is to then learn this mapping with a neural network. This enables parallel token generation while preserving the conditional dependencies between tokens in the AR model's output distribution."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "A. It seems like the FID increases, although seemingly acceptable in numerical terms, give rise to blurry and artifact-ridden images, many of which don't even preserve the structure of the class they are trying to generate (monkeys without eyes etc.). Also, another thing that undermines my confidence is that no images are shown in the main paper and instead shown in the appendix. At least some examples are shown.\n\nB. One big problem from (A) is that, since the paper's main premise is to distill an AR model into a one-shot model, good samples are required to demonstrate that this approach is viable. Given the current results we do not know if this approach can actually generate satisfactory images even when scaled or taken into another regime with larger data such as T2I.\n\nI think these are the main issues for me to be convinced of this approach. My question to authors are:\n- Do you expect the approach to become good enough to generate satisfactory images with different scale (data, model size)?\n- If so, what leads you to believe this?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The writing is somewhat unclear. It's not obvious whether $q_t$ represents tokens in the discrete codebooks or outcomes of flow matching. Algorithms 1-4 are much clearer than the main text, so refining the writing to enhance intuitiveness would benefit the readers.\n\n- The performance gap between few-step AR samplers and the baseline remains quite large.\n\n- Some baseline values, such as VAR/LlamaGen in Table 1, appear to be significantly lower than the original paper's reported results.\n\n- It would be interesting to explore how DD performs in text-conditioned image generation tasks.\n\n- The structure of DD closely resembles DMD in diffusion distillation, where a <noise, clean image> dataset is constructed; however, DMD isn't addressed in this paper. Adding a discussion on DMD would be beneficial. Additionally, transforming next-token prediction to AR-based token denoising at a high level seems conceptually similar to MAR (Li et al., 2024, cited). Including further discussion on MAR could also be helpful."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors proposed a new framework to make few-step AR distillation possible. \n- The conversion from next image token prediction to next (set of) image token denoising is a very smart design. The method naturally combines the best of worlds in diffusion models / flow matching and autoregressive image modeling.\n- The performance gain against baseline few-step samplers is huge."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Distilled Decoding (DD), a novel method to reduce the sampling steps of autoregressive image generation models. The approach reframes the traditional next-token or scale prediction process into a denoising procedure guided by flow matching. Starting from fully noisy tokens, DD employs an autoregressive process to map these tokens into image tokens in a flexible number of sampling steps. Experiments with the LlamaGen and VAR models demonstrate that DD significantly outperforms conventional few-step sampling baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The authors propose a novel framework enabling few-step autoregressive (AR) distillation.\n\n- Converting next-image-token prediction to next-image-token denoising is an ingenious design choice, seamlessly integrating the strengths of both diffusion models and autoregressive image modeling.\n\n- The performance improvement over baseline few-step samplers is substantial."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "My questions lean more toward potential future design possibilities, as the explanations provided in the paper are relatively clear.\n\nQ1: The approach seems very similar to a variant of the Consistency Model (CM) applied to the AR domain, replacing the diffusion model in CM with an AR model. What are the advantages of this substitution? Line 319 mentions that, compared to diffusion flow matching, DD “has an easy way to jump back to any point in the trajectory.” This is noted as a characteristic, but what practical benefits does it provide? Additionally, the ODE sampling trajectory in CM is also theoretically fixed and reversible. If my understanding is incorrect, please correct me.\n\nQ2: Is there a way for this method to be trained from scratch, similar to Consistency Training?\n\nQ3: Currently, the method requires a dataset size of 1.2M. How significantly does the data volume impact performance? Related distillation methods, such as DMD[1], use relatively smaller datasets. Is there a possibility of a similar approach to DMD2[2] that could discard the need for noise-data pairs?\n\nQ4: Q: In line 234, is $\\pi$ the same as z used later on? There may be a misalignment in the notation here.\n\n[1]. Yin T, Gharbi M, Zhang R, et al. One-step diffusion with distribution matching distillation[C]//Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024: 6613-6623.\n[2]. Yin T, Gharbi M, Park T, et al. Improved Distribution Matching Distillation for Fast Image Synthesis[J]. arXiv preprint arXiv:2405.14867, 2024."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is concisely written with a clear line of thought. The approach of constructing a continuous embedding space in AR and matching it to a Gaussian distribution is particularly interesting. This construction effectively combines the discrete cross-entropy model from AR-based methods with the probability distribution strategies that have proven successful in diffusion models, allowing the concept of consistency distillation from diffusion to be successfully applied within the AR field. The comparisons are comprehensive, and the experiments are well-executed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper is concisely written with a clear line of thought. The approach of constructing a continuous embedding space in AR and matching it to a Gaussian distribution is particularly interesting. This construction effectively combines the discrete cross-entropy model from AR-based methods with the probability distribution strategies that have proven successful in diffusion models, allowing the concept of consistency distillation from diffusion to be successfully applied within the AR field. The comparisons are comprehensive, and the experiments are well-executed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper successfully applies the consistency distillation (CD) technique from diffusion models to the AR field. However, the current results still fall short compared to the pre-trained model."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In table 1, why VAR-DD with one step achieves better performance than VAR-DD with two steps?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well written and easy to follow.\n2. The presented idea of compressing AR models with Flow Matching is intersting. Given that autoregressive models tend to be slow, accelerating them is a crucial challenge. The efforts presented in this paper could provide valuable insights for the community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a method called Distilled Decoding (DD) that distills autoregressive models into fewer steps. It employs flow matching to establish a deterministic mapping from Gaussian noise to the output distribution of the pretrained autoregressive models. Extensive experiments demonstrate that DD outperforms the baselines, accelerating autoregressive models with minimal performance degradation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lines 201-203 state that modeling the joint distribution of several tokens is impractical due to the vast possible space. However, it seems that the proposed method uses Flow Matching to learn the joint distribution of tokens (the full sequence in the one-step case). This is somewhat confusing—how does the proposed method tackle this issue?\n2. The image quality shows significant degradation (as indicated by both FID scores and the figures), raising concerns about the practicality of the proposed method.\n3. It would be beneficial to include experiments with text-conditioned LlamaGen to demonstrate that the method can be adapted to more complex scenarios."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024distilling,\ntitle={Distilling Auto-regressive Models into Few Steps 1: Image Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zKlFXV87Pp},\nnote={under review}\n}"
},
"abstract": {
"value": "Autoregressive (AR) models have recently achieved state-of-the-art performance in text and image generation. However, their primary limitation is slow generation speed due to the token-by-token process. We ask an ambitious question: can a pre-trained AR model be adapted to generate outputs in just one or two steps? If successful, this would significantly advance the development and deployment of AR models. We notice that existing works that attempt to speed up AR generation by generating multiple tokens at once fundamentally cannot capture the output distribution due to the conditional dependencies between tokens, limiting their effectiveness for few-step generation. To overcome this, we propose Distilled Decoding (DD), which leverages flow matching to create a deterministic mapping from Gaussian distribution to the output distribution of the pre-trained AR model. We then train a network to distill this mapping, enabling few-step generation. We evaluate DD on state-of-the-art image AR models and present promising results. For VAR, which requires 10-step generation (680 tokens), DD enables one-step generation (6.3$\\times$ speed-up), with an acceptable increase in FID from 4.19 to 10.65. Similarly, for LlamaGen, DD reduces generation from 256 steps to 1, achieving an 217.8$\\times$ speed-up with a comparable FID increase from 6.52 to 17.98. In both cases, baseline methods completely fail with FID scores $>$100. As the first work to demonstrate the possibility of one-step generation for image AR models, DD challenges the prevailing notion that AR models are inherently slow, and opens up new opportunities for efficient AR generation."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"image autoregressive models",
"parallel decoding",
"distillation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f66ca334bf5d720ab5a87d71ffa3b733e5818ec6.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Distilling Auto-regressive Models into Few Steps 1: Image Generation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zKoUV1wHRJ | DiFSD: Ego-Centric Fully Sparse Paradigm with Uncertainty Denoising and Iterative Refinement for Efficient Self-Driving | main | Active | Autonomous Driving;End-to-End Fully Sparse Paradigm;Iterative Refinement;Uncertainty Denoising | applications to robotics, autonomy, planning | 3;5;5;6 | 4;4;4;4 | 3;2;3;3 | 2;2;2;3 | 2;2;2;3 | 4.75 | 4 | 2.75 | 2.25 | 2.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What’s the relationship between agent features F_a, map features F_m and surrounding features F_o? Will map queries involved into the selection of interactive queries? \n2. Will the path of other objects be used to calculate the geometric prior in query selection? There may be some objects drive far form the ego path, which is closet to the path but should be pick out. While some objects drive close to the ego path when lane changing, which are far from the path but should pay more attention to.\n3. In Figure 4, why the ego query is updated by weighted object queries? Does that also mean the map queries will not affect the ego query feature?\n4. The introduction of DiFSD(Dense) confuses me about the contribution in sparsity. As you claimed that dense scene representation is computationally intensive, why the dense-based setting is even faster than your fully sparse design (14.8 FPS vs 10.7 FPS)? Meanwhile, I thinke the latency comparison of w/ and w/o interactive query selection will help to illustrate the efficiency of the module.\n5. The visualization of CIPV/CIPS in different scenes iteratively will help reader to understand the process of iterative motion planner. Will you provide some qualitative results?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This idea of iterative motion planner make sense, which is similar to TCP[1] which iteratively adjust attention with guidance of trajectory.\n2. The performance on nuScenes dataset outperforms SparseDrive especially in L2 error.\n\n[1] Penghao Wu, Xiaosong Jia, Li Chen, Junchi Yan, Hongyang Li, and Yu Qiao. Trajectory-guided control prediction for end-to-end autonomous driving: a simple yet strong baseline. NeurIPS, 2022."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed an iterative motion planner based on sparse paradigm, which find out interactive object queries into joint motion-planning module. In open-loop nuScenes dataset, the method outperforms previous sparse method in L2 error and efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The overall novelty of this paper is limited. It’s a good attempt to apply trajectory-guided attention based on previous sparse perception works. However, the presentation of this paper is not enough to demonstrate how it works and how many time it saves. \n2. The contribution in sparsity which aims to reduce the computation is confused, since the dense setting achives even faster inference speed. It looks that the dense representation obtains a better balance between performance and efficiency, while the results of dense setting are also used to calculate the improvements in abstract by the authors.\n3. The comparision between previous works in Table 1 is unfair. Specially, the UniAD and VAD utilize different evaluation protocol as mentioned in PARA-Drive [2]. It looks this paper follows the approach in VAD without transforming UniAD results, which will obtain lower L2 error and collision rate. Therefore, the claimed performance improvements to UniAD are incorrect. Meanwhile, the collision rate of SparseDrive shows large gap between reported in its paper, which deserve to re-check.\n4. The method is only evaluated in open-loop nuScenes dataset without close-loop simulation.\n\n[2] Xinshuo Weng, Boris Ivanovic, Yan Wang, Yue Wang, and Marco Pavone. Para-drive: Parallelized architecture for real-time autonomous driving. In CVPR, 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Why do you use bs=1 on A100 GPUs, is the model training consuming so much memory?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper draw inspiration from human driver, and makes the model learning to focus the closest target in path. The proposed Intention-Guided Geometric Attention is interesting. \n2. The experiments are extensive and detailed, supporting the effectiveness of proposed modules."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a fully sparse framework for end-to-end driving, it considers the driving habit of human driver to focus on closest target in path to improve the interaction modeling. Besides, it incorporates position-level diffusion and trajectory-level denoising to improve planning performance. The proposed method achieves SOTA performance on nuScenes dataset."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. One main concern is the experiments are only conducted on open-loop benchmark nuScenes, which suffers from short-cut learning in [1]. Close-loop experiments is needed to prove the effectiveness of the whole model.\n2. I think the framework is built on SparseDrive[2] with optimized planning modules, however, the worst result for L2 in ablation study has already surpassed SparseDrive by a large margin, it is confusing where the good performance comes from.\n\n\n[1] Li, Zhiqi, et al. \"Is ego status all you need for open-loop end-to-end autonomous driving?.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024. \n[2] Sun, Wenchao, et al. \"SparseDrive: End-to-End Autonomous Driving via Sparse Scene Representation.\" arXiv preprint arXiv:2405.19620 (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What is the performance of the model if the driving command never appears in the training set? (e.g., turning left and right, or the driving command is empty)"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed solution sounds solid in theory. Specially, the proposed geometric prior through intention-guided attention considers environment in a novel way. In addition, excluding computationally intensive dense scene representation learning and redundant environmental modeling indeed helps speed up the computation. \n2. The ablation study and supplementary material are helpful. Readers can get more information from the ablation study results.\n3. The analysis and discussion guide readers think more deeper about the model design and the performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose an ego-centric fully sparse paradigm for end-to-end self-driving. Specifically, the proposed solution mainly consists of sparse perception, hierarchical interaction and iterative motion planner. The model does not consist of any computationally intensive dense scene representation learning and redundant environmental modeling. Besides, the authors introduce a geometric prior through intention-guided attention, where the closest inpath vehicle/stationary are gradually picked out through ego-centric cross attention and selection. The experimental results show that the proposed solution achieves high accuracy in the self-driving planning task and runs faster."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It will be better if the authors could report some failure cases. Especially for those cases which the predictions are totally opposite toward the navigation command. \n2. It will be better if the authors could mark the symbols on Figures. In this way, readers can easily associate text context with Figures."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "none"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please clarify the superiority of the combination of existing modules, which makes the work in this paper with limited novelty."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "•\tThe paper proposes a novel autonomous driving paradigm, DiFSD, which adopts an ego-centric design and sparse representation, showcasing originality.\n•\tThe experimental results demonstrate DiFSD's superior performance on the nuScenes dataset, indicating high quality.\n•\tThe paper is well-structured and logically coherent, making it easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces DiFSD, an ego-centric fully sparse paradigm for autonomous driving. By focusing on key agents (CIPV/CIPS) and mimicking human driving behavior, DiFSD improves efficiency and performance through sparse perception, hierarchical interaction, and iterative motion planning, while modeling uncertainty with motion diffusion and trajectory denoising. Experiments on the nuScenes dataset show a 66% reduction in L2 error, 77% lower collision rate, and 8.2× faster efficiency compared to UniAD."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "•\tThe innovation seems to be built upon combining existing modules, lacking in-depth theoretical analysis and explanation.\n•\tThe reference format is inconsistent, with varying capitalization and abbreviations for conference names. For example, \"IEEE/CVF conference on computer vision and pattern recognition\", \"IEEE/CVF Conference on Computer Vision and Pattern Recognition\", and \"IEEE conference on computer vision and pattern recognition\" are used interchangeably. Additionally, while ICRA and IROS are abbreviated, it is suggested that CVPR and ICCV also be abbreviated for consistency.\n•\tFig. 3 and Fig. 4 illustrate the dual interaction layer within the hierarchical interaction module and the planning optimization layer in the motion planner module, as well as the interactive score fusion process in the geometric-attended selection step, respectively. However, both figures are missing essential captions.\n•\tThe paper includes only two formulas: Formula 1 explains the interactive score fusion by combining attention, geometric, and classification scores, while Formula 2 describes the Loss Function. However, important sections such as Uncertainty Denoising lack necessary formulaic descriptions, which would provide more clarity and rigor to the proposed methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024difsd,\ntitle={Di{FSD}: Ego-Centric Fully Sparse Paradigm with Uncertainty Denoising and Iterative Refinement for Efficient Self-Driving},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zKoUV1wHRJ},\nnote={under review}\n}"
},
"abstract": {
"value": "Current end-to-end autonomous driving methods resort to unifying modular designs for various tasks (e.g. perception, prediction and planning). Although optimized in a planning-oriented spirit with a fully differentiable framework, existing end-to-end driving systems without ego-centric designs still suffer from unsatisfactory performance and inferior efficiency, owing to the rasterized scene representation learning and redundant information transmission. In this paper, we revisit the human driving behavior and propose an ego-centric fully sparse paradigm, named DiFSD, for end-to-end self-driving. Specifically, DiFSD mainly consists of sparse perception, hierarchical interaction and iterative motion planner. The sparse perception module performs detection, tracking and online mapping based on sparse representation of the driving scene. The hierarchical interaction module aims to select the Closest In-Path Vehicle / Stationary (CIPV / CIPS) from coarse to fine, benefiting from an additional geometric prior. As for the iterative motion planner, both selected interactive agents and ego-vehicle are considered for joint motion prediction, where the output multi-modal ego-trajectories are optimized in an iterative fashion. Besides, both position-level motion diffusion and trajectory-level planning denoising are introduced for uncertainty modeling, thus facilitating the training stability and convergence of the whole framework. Extensive experiments conducted on nuScenes dataset demonstrate the superior planning performance and great efficiency of DiFSD, which significantly reduces the average L2 error by 66% and collision rate by 77% than UniAD while achieves 8.2x faster running efficiency."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Autonomous Driving",
"End-to-End Fully Sparse Paradigm",
"Iterative Refinement",
"Uncertainty Denoising"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/17076696cb8c0980e3147185ea46e34a4b69bcf7.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/1b8cef457a59dad7462e9fdd87769580e7c36d8f.pdf"
},
"title": {
"value": "DiFSD: Ego-Centric Fully Sparse Paradigm with Uncertainty Denoising and Iterative Refinement for Efficient Self-Driving"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zKvrOOBouT | EvA: Erasing Spurious Correlations with Activations | main | Active | spurious correlation;compute efficiency;data efficiency | alignment, fairness, safety, privacy, and societal considerations | 5;5;6;6 | 4;3;3;4 | 2;3;2;3 | 2;2;2;3 | 3;3;3;3 | 5.5 | 3.5 | 2.5 | 2.25 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the Weaknesses section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The authors propose a post-hoc method for developing robustness to spurious correlations, which does not involve any additional training / heavy computational workload, which is a big positive in my opinion. The method operates by identifying specific dimensions in the embedding space that satisfy certain threshold criteria for some forms of spuriosity metrics proposed by the authors, dropping them from the data representations, and solving a logistic regression from this updated embedding space to the label space. None of these operations seem to be computationally expensive.\n\n2. The authors propose ways to measure the degree of spuriosity of a specific dimension, when additional unbiased samples both are and are not available. This is a useful contribution which may also be applicable in other scenarios beyond bias mitigation.\n\n3. The experimental results are strong. Performance improvements over state-of-the-art are quite significant on standard benchmarks, with minimal extra computational overhead."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an approach to mitigate biases learned through spurious correlations by erasing dimensions in the embedding space that are more likely to be associated with spurious features. The authors propose two metrics - namely consistency and evidence energy, for cases when additional unbiased data respectively are and are not available for training, which are used to identify spurious dimensions in the embedding space. Experiments reveal significant improvements over state-of-the-art bias mitigation approaches mainly in the absence of unbiased training data, but also to some extent when it is available."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The calculation of both consistency (eq. 4) and evidence energy (eq 7) are done independently for each feature dimension $i$. However, it is well known that input features do not necessarily map neatly onto independent dimensions in the feature space [a, b]. Given this, the feature erasure in eq. 9 should technically not be achieving what it claims to achieve. By turning full dimensions on/off, it could be - (i) partially turning off some core features that could be in superposition with a spurious feature; and (ii) not fully turning off spurious features, since it may be in superposition with some core feature dimension which is not turned off.\n\n2. In computing $C_{(ik)}$ in eq. 4, there would be significant disparity in the distribution of the features stemming from sampling bias, since the number of unbiased samples is typically << the number of biased samples in practical datasets, which itself could skew the value of $d$, not providing a faithful measure of consistency. How do the authors account for this imbalance when applying the distance metric $d$?\n\n3. Since the idea of EvA is essentially to remove dimensions from the embedding space that correspond to spurious features, an ablation for what constitutes the best approach for selecting the subspace to drop is necessary. EvA employs thresholding on consistency and evidence energy to select spurious subspaces. However, whether consistency and evidence energy are really the best metric to look at when it comes to performing this selection needs to be either theoretically or empirically established. For this, one empirical baseline could be comparison with random feature drop out in the embedding space. I would suggest the authors to explore this in further detail and consider developing such baselines to establish the optimality of their specific feature erasure mechanisms.\n\n4. Table 5 shows that the gains from EvA are more significant in the absence of extra unbiased training data. 
The authors should provide a discussion on why their method might have a stronger advantage over SOTA when unbiased training data is not present.\n\n5. When extra training data is not available, how does one perform the hyperparameter search for $\\epsilon$? Additionally, when training data is available, although a single pass of EvA is computationally inexpensive, the hyperparameter sweep over $\\epsilon$ may be time consuming. Discussing these, especially, the former case of selecting $\\epsilon$ in the absence of an unbiased train subset is necessary."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In Table 5, how many runs were performed to compute mean and std, and was the same erase ratio used in all of the EvA runs?\n\nSection 4.4 (and Fig. 3 c) discuss the effect of the erase ratio on the worst accuracy. How does this hyperparameter influence the mean accuracy?\n\nIn Table 7, mean acc and worst acc are reported for CelebA but unbiased/conflicting acc in Table 5. The results on Waterbirds suggest that your method results in a better worst acc but worse mean acc for the ResNet-50 than for the ResNet-18: Does this pattern hold on CelebA?\n\nGiven the computational efficiency and no requirement for unbiased data or annotations of the spurious correlations, it should be quite feasible to evaluate EvA-E on ImageNet scale spurious correlations benchmarks (e.g. [1],[2]). In these more realistic settings, the distinction between spurious and core features become more complex. Can you provide empirical evidence or arguments that your evidence energy measure can still be used for detection and mitigation in such settings?\n\n\n[1] Singla et al., Salient ImageNet: How to discover spurious features in Deep Learning?\n\n[2] Neuhaus et al., Spurious Features Everywhere -- Large-Scale Detection of Harmful Spurious Features in ImageNet"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- computational cost are lower compared to similar methods that do not require unbiased data\n- their mitigation outperforms other methods on several common spurious correlation benchmarks\n- for the variant including unbiased data, the introduced method requires less samples for the same accuracy\n- the procedure seems to be well motivated including theoretical analysis"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a method to detect and mitigate spurious correlations learned by an image classifier. They present two measures to identify channels in the penultimate layer of the model which correspond to such spurious correlations, one of them requiring an additional unbiased dataset (“consistency”) while the other can be computed on the the training data (“evidence energy”). After identifying a set of spurious channels, the corresponding weights in the last layer are set to zero and the remaining weights (of the last layer) are retrained."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- the experiments establishing the correlation between their two measures are based only on one class of one of the datasets\n- experiments are only based on small toy settings with strong spurious correlations\n- the method is strongly inspired by OOD methods, but apart from a brief mention in the conclusion, this is not discussed in the main paper \n\nminor: fontsize in Fig. 1, Tab. 3, Tab. 4 are too small, the definition of “energy” actually corresponds to the “free energy” which might be confusing"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Why spurious features often lead to high confidence predictions?\n- Can the proposed method used for other model architectures beyond convolutional neural networks?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Two practical methods, namely EvA-C and EvA-E, are proposed for detecting and mitigating spurious features in the penultimate activations, depending on the existence of an unbiased dataset. The two proposed methods are both efficient and effective in mitigating spurious correlations.\n\n- The two metrics originate from the spurious feature detection methods can be used to measure the spuriousness of features from the penultimate layer of a model. These metrics are also useful to validate the effectiveness of spurious correlation mitigation methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Existing methods on mitigating spurious correlations typically requires unbiased data and multiple rounds of retraining. The paper proposes a computational- and data-efficient pipeline that detects spurious features before the last linear layer. When there is no unbiased datasets, the contribution of each feature to the network’s prediction\nconfidence is exploited for detection. If the unbiased data is available, the consistency of penultimate activations between spurious and unbiased datasets is used for detecting spurious features. Through channel-based erasure and re-weighting on the final linear layer, reliance on spurious correlations can be effectively mitigated."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Contrary to the claim in Line 93, the performance of the proposed method is not the state-of-the-art in comparison with some methods [1,2]. For example, with ResNet50 as the backbone, EvA-E has a worst-group accuracy of 86.6% (Table 7), while [2] achieves 89.1%.\n\n- The unbiased dataset $\\mathcal{D}\\_{\\text{unbiased}}$ is first introduced in Line 156; however, it is unclear what $\\mathcal{D}\\_{\\text{unbiased}}$ can be considered as \"unbiased\". Based on the descriptions in Line 230 and Line 312, $\\mathcal{D}\\_{\\text{unbiased}}$ is selected from the validation set. In some datasets, such as CelebA, the validation set also has gender bias. If $\\mathcal{D}\\_{\\text{unbiased}}$ is defined as a set of group-balanced data where each class of samples distribute equally across different spurious features, then group annotations are required to obtain $\\mathcal{D}\\_{\\text{unbiased}}$, contrary to the claim in Line 50-51. Further clarification on this point would be beneficial.\n\n- In Eq. (4), why the unknown test distribution $\\phi_{\\text{test}}^{ik}$ can be approximated via $\\phi_{\\text{unbiased}}^{ik}$? In some datasets such as CelebA, the test set is also biased. It would be helpful to further clarify this point.\n\n[1] LaBonte et al., Towards last-layer retraining for group robustness with fewer annotations, NIPS, 2023.\\\n[2] Li et al., Bias Amplification Enhances Minority Group Performance, TMLR, 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the questions raised in the Weaknesses section."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The method is well-motivated, and I appreciate its simplicity (in terms of both understanding and computation).\n- Providing theoretical analysis adds a more principled foundation to the two measures.\n- There are experiments showing that the erased features are indeed the spurious ones on CMNIST, and the correlation between evidence energy and consistency, which further supports that the method works as expected."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes EVA, a method for addressing spurious correlations in datasets. It identifies spurious features using a consistency measure and the evidence energy measure in scenarios with and without unbiased datasets, respectively. Theoretical analyses are provided regarding the relationship between these two measures and a feature’s spuriousness. Experiments are conducted to demonstrate the method's effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Since the original DFR paper uses ResNet50 as the architecture, it would be a good sanity check to compare results in that same setting. The authors provided results for ResNet50 in Appendix E.2, but they mention that, to mimic real-world scenarios, they tuned hyperparameters based on Mean Accuracy instead of Worst Accuracy. This might be debatable, as the setting where DFR is applicable assumes an unbiased validation set. Additionally, I wonder if the setup is consistent between ResNet18 in the main paper and ResNet50 in the appendix for the Waterbirds dataset. Could you clarify whether hyperparameters for ResNet18 were tuned based on Mean Accuracy or Worst Accuracy?\n- Overall, since most existing papers evaluate their methods on ResNet50 while this paper primarily uses ResNet18 (this makes the numbers not comparable to those reported in other papers; in general, the numbers are much lower for all methods than those in the literature potentially due to the architecture), and some baselines (e.g., JTT, SSA [1], AFR [2], and [3]) are not included in the main table, Table 5, it is hard to compare the proposed method to the state of the art and to assess whether it truly stands out among existing techniques. For example, I wonder if the proposed method can outperform JTT on the Waterbirds dataset when ResNet50 is used, which has a reported worst-group accuracy of 86.7%, a relatively high value compared to the numbers in this paper obtained on ResNet18.\n- Perhaps a detailed discussion and comparison with [1] is necessary, given that it also achieves the same goal: lightweight reweighting of the last layer without group labels.\n\n\n[1] Qiu, Shikai, et al. \"Simple and fast group robustness by automatic feature reweighting.\" International Conference on Machine Learning. PMLR, 2023.\n\n[2] Nam, Junhyun, et al. 
\"Spread spurious attribute: Improving worst-group accuracy with spurious attribute estimation.\" arXiv preprint arXiv:2204.02070 (2022).\n\n[3] Liu, Sheng, et al. \"Avoiding spurious correlations via logit correction.\" arXiv preprint arXiv:2212.01433 (2022)."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "EvA is a method that effectively handles spurious correlations in pretrained networks by explicitly erasing class-specific spurious connections, improving both data and compute efficiency."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024eva,\ntitle={EvA: Erasing Spurious Correlations with Activations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zKvrOOBouT},\nnote={under review}\n}"
},
"abstract": {
"value": "Spurious correlations often arise when models associate features strongly correlated with, but not causally related to, the label e.g. an image classifier associates bodies of water with ducks. To mitigate spurious correlations, existing methods focus on learning unbiased representation or incorporating additional information about the correlations during training. This work removes spurious correlations by ``**E**rasing **wi**th **A**ctivations'' (EvA). EvA learns class-specific spurious indicator on each channel for the fully connected layer of pretrained networks. By erasing spurious connections during re-weighting, EvA achieves state-of-the-art performance across diverse datasets (6.2\\% relative gain on BAR and achieves 4.1\\% on Waterbirds). For biased datasets without any information about the spurious correlations, EvA can outperform previous methods (4.8\\% relative gain on Waterbirds) with 6 orders of magnitude less compute, highlighting its data and computational efficiency."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"spurious correlation",
"compute efficiency",
"data efficiency"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/4e5c5685f1373f04de115d86cf61edd9d99bc3c6.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "EvA: Erasing Spurious Correlations with Activations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zLHP6QDWYp | Towards Realistic Long-tailed Semi-supervised Learning in an Open World | main | Withdraw | Open-world;Realistic long-tailed semi-supervised learning;Logit adjustment | unsupervised, self-supervised, semi-supervised, and supervised representation learning | Yuanpeng He;Lijian Li;Xiancai Chen;Chi-Man Pun;Wenpin Jiao;Zhi Jin | ~Yuanpeng_He1;~Lijian_Li1;~Xiancai_Chen1;~Chi-Man_Pun1;~Wenpin_Jiao1;~Zhi_Jin1 | 3;3;3;5;5 | 4;5;3;4;4 | 3;2;2;2;3 | 2;2;2;2;2 | 2;2;2;2;3 | 3.8 | 4 | 2.4 | 2 | 2.2 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See Weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The combination of long-tailed, semi-supervised, and open-world learning is interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses the limitations of existing open-world long-tailed semi-supervised learning (OLSSL) algorithms, which assume identical distributions between known and novel categories. It introduces a more realistic open-world long-tailed semi-supervised learning (ROLSSL) setting, where no assumption is made about the distribution relationship between known and novel classes. The authors propose a novel approach called dual-stage post-hoc logit adjustments, which dynamically adjusts predictive probabilities to mitigate category bias during learning. The method takes into account the frequency of samples, the number of categories, and the size of the data, improving performance by more selectively utilizing imbalanced unlabeled data. Experiments on CIFAR100 and ImageNet100 show some improvements in performance, demonstrating the effectiveness of the proposed method and setting a strong baseline for future research."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The writing of this paper is not very clear. For example, 'S/N Consistency' in Tab. 1, a key feature of the proposed setting, is never explained.\n2. Open-world Long-tailed Semi-supervised Learning seems quite similar to [*1], which is not discussed in related works. Therefore, the novelty of this paper may be weak.\n3. Experiments may be insufficient. Only some tiny datasets are considered. Large-scale benchmarks (e.g., ImageNet-LT or iNaturalist) should be included.\n\n\n[*1] SimPro: A Simple Probabilistic Framework Towards Realistic Long-Tailed Semi-Supervised Learning. ICML'24"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How to handle the scenarios where the number of new classes is unknow? It seems that the proposed method requires the prior class number, which might not realistic.\n2. Could the authors summarize the main novelty intuitively?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper is well-motivated and easy to follow.\n2. The proposed problem setting is realistic and reflects the long-tailed nature of the real world.\n3. This paper conducts comprehensive experiments to show the superiority of the method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies a realistic open-world setting, namely open-world long-tailed semi-supervised learning (ROLSSL), where the distribution relationships between known and novel categories might be different. To solve this problem, the paper proposes dual-stage post-hoc logit adjustments. Specifically, it estimates the distributions of the unlabeled data and utilizes the estimations to re-adjust the logits. Comprehensive experiments validate the superiority of the method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The comparative methods are outdated, and the most recent comparative method of this paper is ECCV 2022. In the literature, OSSL is also referred to as generalized category discovery (GCD). Some recent GCD methods, including GCD [R1] and SimGCD [R2], should be included in Table 2 and 3. There are also some works [R3, R4] in GCD considering long-tailed scenarios, which are highly related to this work and should be compared.\n2. The novelty of this paper is limited. The basic loss functions $L_{pair}$ and $L_{reg}$ are well-known techniques in novel class discovery and GCD, while the logit adjustments are also prevalent methods in long-tailed classification. Simply applying model predictions in logit adjustments is naïve and might not provide great insights to the community.\n3. The method still requires strong priors of the ground truth new class numbers, which is unrealistic.\n4. Experiments with the ViT backbone should be conducted.\n5. The notations should be concise and clear.\n\nReferences:\n[R1]. Generalized Category Discovery. CVPR 2022.\n[R2]. Parametric Classification for Generalized Category Discovery: A Baseline Study. ICCV 2023.\n[R3]. Novel Class Discovery for Long-tailed Recognition. TMLR 2023.\n[R4]. Generalized Categories Discovery for Long-tailed Recognition. ICCV 2023, workshop."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Have you considered running experiments on iNaturalist? It has more classes than any of the benchmarks used in this paper and presents a more challenging fine-grained, long-tailed distribution."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The problem addressed by this paper is highly practical, as determining the distributional relationships between known and novel categories is indeed nearly impossible in real-world scenarios.\n\n2. The paper is well-written and easy to follow.\n\n3. Key experimental results demonstrate that the DPLA approach consistently outperforms the OpenLDN baseline in both known and novel class recognition, achieving up to a 50.1% improvement on datasets like CIFAR-100 and ImageNet-100. Authors further showcase the robustness and adaptability of the proposed method across varying levels of data imbalance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a approach called Dual-stage Post-hoc Logit Adjustments (DPLA) to tackle the challenges in Realistic Open-world Long-tailed Semi-supervised Learning (ROLSSL). DPLA is a two-stage logit adjustment technique designed to improve model performance on both frequent and infrequent (tail) classes. In the first stage, logits are adjusted based on the sample frequency and class count, which helps balance predictions across known classes. In the second stage, adjustments are dynamically refined using the predicted class distribution from unlabeled data, with a particular focus on emphasizing tail classes to counter bias towards dominant classes. This dual-stage approach enables better recognition in long-tailed, semi-supervised, and open-world settings, especially where labeled data is sparse and imbalanced. Key experimental results highlight that the DPLA approach consistently surpasses the OpenLDN baseline in both known and novel class recognition, achieving up to 50.1% improvement experiments on datasets such as CIFAR100 and ImageNet100. They also show the robustness and adaptability of the proposed method across different data imbalance conditions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. A key concern is the complexity of the proposed pipeline, which involves multiple stages and numerous hyper-parameters. With additional stages and hyper-parameters to tune, it becomes more challenging to generalize this pipeline to new problem settings and datasets. For instance, as shown in Tables 3 and 4, using non-optimal scaling factors can significantly impact performance on CIFAR-100 and ImageNet-100, despite both datasets having the same number of classes. Have you explored methods to automatically tune the hyperparameters or analyzed which parameters are most sensitive? \n\n2. Technical contributions. A primary contribution of this paper is the proposed logit adjustment method for pseudo-labels, which often exhibit class imbalance. However, the issue of naturally imbalanced pseudo-labels has been previously addressed by DebiasPL [1], which also applies a logit adjustment based technique to mitigate class imbalance in pseudo-labels, resulting in improved classification performance on long-tailed datasets. A discussion on the technical distinctions between this approach and DebiasPL would be beneficial.\n\n3. While this paper presents experiments on several benchmarks to demonstrate performance gains, most datasets used are small to medium-scale, such as ImageNet-100 or CIFAR. I believe it is essential to include experiments on larger datasets, such as iNaturalist or ImageNet-1k or ImageNet-22k, to better assess the generalizability of the proposed method. It will be nice if you could provide any potential challenges or modifications needed to apply your method to larger datasets.\n\n4. In addition to presenting results on novel and known subsets, I believe it is essential to include performance metrics for many-shot, medium-shot, and few-shot subsets. 
This would help clarify whether the current logit adjustment methods compromise performance on many-shot classes to improve results on few-shot classes—a common issue with most logit adjustment techniques. It would be valuable for the authors to discuss whether their proposed method faces this challenge and, if not, to explain why it avoids this problem. If it does, providing insights into potential strategies to address this limitation in future work would be beneficial. \n\n5. I may suggest authors to include a specific breakdown of performance metrics for these subsets in their results tables. Add a discussion section that explicitly addresses the trade-offs between performance on different shot categories and potential strategies to balance these trade-offs. In my opinion, while improving performance on few-shot classes is important, it is equally crucial to avoid jeopardizing performance on many-shot classes, as they are more prevalent in real-world applications. \n\n[1] Wang, Xudong, Zhirong Wu, Long Lian, and Stella X. Yu. \"Debiased learning from naturally imbalanced pseudo-labels.\" In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14647-14657. 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "This work may need more effort to refine its writing and the methodology sections need more explanations for equations as mentioned above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "SSL in a more realistic setting is proposed and explored in this work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper mainly focuses on the more challenging SSL setting - open-world long-tailed SSL (OLSSL). The authors propose a more realistic setting than the existing OLSSL: labeled samples are much less than the unlabeled ones. Based on the existing OLSSL method, this work proposes a dual-stage post-hoc logit adjustment method to calibrate and optimize the model. Several experiments are conducted for verification."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Since readers may not be quite familiar with the development of open-world long-tail SSL, it is suggested that more background about the existing OLSSL algorithms be included in Section 3.2, especially for the definition of Eq(1).\n2. The dual-stage post-hoc logit adjustment section is not easy to read. Specifically, \n- why the aremax is used in Eq(2)? According to the Fig 1, it is expected to be a distribution adjustment. \n- What is the definition of F_{y_i^l} in Eq(3)? If the labeled samples are limited, will this sample frequency be biased? How C_base and S_base are defined? \n- What is the relationship of the equation in L244 and the Eq(2)? \n- for unlabeled samples, how the sample ratio \\pi_c^r in Eq(4) is obtained when the class of the unlabeled samples is unknown? How alpha and beta are defined? What is the meaning of (alpha-beta) in Eq(4)?\n3. Does the \\tau in Eq(7) denote the ones in Eq(2)? If so, why the operation become plus but not minus as in Eq(2)? \n4. The proposed method is argued to be a two-stage method, does it mean training labeled and unlabeled samples in different stages? However, according to Eq(8), the model seems optimized on labeled and unlabeled samples together.\n5. In Figure 1, the right side of the diagram shows a question mark (\"?\") symbol, does it represent the unlabeled samples? If so, why does the labeled branch also include a depiction of unlabeled samples? Is there a particular significance or reason for this choice in the illustration?\n6. The paper seems primarily focused on addressing the long-tailed problem while offering little innovation for the open-world problem.\n7. In the defined Realistic Open-World Long-Tailed Semi-Supervised Learning (ROLSSL) framework, it is assumed that the number of novel classes is known (L200). The proposed method leverages this prior knowledge for model calibration (Eq(3)). 
However, this assumption seems somewhat unrealistic, as it is uncommon to have prior knowledge of the exact number of novel classes present in the unlabeled samples."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The analysis of hyperparameters $\\tau_1$ and $\\tau_2$ are missing."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The studied problem in this paper is practical and underexplored. Most previous studies focus on long-tailed or open-world semi-supervised learning, without considering both.\n2. The proposed approach is simple and easy to implement. This paper improves previous methods by proposing a dual-stage post-hoc logit adjustment technique, which does not involve complex strategies or additional learnable parameters.\n3. Experiments on six datasets show the advantage of the proposed approach against previous methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper aims to solve a complex task in semi-supervised learning, i.e., the long-tailed label distribution and the presence of novel classes in unlabeled data. The paper presents a simple solution for this task based on techniques such as logit adjustment, and entropy maximization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The novelty of the proposed method seems limited. As stated in the *Strengths*, the proposed method is simple and easy to implement because it is pretty much built upon existing techniques. However, this also makes the contribution of the method minor.\n\n2. The rationale behind the method is not well-explained. It is unclear why the logit adjustment strategies for labeled and unlabeled data are different. Also, the design of Eq. (3) is not straightforward and needs in-depth understanding and justification. In Eq. (4), the impact of hyperparameters $\\alpha$ and $\\beta$ are not studied.\n\n3. The writing needs improvement. For instance, the paper should include the definition of $\\mathcal{L}\\_{pair}$ and $\\mathcal{L}\\_{reg}$ to be self-contained.\n\n4. It would be better if the paper could include real-world semi-supervised datasets which follow long-tailed label distribution and also include novel classes in unlabeled data.\n\n5. It seems that the logit adjustment strategy in open-world semi-supervised learning has been considered in [1].\n\n[1] Bridging the Gap: Learning Pace Synchronization for Open-World Semi-Supervised Learning. IJCAI 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nhe2024towards,\ntitle={Towards Realistic Long-tailed Semi-supervised Learning in an Open World},\nauthor={Yuanpeng He and Lijian Li and Xiancai Chen and Chi-Man Pun and Wenpin Jiao and Zhi Jin},\nyear={2024},\nurl={https://openreview.net/forum?id=zLHP6QDWYp}\n}"
},
"abstract": {
"value": "Open-world long-tailed semi-supervised learning (OLSSL) has increasingly attracted attention. However, existing OLSSL algorithms generally assume that the distributions between known and novel categories are nearly identical. Against this backdrop, we construct a more Realistic Open-world Long-tailed Semi-supervised Learning (ROLSSL) setting where there is no premise on the distribution relationships between known and novel categories. Furthermore, even within the known categories, the number of labeled samples is significantly smaller than that of the unlabeled samples, as acquiring valid annotations is often prohibitively costly in the real world. Under the proposed ROLSSL setting, we propose a simple yet potentially effective solution called dual-stage post-hoc logit adjustments. The proposed approach revisits the logit adjustment strategy by considering the relationships among the frequency of samples, the total number of categories, and the overall size of data. Then, it estimates the distribution of unlabeled data for both known and novel categories to dynamically readjust the corresponding predictive probabilities, effectively mitigating category bias during the learning of known and novel classes with more selective utilization of imbalanced unlabeled data. Extensive experiments on datasets such as CIFAR100 and ImageNet100 have demonstrated performance improvements of up to 50.1%, validating the superiority of our proposed method and establishing a strong baseline for this task. For further researches, the experimental code will be open soon."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Yuanpeng_He1",
"~Lijian_Li1",
"~Xiancai_Chen1",
"~Chi-Man_Pun1",
"~Wenpin_Jiao1",
"~Zhi_Jin1"
]
},
"authors": {
"value": [
"Yuanpeng He",
"Lijian Li",
"Xiancai Chen",
"Chi-Man Pun",
"Wenpin Jiao",
"Zhi Jin"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Open-world",
"Realistic long-tailed semi-supervised learning",
"Logit adjustment"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "he|towards_realistic_longtailed_semisupervised_learning_in_an_open_world"
},
"pdf": {
"value": "/pdf/2cd398bf25fc2602a15dc6780847e91c92d2059c.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Towards Realistic Long-tailed Semi-supervised Learning in an Open World"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
zLaayPL8f0 | Decomposition Ascribed Synergistic Learning for Unified Image Restoration | main | Active | Image Restoration;Decomposition;Orthogonality;Signal formation | learning theory | 3;3;5;6 | 5;4;4;4 | 2;2;3;3 | 2;2;3;3 | 3;1;2;3 | 4.25 | 4.25 | 2.5 | 2.5 | 2.25 | -0.555556 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see the above Weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper uncovers an observation that the decomposed singular vectors and values naturally undertake the different types of degradation information, ascribing various restoration tasks into two groups, i.e., singular vector dominated and singular value dominated.\n2. Two operators are developed to favor the decomposed optimization of degraded singular vectors and values for various restoration tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Decomposition Ascribed Synergistic Learning (DASL), a method for unified image restoration that optimizes singular vectors and values for handling multiple degradations within a single model, and integrates seamlessly into existing restoration architectures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. There is a limited evaluation with recent AIR methods like PromptIR, GenLV, and the diffusion-based AIR techniques. Such comparisons ensure the research is aligned with the latest advancements in the field.\n2. Do the observations regarding the decomposition of singular vectors and singular values hold true in terms of other degradations (low-resolution, jepg compression, etc.) or compound degradations (noise+blur, rain+haze, etc.)? Whether the proposed method is available in multiple degradation combinations?\n3. How well do the decomposed singular vectors and singular values represent degradation information, and are they discriminative enough to distinguish between different degradations?\n4. Some typos like \", ,\"."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Are there specific references supporting the findings presented in Figures 1 and 2? Additionally, clarification is needed on the type of Singular Value Decomposition (SVD) employed in the analysis and whether any post-processing techniques were applied to produce the displayed images. It is noted that the deblurring case exhibits a relatively large area of damaged pixels, while the hazy and low-light image enhancement cases only show minor pixel degradation. What factors contribute to this discrepancy?\n\nThe authors conducted tests on underwater enhancement and sandstorm enhancement but did not include these results in Figure 1 & 2. Are the observations in Figure 1 commonly encountered across different datasets, including underwater and sandstorm? Moreover, Figure 2 is based on 100 images; Additional images should be tested to validate the findings. It is unclear whether only synthetic images were used to generate Figures 1 and 2, such as in the rainy case.\n\nThe assumptions regarding singular vectors capturing content information and spatial details, and singular values representing global statistical properties, are questionable. The authors should provide more supporting references for these claims. Furthermore, given that degradations are categorized into two groups, does the proposed method require prior knowledge of which group the degradation falls into before reconstruction?\n\nA clear diagram illustrating the entire blueprint of the proposed methods should be included to demonstrate how the SVEO and SVAO are integrated into the algorithm. The current description lacks clarity, particularly the statement about substituting convolution layers with SVEO.\n\nThe experiments primarily report PSNR results; however, additional metrics should also be considered to provide a more comprehensive evaluation of the proposed method's performance.\n\nThe rationale for using the L1 norm in L_dec should be clarified.\n\nFigure 5 does not effectively illustrate progressive component reconstruction as intended.\n\nTypos in the abstract: 'tasks into two groups, , '"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper addresses a significant gap in the field by proposing a unified approach to handling multiple image degradations, moving beyond the conventional practice of treating each degradation in isolation.\n\nThe use of singular value decomposition to categorize restoration tasks into singular vector dominated and singular value dominated groups provides a fresh perspective on the relationships between different types of degradations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The manuscript explored the restoration of multiple image degradations through a unified model. By employing singular value decomposition, the authors classified restoration tasks into singular vector dominated and singular value dominated categories. They introduced the Decomposition Ascribed Synergistic Learning (DASL) framework, which optimized the decomposed components to exploit relationships among various restoration tasks. The framework included two operators, Singular VEctor Operator (SVEO) and Singular VAlue Operator (SVAO), along with a supporting decomposition loss."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The manuscript raises several important questions regarding its methodology and results that require clarification.\n\nThe current description lacks clarity."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "[Q1] As per my understanding, the orthogonal regularization loss and decomposition loss in formula (5) represent the reconstruction loss of singular vectors and singular values respectively, and different dominant factors are employed for various types of degradation. During the training process, the balanced weights are set as a fixed ratio. Will this setting negatively harm the mixed results?\n\n[Q2] In Figure 2, which baseline method did you use? It seems that the exchange reconstruction error for dehazing is very close. Thus, this figure can't convince me of the effectiveness of the proposed idea in dehazing task.\n\n[Q3] This work aims to use a single model to handle multiple kinds of degraded images with one degradation type. How does it perform on images that contain multi-types of degradation? What is the difference in performance between your model and a model that trains only with a single type of degradation?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "[S1] Lightweight networks have great application prospects at present.\n\n[S2] DASL reduces the computations of the baseline model and improves the effect."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper mainly proposes Decomposition Ascribed Synergistic Learning (DASL) to restore multiple image degradations by SVD and FFT. Specifically, diverse degradations are ascribed into two groups, singular vector-dominated degradations and singular value-dominated degradations. The proposed DASL dedicates the decomposed optimization of them respectively, rendering a more unified perspective to inherently utilize the potential partnership among diverse restoration tasks for ascribed synergistic learning. Two effective operators SVEO and SVAO have been developed to favor the decomposed optimization. Experimental results demonstrate that DASL improves restoration quality while reducing model parameters and accelerating inference speed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "[W1] The paper writing is very poor, containing many typos. Many sentences, paragraphs, and sections are difficult to understand. \n\n[W2] The proposed method applies the SVD in the FFT space and validates its effectiveness to some extent. However, it does not clearly explain the insight and why it works.\n\n[W3] More visualization results of DASL+baseline should be presented rather than only MPRNet. In addition, in Fig. 14-16, the superiority of the proposed method is hard to distinguish. \n\n[W4] The article presumes that the SVD and IDFT operate similarly in terms of signal formation, but does not show the Ablation Study of the restore results."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Same as described in the weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is easy to understand and has a clear overall structure.\n2. Exchange of the singular value and the singular vector analysis is performed on the image restoration tasks of different degradation types, showing the role of singular values and singular vectors in image restoration.\n3. The singular value operator and singular vector operator are proposed to make the existing methods lighter, with some improvement in computational complexity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The goal of this paper is to solve the image restoration task of multiple image degradation types. The paper explores the relationship between singular values and singular vectors between degraded images and clear images in different degradation types. Singular value-dominated and singular vector-dominated image restoration are ascribed. Singular vector operators and singular value operators are proposed and lightly integrated into the existing image restoration backbone. Experiments on multiple degradation types are conducted to verify the effectiveness of the method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In Figure 2, the authors slightly show that the decomposed singular values and singular vectors between different degradation types undertake different degradation information. Specifically, the exchange reconstruction error of Figure 2 (a) in the dehazing task is close to 50% for both singular value and singular vector, and the low-light and haze degradation in Figure 2 (b) are similar to other degradations such as rain and noise, while the blur and low-light in Figure 2 (c) are also very close. Therefore, I think that the authors don't explain the relationship between singular values and singular vectors and degradation information well. The assumptions proposed are also untenable.\nThe authors could consider conducting an in-depth analysis in high-dimensional space or frequency space to analyze the impact of singular values and singular vectors in terms of signal-to-noise ratio, frequency changes, etc. Even ablation analysis without using singular values or singular vectors can be used instead of just statistical analysis of reconstruction errors.\n\n2. The experiments only include results on a single degradation task dataset, not on a mixed dataset. This does not meet the requirement of using a single model for a unified image restoration task. The authors could refer to the similar experimental setup in the work of Li et al. [1] to conduct experiments on mixed degradation datasets to demonstrate the effectiveness and robustness of the proposed DASL. \n\n3. The stability improvements brought by the loss curves of the training trajectories in Figure 6 and Figure 7 are also not clearly demonstrated for tasks such as denoise and derain, etc. The authors should include the corresponding intermediate feature map changes or any other visualization statistics that can assist in demonstrating the effectiveness of singular values and singular vectors.\n\n[1] Li, Boyun, Xiao Liu, Peng Hu, Zhongqin Wu, Jiancheng Lv, and Xi Peng. \"All-in-one image restoration for unknown corruption.\" In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 17452-17462. 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024decomposition,\ntitle={Decomposition Ascribed Synergistic Learning for Unified Image Restoration},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zLaayPL8f0},\nnote={under review}\n}"
},
"abstract": {
"value": "Learning to restore multiple image degradations within a single model is quite beneficial for real-world applications. Nevertheless, existing works typically concentrate on regarding each degradation independently, while their relationship has been less comprehended to ensure the synergistic learning. To this end, we revisit the diverse degradations through the lens of singular value decomposition, with the observation that the decomposed singular vectors and singular values naturally undertake the different types of degradation information, dividing various restoration tasks into two groups, \\ie, singular vector dominated and singular value dominated. The above analysis renders a more unified perspective to ascribe diverse degradation connections, compared to previous task-level independent learning. The dedicated optimization of degraded singular vectors and singular values inherently utilizes the potential partnership among diverse restoration tasks, attributing to the Decomposition Ascribed Synergistic Learning (DASL). Specifically, DASL comprises two effective operators, namely, Singular VEctor Operator (SVEO) and Singular VAlue Operator (SVAO), to favor the decomposed optimization, which can be lightly integrated into existing image restoration backbone. Moreover, the congruous decomposition loss has been devised for auxiliary. Extensive experiments on five image restoration tasks demonstrate the effectiveness of our method."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Image Restoration",
"Decomposition",
"Orthogonality",
"Signal formation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/6e6d558f084294c197e57b496bbf9b1af751c5fd.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Decomposition Ascribed Synergistic Learning for Unified Image Restoration"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zM92zziRtQ | TLCM: Training- efficient Latent Consistency Model for Image Generation with 2-8 Steps | main | Active | latent diffusion model;consistency model;acceleration | generative models | 3;3;5;5;5 | 4;4;5;4;4 | 2;2;3;2;3 | 1;1;2;2;2 | 3;1;3;2;2 | 4.2 | 4.2 | 2.4 | 1.6 | 2.2 | 0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "As in weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "-\tThe authors propose a two-stage distillation scheme that progressively distills the model to a few-step regime with reduced training costs.\n-\tThe proposed method achieves superior results with SDXL on COCO-5k generation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose TLCM (Training-efficient Latent Consistency Model) for accelerating text-to-image latent diffusion models in a data-free manner with a small amount of training time. The key innovation is a two-stage distillation process. A data-free multistep latent consistency distillation (MLCD) is proposed to accelerate the base model, followed by another improved data-free latent consistency distillation to ensure global consistency. The authors enhance the model's performance through several techniques including latent LPIPS for perceptual consistency, multi-dimensional preference learning, distribution matching, and adversarial learning. Their empirical results show that TLCM can generate high-quality images in just 2-8 inference steps."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-\tThe submission is basically a technical report on achieving the SOTA few-step image generation performances with data-free multi-step consistency distillation, which is a successful practical combination of [1] and MCM. The second distillation stage also uses a series of SOTA techniques including ADD, DMD, and preference tuning. There are very limited scientific insights or technical innovations.\n-\tThe current presentation is poor and needs substantial improvement. For instance, there is extensive and unnecessary usage of acronyms, making the submission so confusing and hard to read.\n-\tLimited empirical evaluation. The method is only validated on SDXL. How does it perform on other diffusion models with different architectures, e.g., Pixart?\n\nReferences\n\n1.\tKohler, Jonas, et al. \"Imagine flash: Accelerating emu diffusion models with backward distillation.\" arXiv preprint arXiv:2405.05224 (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "no"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see above"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Advantages:\n1. While straightforward, the idea shows some novelty\n2. The writing is acceptable, though the innovation isn't immediately apparent"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a direct combination of Multistep Consistency Models [1] and Latent Consistency Models [2], resulting in a Multistep Latent Consistency Model.\n\nAdvantages:\n1. While straightforward, the idea shows some novelty\n2. The writing is acceptable, though the innovation isn't immediately apparent\n\nDisadvantages:\n1. The experimental results appear counterintuitive: contrary to normal expectations where more sampling steps yield better results, this paper shows deteriorating FID scores with increased sampling steps\n2. The paper lacks directness - the core innovations are hard to identify, as the authors seem to attempt highlighting multiple contribution points\n3. The generated images predominantly show a cyberpunk style, lacking photorealism and overall quality\n\nTechnical Issues:\n1. Equation 1 appears incorrect - contains an extra x_t term\n Should be: dx_t = f(x_t, t)dt + g(t)dw_t\n\n2. Results Inconsistencies:\n - Tables 1 and 3 show FID increasing with more sampling steps, contradicting common understanding\n - Ablation study shows puzzling results: combining all techniques significantly worsens FID scores\n - Equations 11-13 look unnecessary, just traditional confrontational losses.\n\nWriting Concerns:\nThe paper lacks focus on core contributions. It is hard for me to distinguish core contributions.\n\nReferences:\n[1] Heek et al., \"Multistep consistency models,\" 2024\n[2] Luo et al., \"Latent consistency models,\" 2023\n[3] Song et al., \"Consistency models,\" 2023"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Disadvantages:\n1. The experimental results appear counterintuitive: contrary to normal expectations where more sampling steps yield better results, this paper shows deteriorating FID scores with increased sampling steps\n2. The paper lacks directness - the core innovations are hard to identify, as the authors seem to attempt highlighting multiple contribution points\n3. The generated images predominantly show a cyberpunk style, lacking photorealism and overall quality"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "(L144-145) This is a vague statement. What is the meaning of the effectiveness?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Interesting idea to use synthetic data for distillation.\n- Reasonable motivations."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Existing methods for distilling the pretrained knowledge of Diffusion Models have several drawbacks; expensive training time, necessity of large scale real data, and image generation quality given a few steps to generate an image. To this end, the authors propose a data-free distillation pipeline named as Training Efficient Latent Consistency Model (TLCM). Briefly, the initial parameter of TLCM is obtained by the proposed first process, and the second process boosts global consistency of TLCM. The experiment results show that TLCM has benefits over the baselines for the short training time and the data-free mechanism."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Overview figure is very complex to understand. My suggestion is that it would be better if the right figure is decomposed into two parts; section 4.2 and section 4.3\n- Weak explanations on the experiment settings.\n - What is the meaning of each measure? \n - Is higher better or lower better? \n - How many samples are used? \n - What was the input to generate the samples to measure?\n - It sounds more reasonable if the proposed methods contain only IS or FID like [1]. The aim of the work is to distill the knowledge of the pretrained models and reduce the required sampling steps to some extent. Shouldn’t it enough If the generated images by TLCM (2-8 steps) are as realistic as DDIM? Please provide justifications why the other metrics are needed in detail. \n- Weak interpretations of the experiment results.\n - (L364-L365) The proposed methods use synthetic data to distill the pretrained knowledge while the baselines are using real data. How does the proposed method show better performance than the baselines? It sounds more reasonable if the baseline is a sort of upper bound.\n - FID of TLCM (2 steps) is better than FID of TLCM (8 steps). It also shows the pattern that the # of steps and FID are inversely proportional, which is not reasonable. \n - Prioritize what information is important for each table and emphasize those numbers.\n\n- Too many engineering techniques are applied for improving the marginal performance. They dilute the main point of the paper.. \n\nOverall, the proposed methods are interesting, but the results (FID) are not convincing enough.\n\n[1] PROGRESSIVE DISTILLATION FOR FAST SAMPLING OF DIFFUSION MODELS, ICLR’22"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- After the emergence of rectified flow transformer, I think these kinds of distillation methods should be re-assessed. Apart from the non-predictable nature of the original diffusion trajectory, we now can learn the straightened flow of latent space and the distillation must be a lot easier. Even in the paper of SD3, it says that if the model capacity is given enough, reducing timesteps into a small number sacrifices little performance. This is not a critical comment about this paper and I would recommend the authors to think about this aspect."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Explanation about existing methods have been given enough. It is easy to follow and the application of each component sounds reasonable.\n- Resulting images with only 4 steps look quite impressive.\n- Necessary ablation studies to make this method solid have been provided."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "- The authors propose a multi-step distribution matching diffusion model to generate samples within few steps. By dividing the whole iterations into several predefined steps, the consistency model can focus on matching distribution in narrower range.\n- Adversarial loss, latent-LPIPS loss, and human preference loss have also been adopted to boost the performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Even though the results are quite impressive, this method is a combination of Multistep Consistency Models(MCM), Distribution Matching Distillation(DMD), and DiffusionGAN. It is hard to ignore novelty issue and this would be the main reason of my decision.\n- Too many notations made me really hard to follow this paper. It would be much better if some expressions can be trimmed.\n\nMinor issue\n- The caption of the 3rd row in Fig.3 should be modified."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Does the 70GPU-hour training time include the time spent on data generation? The overall cost of data generation is needed to justify L241 'with cheap cost in data-free manner.'"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper presents the method in a very intuitive way with very informative equations and figures. The overall writing quality of this paper is good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a method for latent diffusion model distillation. The core idea of this paper is mainly inspired by latent consistency distillation. The authors implemented some improvements based on the previous success of diffusion distillation, such as sparse timesteps. To prevent the cost of collecting real-world data, the proposed method is trained on generated data from the teacher model instead. A bag of independent techniques such as adversarial training are further included to help the model match the SoTA performance. The evaluations are performed on SDXL."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
            "value": "- Novelty\n\nThis paper presents insufficient novelties. The entire method seems to be an ensemble of diffusion distillation techniques already proven effective. For example, this paper advocates the advantage of relying on no training data and uses synthesized data instead. However, this has already been proven useful in SD3-turbo [1], and learning from generated data is by no means a new idea under the context of model distillation. \n\n- Empirical evaluations\n\nThis paper is presented with insufficient evaluations. SDXL, which is a relatively old model, is the only one evaluated in the paper. And there are some important comparisons missing such as SD3-turbo, DMD, and instaflow. \n\nThe authors claim, ‘We only report FID for reference and do not analyze it since FID on COCO is not reliable to evaluate text-to-image models.’ I agree that FID is not the best way for diffusion evaluation. However, the values do reveal something about the proposed methods. I noticed that the FID values increase with the number of steps for the proposed method. In my understanding, this is a result of learning distillation from generated data instead of real data, as the modeled distribution deviates further from the real-world data distribution. \n\n- Supports to claims \n\nI find some claims in the paper require further support. For example, the authors claimed ‘They (other distillation methods) need to perform long-time learning with a huge volume of real data.’ I find it very hard to agree with this. According to my experience, methods such as LCM can be trained very fast with a small portion of training data. The authors are strongly encouraged to fill in the ‘TH’ value for LCM in Table 1 for a direct comparison. For the rest of the metrics, I don’t see how the proposed method clearly outperforms the rest, except for IR, which is a score that the model is explicitly optimized for. \n\n\n[1] Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion Distillation, arxiv."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a novel Training-efficient Latent Consistency Model (TLCM) to tackle the challenges of expensive cost and the performance drop when sampling with few steps in large distilled latent diffusion models."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024tlcm,\ntitle={{TLCM}: Training- efficient Latent Consistency Model for Image Generation with 2-8 Steps},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zM92zziRtQ},\nnote={under review}\n}"
},
"abstract": {
"value": "Distilling latent diffusion models (LDMs) into ones that are fast to sample from is attracting growing research interest. However, the majority of existing methods face two critical challenges: \n1) They need to perform long-time learning with a huge volume of real data. \n2) They routinely lead to quality degradation for generation, especially in text-image alignment. \n\nThis paper proposes the novel Training-efficient Latent Consistency Model (TLCM) to overcome these challenges. \nOur method first fast accelerate LDMs via data-free multistep latent consistency distillation (MLCD), then data-free latent consistency distillation is proposed to guarantee the inter-segment consistency in MLCD at low cost. \nFurthermore, we introduce bags of techniques to enhance TLCM's performance at rare-step inference without any real data, e.g., distribution matching, adversarial learning, and preference learning. \nTLCM demonstrates a high level of flexibility by allowing for adjustment of sampling steps within the range of 2 to 8 while still producing competitive outputs compared to full-step approaches.\nAs its name suggests, TLCM excels in training efficiency in terms of both computational resources and data utilization.\nNotably, TLCM operates without reliance on a training dataset but instead employs synthetic data for the teacher itself during distillation. With just 70 training hours on an A100 GPU, a 3-step TLCM distilled from SDXL achieves an impressive CLIP Score of 33.68 and an Aesthetic Score of 5.97 on the MSCOCO-2017 5K benchmark, surpassing various accelerated models and even outperforming the teacher model in human preference metrics. \nWe also demonstrate the versatility of TLCMs in applications including controllable generation, image style transfer, and Chinese-to-image generation."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"latent diffusion model",
"consistency model",
"acceleration"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f3969c19aede76b6812cadcca514c3cbb300b7e1.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "TLCM: Training- efficient Latent Consistency Model for Image Generation with 2-8 Steps"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zMjjzXxS64 | FreSh: Frequency Shifting for Accelerated Neural Representation Learning | main | Active | spectral bias;automatic hyperparameter selection;implicit neural representation;discrete fourier transform | applications to computer vision, audio, language, and other modalities | 1;5;6;8 | 4;4;3;4 | 3;2;3;4 | 2;3;3;3 | 4;2;3;4 | 5 | 3.75 | 3 | 2.75 | 3.25 | -0.226455 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The description of a vector spectrum by summing the DFT appears a little non-standard (L285-7). Could the authors provide a reference for this approach (potentially outside the INR literature?)\n- In Appendix C it is mentioned that the for image regression the same image is resampled 10 times due to randomness of the model output. The same network should be deterministic in output - does the randomness occur due to re-sampling the network with reinitialisation? \n- The paper mentions that other activations are addressed L131. Have experiments on non-periodic functions (e.g. Gaussian) been performed? \n- Could the authors provide details on the compute required for Fresh on more complex signals like NeRF (L414-6 notes that this is negligible for images). \n- Table 5 records video results. It mentions 'Results for NeRF are provided for reference only as it is incompatible with FreSh'. Could the authors clarify this?\n - Could the authors clarify what is meant by 'direction-dependent frequency magnitudes' and its importance for video (L510)? \n\nMinor Presentation: \n- The presentation of Figure 2 could be improved (visually the figure is difficult to interpret without the caption / main text). Tweaking this figure (perhaps additional labeling) would greatly improve the clarity of the method and extend its impact.\n- The font in figures 12, 13, 14 is inconsistent \n- The text size in Table 6 is inconsistent / appears to have been scaled \n- Figure 5 - suggest changing 'Thousands to steps' to 'Steps ('000)' \n- Figure 1 could be improved (the PSNR labels are difficult to distinguish)\n- Left-hand quotation makes can be done in Latex using the ` character"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "There are a number of strengths in this paper: \n- The issue is well-motivated: selection of frequency is a common frustration when using SIREN implicit neural representations. A method for a-priori selection of frequency rather than needing to grid-search across trained networks will be useful for the community. \n- The method is conceptually simple and should be easy to implement / include in INR pipelines meaning it may be broadly applicable.\n- Experimental results extensively tested with results averaged across seeds for multiple modalities including image datasets (WikiArt, FFHQ, Chest X-Ray, etc), video, and neural radiance fields. \n- The authors have incorporated their method in multiple pipelines (Siren, Fourier, Hashgrid embeddings, Finer). Results broadly indicate that the method leads to a performance improvements within these pipelines. \n- The experiments are well ablated, with key components (e.g. spectrum size n) evaluated with multiple configurations and datasets\n- The paper is well written, logically structured, and clearly presented. Existing literature is reviewed well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a method for selecting the frequency hyperparameter involved in using periodic activations in implicit neural representations. Selection of the frequency hyperparameter is critical to INR performance, and authors often either select arbitrarily e.g. $\\omega=30$, or employ a costly grid-search. The authors propose selecting this hyperparameter based on the target signal frequencies, by minimising the Wasserstein distance between the target signal spectrum and the untrained network output spectrum. This is conducted by searching across candidate frequencies. The authors demonstrate performance improvements across images, neural radiance fields, and videos. Overall this is a nicely written paper which introduces a simple method with interest to the INR community."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are a few minor weaknesses in the paper. These principally relate to small aspects of presentation (e.g. Figure 2). I've listed these in the Questions below as they are largely minor issues rather than critical weaknesses. \n\nIn terms of technical weaknesses, while the method avoids a costly grid search across trained networks it still requires a grid search across candidate frequencies on untrained networks (this cost will be negligible in comparison). In addition, the method introduces an additional hyperparameter (selection of the spectrum cut-off). If I'm reading it correctly, this hyperparameter would still need a trained network to be evaluated (even though the authors note it transfers across signals). This could reduce some of the benefit of the method, especially if the frequency hyperparameter needs to be searched in combination with the frequency cut-off. Could the authors clarify this issue?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "> Similarly, Wire (Saragadam et al., 2023) uses very high frequencies and increases frequencies at each hidden layer, not just at the embedding layer. This makes it incompatible with FreSh\n\nI am curious if you can say more about these models (NeRF and Wire) which use very high frequencies. What purpose do these high frequency components serve if they are not reflected in the training data? Why don’t these models perform poorly as FreSh would predict?\n\nYou mention that your FreSh measurements are noisy due to random initialization, so you average across 10 initializations. [1] find that SIREN’s random initialization of the first layer has a significant effect on the final PSNR (Appendix A.6). I would be interested to see if your method could be further improved by using FreSh to select not only the hyperparameters, but also the random initialization to start from.\n\nIn the appendix you say that there is high variability in the model configurations selected by FreSh, even within a single dataset. But how much of this variability is random vs. actually important for performance? I would be curious to see how per-image FreSh compares to a dataset-wide FreSh: how much of the advantage of FreSh comes from optimizing for each individual image, and how much can be achieved just by making an appropriate hyperparameter choice for each dataset. \n\n[1] https://openreview.net/pdf?id=iKPC7N85Pf"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "Clear, simple, effective solution to a practical problem. Thorough demonstrations that it works. The paper is well-written and well-presented, easy to read and understand. I can imagine this technique being widely adopted for INR hyperparameter optimization."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Hyperparameter optimization is a tricky but important problem for implicit neural representations (INRs), since different hyperparameters may be optimal for different signals, but hyperparameter sweeps can be expensive. The authors introduce a fast proxy for determining if an INR’s hyperparameters are well-suited to a particular training signal. Instead of directly optimizing the post-training loss, the authors propose to minimize the Wasserstein distance between frequency distributions of the training signal and the newly initialized INR’s output. This skips the step of actually training each INR, dramatically reducing the time needed to perform a hyperparameter sweep. This proxy appears to perform quite well, conferring most of the performance advantage of a traditional hyperparameter grid search. The authors evaluate their method across a range of image datasets, videos and NeRFs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Section 5 contains a few typos:\n\nSection 5, paragraph 2, “a trail and error approach”\n\nSection 5.1: You fell victim to LaTeX’s backwards quotation marks, one of the classic blunders\n\nOtherwise the paper is quite solid."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Typically, spectral bias means that neural networks tend to fit the high-frequency component of the target signal. Therefore, spectral bias can be reflected as different learning speeds for different frequency components. Although the advanced activation functions such as Sine improve the representation performance, MLPs with Sine still first fits the low-frequency parts as shown in some paper such as “Improved implicit neural representation with Fourier reparameterzied Training”. Inspired by this phenomenon, these methods might not change the bias; rather, it broadens the range of frequencies the model can represent. Under this perspective, Fresh might shift some special spectrum distribution that enjoys the fastest learning speeds of MLPs towards the target signals. So the detailed description of this special spectrum distribution or the validation of the existence of this special spectrum might be an interesting problem. This point will not affect my scoring. If authors could try to find this special spectrum and do more exploration about this problem, I think that it will further deepen our understanding of deep learning."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The method of this paper is fairly easy to understand and flow. This is the first hyperparameter selection method for implicit neural representation models based on the idea of frequency alignment. \n2. The computation cost of this method can be ignored. \n3. Through relatively extensive experiments and the experiments on \"decreasing the default embedding frequency\" in the supplementary materials, this paper well demonstrates that this method could adjust part hyperparameters to adjust the “preferred spectrum” of MLPs, achieve better representation of target signals."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method for selecting the hyperparameters for current MLP-based INR models to improve the suboptimal representation performance of models with default hyperparameters. This method measures the Wasserstein distance between frequency distributions of initial-state models and the target signal and selects the hyperparameters that minimize the Wasserstein distance. This paper validates this method on several current INR models via 2D image approximation, 2D video fitting and neural radiance fields. Experimental results demonstrate that this method is effective to some extent."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The related work of this paper seems overly lengthy and overlooks some of the latest methods for spectral bias. For example, spectral bias can be overcame by special initialization and training dynamics adjustment, such as reparameterization and batch normalization. Moreover, as stated in the paper that Fresh is a simple initialization technique in line 024, Fresh should compare to the previous initialization method like “From Activation to Initialization: Scaling Insights for Optimizing Neural Fields”. \n\n2. There lacks a reliable theoretical or experimental link between the key observation (line 019) and Fresh (line 241,Table 2). More concretely, there need an explanation that the adjustment of embedding layer hyperparameters could affect the whole model's spectrum. A more quantitative expression would be better. The illustration in Fig. 1 seems to be vague. Clarification of this point would certainly raise my opinion of the paper.\n\n3. Some experimental results show that Fresh could not provide a better result even worse than baseline models, such as results with time input in Table 5 and in Table 6. Intuitively, Fresh should not obtain worse results. It might show that there are several unclear relationships. Solving the weakness 2. might help this issue.\n\n4. The experiment in Appendix A seems for demonstrating that weights of embedding layer could not be trained sufficiently by gradient descent. However, why is the optimization algorithm SGD? Meanwhile, is the index L2 norm (Eq. 8) used by the paper (720) meaningful? This experiment should be illustrated more clearly."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- Why not just have these parameters be learned rather than grid searched (as I believe omega_0 can be learnable at least, I am not sure about the others)? \n- 15,000 steps seems very excessive to fit a single image, how much wall clock does this take on a gpu for good results? Other methods like instant-ngp and spder (above) are much faster.\n- In 5.2 in experiments why is NeRF not used as a baseline also as it is mentioned in the paper? I am not sure which one it is in Table 6."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well written, and introduces a novel method of initialization by fitting the frequency spectrum of the untrained model with the desired downstream task. The logic is sound, and this concept may apply to other fields."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors notice that the initial frequency spectrum of an untrained model’s output correlates strongly with the model’s eventual performance on a given target signal. They want to “pretrain” the untrained model output with the end signal through a hyperparameter search over initialization parameters."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
            "value": "Evidence\n- The experimental evidence is extremely weak in my opinion and does not demonstrate a significant increase that would warrant other researchers’ usage of this method. Most results show improvements of less than 1dB, which is essentially identical to the naked eye.\n- The results in Table 1 are concerning. After 15,000 steps the difference between the Base Siren model and using Fresh have virtually no difference (PSNRs of 31.18 and 31.81). \n- Table 3 - Why does FreSH have no improvement on Finer and Finer with k=0 and most samples? If it is only applicable to the SIREN and Fourier features model it would be way too limited to warrant an accept as these are several years old baselines.\n- Moreover in Table 6, overall the results are not impressive as most PSNRs increase by < 1dB (i.e. Finer has virtually no change)\n- Table 8, why does Fresh hurt the results when you include time? It seems as expected input for video.\n\nBaselines\n- Paper seems to have a lot of overlap with SPDER https://arxiv.org/pdf/2306.15242 (especially in theory, and that the untrained model output aligns with end reconstruction) and should compare the baseline against it.\n- Other baselines should be (Ramasinghe & Lucey, 2022) which suggests Gaussian activations. It is mentioned in the paper but not reported on to the best of my knowledge.\n- Instant NGP (HashGrid?) may also be used in the image baseline, as it excels in high-resolution images but is not included here for that.\n- Overall, SIREN seems to be a very weak baseline model. I understand that tuning the omega_0 parameter can improve results, but I don’t believe this to be as significant of a contribution on top of the existing model.\n\nComplexity\n- Although computationally cheap (utilizing random initialization and DFT), it adds much more complexity to the setup\n- For example, on the Wasserstein measurement setup, “To prevent this from affecting the selection process, we measure the Wasserstein distance 10 times and use its mean to select.” I feel this is too much of a hack to be reliably used by other researchers, but I may yield to what other reviewers say.\n- Authors could clarify what the parameter values represent in the first section of Experiments for ease of reading.\n\nImpact\n- It is stated the method is incompatible for “extremely high frequencies” (i.e. NeRF and Wire), which makes me doubt its generalizability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024fresh,\ntitle={FreSh: Frequency Shifting for Accelerated Neural Representation Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zMjjzXxS64},\nnote={under review}\n}"
},
"abstract": {
"value": "Implicit Neural Representations (INRs) have recently gained attention as a powerful approach for continuously representing signals such as images, videos, and 3D shapes using multilayer perceptrons (MLPs). However, MLPs are known to exhibit a low-frequency bias, limiting their ability to capture high-frequency details accurately. This limitation is typically addressed by incorporating high-frequency input embeddings or specialized activation layers. In this work, we demonstrate that these embeddings and activations are often configured with hyperparameters that perform well on average but are suboptimal for specific input signals under consideration, necessitating a costly grid search to identify optimal settings. Our key observation is that the initial frequency spectrum of an untrained model's output correlates strongly with the model's eventual performance on a given target signal. Leveraging this insight, we propose frequency shifting (or FreSh), a method that selects embedding hyperparameters to align the frequency spectrum of the model’s initial output with that of the target signal. We show that this simple initialization technique improves performance across various neural representation methods and tasks, achieving results comparable to extensive hyperparameter sweeps but with only marginal computational overhead compared to training a single model with default hyperparameters."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"spectral bias",
"automatic hyperparameter selection",
"implicit neural representation",
"discrete fourier transform"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/6e20622979b5ee9c810123e9c5d4316d41f6fffe.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/056dfc49e2068f5943905e3f00585770577c2c4f.zip"
},
"title": {
"value": "FreSh: Frequency Shifting for Accelerated Neural Representation Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zNVefjN3EP | OpenCarbonEval: How much $CO_2$ will your large model exhale in training process? | main | Active | Large-scale model;Carbon footprint;Sustainable AI | alignment, fairness, safety, privacy, and societal considerations | 3;3;5 | 4;2;1 | 1;1;1 | 2;2;2 | 2;2;2 | 3.666667 | 2.333333 | 1 | 2 | 2 | -0.755929 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Thank you for submitting to ICLR 2025. The research problem is both interesting and timely. I have the following questions:\n\n- The differences between OpenCarbonEval and related works, such as CodeCarbon and Eco2AI, appear to be mainly focused on engineering efforts. For example, they could just refactor the code and adopt equation (1). Could the authors expand on their comparisons from a research perspective or discuss additional experimental results?\n\n- The accuracy of estimating f(t) needs improvement. Could the authors clarify how they derived the function, such as comparing with other possible functions?\n\n- Distributed deep learning systems could be complex. Models mapped onto the same class of GPUs can result in varying energy consumptions. Could the authors provide experimental results on how OpenCarbonEval accounts for differences in energy consumption when adjusting for data parallelism, tensor parallelism, pipeline parallelism, and data offloading? Additionally, how does the carbon footprint scale with the number of GPUs?\n\n- The authors have conducted a substantial number of experiments. Are there any key takeaways from these experimental results? For instance, how does the knowledge of CO₂ emissions impact future training strategies?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- Timely problem\n- Good motivation"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a tool named OpenCarbonEval to estimate energy consumption and carbon emissions during the training process of large ML models. The authors present a new formulation for estimating the training carbon footprint of various models and evaluate the effectiveness of their approach in comparison to related work."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Novelty\n- Soundness\n- Insufficient hardware details"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Address all the questions raised in the weakness points, mentioned in the following again:\n\n1.\tInadequate Justification for the α Parameter: The derivation of the α parameter lacks theoretical depth, as the paper does not substantiate the choice of logarithmic modeling. Providing empirical or theoretical evidence for using f(t)=ln(1+αt) would strengthen its validity; a comparison with alternative functions could clarify this choice.\n2.\tLimited Model Generalization: OpenCarbonEval does not convincingly show its ability to generalize across diverse ML tasks and architectures. The adaptability of the α parameter remains unclear, particularly for models outside the initial dataset. Additional validation across a wider range of model types by extending Table 1, 2 will reinforce its versatility. Detailed results for the validation of the method is required.\n3.\tLack of Explanation for Equations: The paper lacks the connection between equation (2) and equation (3), and also lacks the explanation of how the Lcomputation is used to estimate the energy consumption E. Moreover, the Clifelong needs to be elaborated in terms of how it is attained. \n4.\tComparison with results for LLMCarbon: Can the authors present the analysis of same models and hardware combinations presented in Table 4 in the LLMCarbon paper?\n5.\tJustification or Citation for Assumption: The assumption of 1-year GPU lifespan for the embodied carbon estimation lacks justification or citation from a reliable source. \n6.\tOverlooked Factors in Operational Carbon Calculation: OpenCarbonEval does not account for essential factors like Power Usage Effectiveness (PUE) in data centers, leading to potential underestimations of emissions. 
Including PUE in calculations would create a more realistic operational carbon estimate.\n7.\tSimplistic Treatment of Training Dynamics: OpenCarbonEval applies Little’s Law simplistically, assuming a steady state in training dynamics, which oversimplifies the training process. More practical grounding, perhaps through empirical evidence, would enhance applicability in ML contexts. LLMCarbon addresses this by using detailed hardware efficiency and optimal parallelism settings, providing a robust framework for accurately modeling training dynamics.\n8.\tEmbodied Carbon Calculation: OpenCarbonEval’s approach to embodied carbon appears oversimplified, lacking in-depth parameters that affect emissions, such as hardware-specific manufacturing and lifetime estimates. Moreover, the Clifelong needs to be elaborated in terms of how it is attained.\n\nOpenCarbonEval addresses a timely topic and proposes interesting methods for estimating emissions, but fundamental conceptual and methodological gaps needs to be clarified. Without rigorous validation, robust comparisons, and clearer theoretical grounding for key parameters, the method may not yet be practical for diverse ML scenarios. Addressing these weaknesses could make OpenCarbonEval a valuable contribution in the future."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1.\tRelevant Topic: The environmental impact of large ML models is an important concern, and OpenCarbonEval’s focus on a general framework for carbon footprint estimation. OpenCarbonEval showcases improved error rate in comparison to LLMCarbon across various large-scale ML models.\n\n2.\tMulti-Domain Scope: The method’s attempt to generalize across model types, hardware types, and tasks, potentially making it more versatile than existing carbon estimation among the estimation methods. \n\n3.\tDataset Creation: OpenCarbonEval contributes an open resource by curating a dataset of carbon emissions containing 110 records."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents OpenCarbonEval, an innovative approach for estimating the carbon footprint of training large ML models, with claims to improve prior models by incorporating hardware-specifications, embodied and operational carbon estimation, and dynamic power consumption. It introduces an α parameter to model dynamic power consumption and introduces an open-source dataset of 110 models across multiple large-scale ML tasks for validation of the proposed approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tInadequate Justification for the α Parameter: The derivation of the α parameter lacks theoretical depth, as the paper does not substantiate the choice of logarithmic modeling. Providing empirical or theoretical evidence for using f(t)=ln(1+αt) would strengthen its validity; a comparison with alternative functions could clarify this choice.\n2.\tLimited Model Generalization: OpenCarbonEval does not convincingly show its ability to generalize across diverse ML tasks and architectures. The adaptability of the α parameter remains unclear, particularly for models outside the initial dataset. Additional validation across a wider range of model types by extending Table 1, 2 will reinforce its versatility. Detailed results for the validation of the method is required.\n3.\tLack of Explanation for Equations: The paper lacks the connection between equation (2) and equation (3), and also lacks the explanation of how the Lcomputation is used to estimate the energy consumption E. Moreover, the Clifelong needs to be elaborated in terms of how it is attained. \n4.\tComparison with results for LLMCarbon: Can the authors present the analysis of same models and hardware combinations presented in Table 4 in the LLMCarbon paper?\n5.\tJustification or Citation for Assumption: The assumption of 1-year GPU lifespan for the embodied carbon estimation lacks justification or citation from a reliable source. \n6.\tOverlooked Factors in Operational Carbon Calculation: OpenCarbonEval does not account for essential factors like Power Usage Effectiveness (PUE) in data centers, leading to potential underestimations of emissions. Including PUE in calculations would create a more realistic operational carbon estimate.\n7.\tSimplistic Treatment of Training Dynamics: OpenCarbonEval applies Little’s Law simplistically, assuming a steady state in training dynamics, which oversimplifies the training process. 
More practical grounding, perhaps through empirical evidence, would enhance applicability in ML contexts. LLMCarbon addresses this by using detailed hardware efficiency and optimal parallelism settings, providing a robust framework for accurately modeling training dynamics.\n8.\tEmbodied Carbon Calculation: OpenCarbonEval’s approach to embodied carbon appears oversimplified, lacking in-depth parameters that affect emissions, such as hardware-specific manufacturing and lifetime estimates. Moreover, the Clifelong needs to be elaborated in terms of how it is attained."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 1
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please comment on the following weakness:\n\n1. **Simplified yet more accurate formulation???**: The functions presented in Equations (3) through (7) lack clarity in their intended function and accuracy. While polynomial approximations may lack precision, Equation (7) is simplified even further than LLMCarbon, containing only a single parameter compared to the multi-parameter nature of polynomial approximations. Why is this single-parameter approach purported to yield higher accuracy? The authors are encouraged to offer detailed explanations or empirical validation demonstrating how and why Equation (7) leads to improved accuracy over traditional polynomial approximations.\n\n2. **Consideration of GPU count and parallelism settings**: The paper does not discuss varying GPU counts in training configurations, appearing to assume a single-GPU setup. It also does not address different training parallelism types, such as data, tensor, pipeline, or expert parallelism, all of which may affect results depending on GPU count. Without incorporating these parallelism factors, it is unclear how OpenCarbonEval achieves greater accuracy. How does this work account for different parallelism strategies, and are there empirical results confirming its accuracy across these configurations? Additionally, Figure 4 lacks context: how many GPUs are represented, why do some GPUs exhibit smaller variance, and how many GPUs are used for training in Tables 1 and 2?\n\n3. **Lack of model architecture information**: The study appears to consider only the number of parameters in ML models, without accounting for architecture specifics. While scaling laws suggest that architecture does not impact model accuracy, it significantly affects training throughput across various architectures (see Megatron paper: https://parsa.epfl.ch/course-info/cs723/papers/Megatron.pdf). 
The authors should provide empirical evidence to demonstrate that model architecture does not impact the carbon footprint of training.\n\n4. **Dataset limitations**: The dataset used is limited and lacks comprehensive real-world data. Among the 863 entries in the provided table (https://epochai.org/data/notable-ai-models?view=table), only 176 entries include training times, 158 provide GPU counts, and only 31 report hardware utilization, leaving most entries without training times or hardware utilization data. With such limited information, how is \\( f(x) \\) in Equation (5) trained and validated? Furthermore, 603 of the 863 entries are classified as \"likely,\" \"speculative,\" or \"no confidence.\" Does OpenCarbonEval rely on these uncertain data points for validation while claiming higher accuracy? The authors should discuss the limitations associated with the dataset quality and address the impact on the reliability of their conclusions."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The paper works on an important topic.\n2. The paper identifies the shortcoming of preivous works (Faiz et al., 2023): the polynomial approximation for the system efficiency and hardware utiliation estimation is not accurate."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The training of machine learning (ML) models significantly contributes to global carbon emissions. This paper introduces OpenCarbonEval, an advanced estimation tool designed to quantify the carbon impact of large-scale ML models based on their total training computations and hardware configurations. The tool's accuracy is validated using real-world datasets, and experimental results demonstrate that OpenCarbonEval provides more precise predictions of energy consumption and carbon emissions than previous approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Simplified yet more accurate formulation???**: The functions presented in Equations (3) through (7) lack clarity in their intended function and accuracy. While polynomial approximations may lack precision, Equation (7) is simplified even further than LLMCarbon, containing only a single parameter compared to the multi-parameter nature of polynomial approximations. Why is this single-parameter approach purported to yield higher accuracy? The authors are encouraged to offer detailed explanations or empirical validation demonstrating how and why Equation (7) leads to improved accuracy over traditional polynomial approximations.\n\n2. **Consideration of GPU count and parallelism settings**: The paper does not discuss varying GPU counts in training configurations, appearing to assume a single-GPU setup. It also does not address different training parallelism types, such as data, tensor, pipeline, or expert parallelism, all of which may affect results depending on GPU count. Without incorporating these parallelism factors, it is unclear how OpenCarbonEval achieves greater accuracy. How does this work account for different parallelism strategies, and are there empirical results confirming its accuracy across these configurations? Additionally, Figure 4 lacks context: how many GPUs are represented, why do some GPUs exhibit smaller variance, and how many GPUs are used for training in Tables 1 and 2?\n\n3. **Lack of model architecture information**: The study appears to consider only the number of parameters in ML models, without accounting for architecture specifics. While scaling laws suggest that architecture does not impact model accuracy, it significantly affects training throughput across various architectures (see Megatron paper: https://parsa.epfl.ch/course-info/cs723/papers/Megatron.pdf). The authors should provide empirical evidence to demonstrate that model architecture does not impact the carbon footprint of training.\n\n4. 
**Dataset limitations**: The dataset used is limited and lacks comprehensive real-world data. Among the 863 entries in the provided table (https://epochai.org/data/notable-ai-models?view=table), only 176 entries include training times, 158 provide GPU counts, and only 31 report hardware utilization, leaving most entries without training times or hardware utilization data. With such limited information, how is \\( f(x) \\) in Equation (5) trained and validated? Furthermore, 603 of the 863 entries are classified as \"likely,\" \"speculative,\" or \"no confidence.\" Does OpenCarbonEval rely on these uncertain data points for validation while claiming higher accuracy? The authors should discuss the limitations associated with the dataset quality and address the impact on the reliability of their conclusions."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024opencarboneval,\ntitle={OpenCarbonEval: How much \\${CO}\\_2\\$ will your large model exhale in training process?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zNVefjN3EP},\nnote={under review}\n}"
},
"abstract": {
"value": "Data, model and hardware are crucial components in the development of large scale machine learning models. The training of such models necessitates substantial computational resources, energy consumption, and raw materials, resulting in significant environmental implications. However, the environmental impact of these models has been largely overlooked due to a lack of assessment and analysis of their carbon footprint. In this paper, we present OpenCarbonEval, a carbon emission estimation framework to quantify the environmental implications of large scale machine learning models given their total training computations and hardware configurations.\nIn OpenCarbonEval, we conducted a comprehensive dynamic analysis of the interrelationships among data, models, and hardware throughout the model training process, aiming to forecast the carbon emission of large scale models more accurately. We validated our approach on real-world dataset, and experimental results demonstrate that OpenCarbonEval can predict energy costs and carbon emissions more accurately than previous methods. Furthermore, it can be seamlessly applied to various machine learning tasks without a precision decline. By quantifying the environmental impact of large-scale models, OpenCarbonEval promotes sustainable AI development and deployment, contributing to a more environmentally responsible future for the AI community."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large-scale model",
"Carbon footprint",
"Sustainable AI"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/21de26db970c47e03922ea15e488742bea430b6c.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/394081c87d9bafef81d82d22278e391edf52ada8.zip"
},
"title": {
"value": "OpenCarbonEval: How much $CO_2$ will your large model exhale in training process?"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zNfdtV9ADQ | SRPCA: Sparse Reverse of Principal Component Analysis for Fast Low-Rank Matrix Completion | main | Active | matrix completion;low rank;PCA;collaborative filtering;image inpainting;time-series imputation | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 1;3;5;6 | 4;4;3;5 | 1;3;3;3 | 1;2;2;3 | 1;2;3;3 | 3.75 | 4 | 2.5 | 2 | 2.25 | 0.184115 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "A few additional comments:\n - One of the problems I have is that there already exist many approaches based on the SVD. \n- Line 69/70 “Sparse Reverse of the Principle Component Analysis” —> “Principal Component Analysis”\n- You should really simplify some of your introduction. E.g. “The proposed approach maintains a high level of smoothness by iteratively finding the principal components of the matrix based on the predicted values of both the missing and the observed parts of the matrix… ”. The sentence is way too long. I think I would remove the whole paragraph. \n- I would remove at least 2 or 3 items from page 2 (lines 76 to 90) and give the algorithm. You mention your algorithm only vaguely by saying that it updates a single component. It would be a lot clearer to have a short, clear exposition of the algorithm (even informal). E.g. just give (12) - (14) with a few words of explanation. \n- line 128/129, in your exposition of ALS, it is not clear why the entries are partitioned into subsets. That would be worth and explanation. Also when is the SVD computed. You say that it is not applied at each iteration. When is it applied then?\n- line 136/137 “we refer readers ” —> “we refer readers to…”\n- You should reorganize your explanation of PCA on lines 150 to 158. Start by something like “Let M denote any data matrix. Let V denote the eigenvectors of M^T M. We then define the matrix P as P = MV. The matrix P is diagonal. (You can then give (7)) ”. You don’t need more than 2/3 lines to recall how PCA works\n- Lines 166 - 167, when you introduce P_\\Omega, the proper technical term is “mask”. \n- Lines 166-167 “the matrix that preserves the entities”, “and replaces the remaining entities” —> “entries”\n- The notation “\\Omega^\\perp” is usually used to denote the orthogonal complement. 
I would use $\\Omega^c$ for the complement.\n- On lines 197-198, you first say that a fair intuitive approximation for M is (11) but then you seem to indicate that SRPCA starts with the restriction to the mask X_\\Omega? Which initialization do you use? This is not clear. If you don’t use (11), this should go \n- lines 226- 229 are not clear. Do you mean that the value of the matrix on the mask does not change? This does not seem true to me as when you compute the PCA decomposition you do not necessarily maintain the values on Omega. In any case, this should be rephrased. \n- In your statement of Lemma 3.1., Why not replace P^{(k-1)}R^{(k)} by M^{(k+1)} ? Moreover, your statement does not imply convergence. The convergence would require a strict inequality (which I guess you have, yet just made a typo). Also the expression converges iteratively does not really mean anything. You can just use “converges” or in this case, you can even say “converges linearly with respect to the ||P_\\Omega()||“ semi-norm\n- line 262/263 “This is a key finding, because if the algorithm terminates for external reasons ” this is unneccesary\n- line 270 - 292, in Algorithm 1, how do you determine the number of components that you retain. E.g. how do you set the value of r on line 278?\n- The statement of Lemma 3.2. is unclear. What do you mean by “it converges at an iteration K”\n- On Figure 2, the contrast does not seem to be the same between SRPCA and ALM\n- line 300-301 “which start each iteration with teh” —> “with the”\n- Lines 327 - 334, why are some of the convergence time and reconstruction errors underlined? You should clarify this.\n- Line 312-313, the statement of Lemma 3.3. does not mean anything. What is K? From what I understand, K is defined with respect to a given tolerance epsilon_tol. This should be clarified.\n- Line 312- 313, I find Lemma 3.3. somewhat misleading. 
From what I understand, the Fast RPCA algorithm corresponds to a change in the stopping criterion of Algorithm 1. I.e it basically tells you you should add the criterion ||P_{\\Omega}(X - P^{(k-1)}R^{(k)})||_F^2< delta on top of the stopping criterion on the successive iterates. Why not merge this with algorithm 1? It would make sense to me to have a combined criterion of the form “if deviation between iterates < epsilon or error on Omega < delta then stop“ It sort of makes sense that if your error on Omega get very small, you won’t be able to make further progress. You can keep part of the discussion from section 3.5 if you want but to me it would make more sense to merge the Fast SRPCA and the SRPCA algorithms (possibly with a short justification from section 3.5). This would be much clearer."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The possibility to parallelize has some interest, the disadvantage being that you have to compute an eigenvalue decomposition at each step and to invert a matrix of size r by r (see (16)) which although not as computationally expensive as a full inverse can remain quite expensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an iterative algorithm for matrix completion which the authors call Sparse Reverse Principal Component Analysis (SRPCA). The algorithm is based on iteratively (1) computing a PCA decomposition and (2) updating the principal vectors through a smooth gradient descent step."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My main concern has to do with the fact that the algorithm remains very close to existing algorithms (e.g. SVT from Cai et al. ) based on a combination of a gradient step and a truncated SVD. There exists an extensive body of work on matrix completion and any new approach (although perhaps interesting) would require a serious analysis and comparison with the literature. Had the authors shown a clear improvement in the rate of convergence (not just numerical as shown in Table 1) or in the number of samples needed for the recovery of the matrix, I would have been more inclined to accept the paper. But given that they only show convergence, I have to say I feel a little undecided. Perhaps the improvement should be better documented. E.g. what is the rate of convergence of SVT for example?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* Rephrasing the presentation in terms of the singular value decomposition would help elucidate what the updates correspond to. While an attempt is made at connecting R to an eigenvector matrix, after the first iteration R is no longer an eigenvector matrix and P no longer corresponds to principal components as traditionally defined. Further, the differences between the updates proposed and those performed in ALS should be clear.\n* The authors claim that the proposed algorithm leads to “a performance boost for smooth matrices”. At a minimum, the authors should precisely define the notion of smoothness used.\n* It would be helpful to have working code that fully reproduces Figures 1-3, including all comparisons to other methods.\n* Could you make a phase transition plot indicating how performance degrades as a function of rank and proportion of missing data? See Figure 1 in https://arxiv.org/abs/0805.4471 and the associated description for the setup.\n* One of the claims in the abstract is that the proposed method results “in a significant convergence acceleration“. To demonstrate this, the authors should include convergence plots depicting the reconstruction error of the algorithm (on the y-axis) as a function of the amount of iterations or time elapsed (on the x-axis), and repeat this for all methods compared in the experiments section. See Figure 1 in https://arxiv.org/abs/1605.07051 for an example.\n\nMinor comments\n\n* Line 110: apositive -> a positive\n* Line 115: algorithm.Unfortunately -> algorithm. 
Unfortunately\n* Line 119: you’re relaxing the minimization problem (1), not relaxing the rank\n* Line 147-161: Variables are used before they are defined, including P, V, S_p, and n\n* Line 168: entities -> entries\n* Line 170: minimize the problem -> reduce the problem\n* Line 11 in Algorithm 1: R(k) is used before it’s defined — probably should reorder lines 11-13.\n* Line 13 in Algorithm 1: It is unclear that this matrix contains eigenvectors of any relevant matrix after the first update. It is also unclear that it will have orthonormal columns after the first update.\n* Line 300: teh -> the\n* Line 410: enignes -> engines\n* Line 510: closed for -> closed form\n* Line 519: “can influence from“ What does this mean?\n* Line 520: predicitons -> predictions"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The proposed methods (SRPCA and fastSRPCA) appear novel.\n* The numerical results shown are promising. In particular, Figure 1-3 shows that SRPCA is significantly outperforming other methods, especially in the presence of large amounts of missing data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new iterative method for solving matrix completion problems in the presence of high levels of missing data. The proposed method is similar to ALS; the algorithm alternates between finding a least-squares solution (which has the benefit of being a convex problem) and performing a heuristic update. Theoretical claims of monotonic convergence of the iterates in finitely many steps are presented. A modified, faster-converging version of the algorithm is also presented. Finally, the authors provide numerical evidence of impressive performance on three datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The key theoretical result (monotonic convergence to within epsilon tolerance in finitely many steps) is highly misleading. The termination/convergence criterion of the algorithm is previous_reconstruction_error - current_reconstruction_error < tol without absolute value bars. This means that as soon as an iterate has larger reconstruction error than the previous one, the algorithm will terminate. Trivially this means that the algorithm will monotonically decrease for as long as it runs precisely because it terminates as soon as monotonicity fails. However, this does not give any insight into whether the final iterate has, for example, good reconstruction error. \n* While the numerical results shown are promising, they are limited overall. The key motivation is performance in the presence of limited data, so a phase transition plot indicating performance as function of amount of data present would be helpful. Further, the discussion focuses on how the algorithm converges very quickly, but no convergence plots are included. Finally, important parts of the experiments are not reproducible; the code provided in the supplement that creates Figures 1-3 does not run because of a missing file. (See suggestions below for details on what kinds of numerics would be helpful.) \n* The presentation lacks clarity. The connection to ALS or alternating minimization methods is not made explicit in the text despite the fact that the algorithm can be interpreted as a variation of ALS. Specifically, the update to R(k) is solving a least-squares problem (i.e., one step of ALS). The update to P(k) is a heuristic update, but it is unclear what minimization problem this update is solving, if any. \n* The “fast” variant of the proposed method (fastSRPCA) is poorly motivated. While the authors claim that it improves convergence at the cost of slightly degraded reconstructions, there is no empirical or theoretical comparison between fastSRPCA and SRPCA. 
\n* Numerous other claims are unsubstantiated or false.\n * 197: Why is this a “fair and intuitive first approximation?”\n * 216: “The new update is smoother than the prior update.” This claim is never substantiated, nor is the relevant notion of smoothness defined.\n * 220: “This adds a layer of nonlinearity...” Where is the nonlinearity? All the iterative updates in SRPCA are matrix multiplications, which are linear.\n * 244: “Applying (16) is scalable for big data.” While the author points out the opportunity for parallelization, there is no analysis of the complexity of the algorithm with or without parallelization. This should be a straightforward calculation, and would lend credence to the fact that the algorithm could be scaled to large datasets."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the time complexity of the proposed method? Please add a time complexity analysis and provide the running time comparison in the experimental results section.\n2. As the current experiments are small-scale data, how about the performance on large-scale data such as large images or MovieLens 1M? Will the time cost of the proposed method increase significantly?\n3. The current experiment on image completion does not consider noisy situations. How about the performance of image completion with Gaussian noise?\n4. Please compare the performance with the following work if possible: Li, Chao, Wei He, Longhao Yuan, Zhun Sun, and Qibin Zhao. \"Guaranteed matrix completion under multiple linear transformations.\" In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11136-11145. 2019."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The method and theoretical analysis are technically sound. Experimental results show some advantages of the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the author proposes a new matrix completion method that uses a PAC-like update regime. The convergence analysis is provided, and a fast version is developed. Experiment verifies the desired performance of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The time complexity is limited due to the utilization of SVD. The efficiency of large-scale data should be further investigated."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Q1) Why do you use the terminology \"sparse reverse of PCA\"?\n\nQ2) Why should the new estimate $M^{(k+1)}$ be smoother than $M^{(k)}$, as claimed in lines 216-217, and in which sense exactly? \n\nQ3) Line (13) is labeled \"Eigenvectors update\". It seems to refer to the eigenvectors of $(M^{(k)})^T M^{(k)}$, that is, the right singular vectors of $M^{(k)}$. But these should be orthogonal, and I don't see how the algorithm enforces this constraint. Otherwise, how is the update of $P^{(k)}$ (line 11 of Algorithm 1) supposed to work?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "n/a"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper is focused on the matrix completion problem. It essentially employs a Burer-Monteiro factorization approach with alternating updates, after resetting the observed entries according to their given values at each iteration. A variant is proposed that interpolates between these observed values and the ones estimated by the algorithm, but the hyperparameter choice advocated by the authors effectively suppresses the interpolation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1) This reviewer does not see any new ideas or useful insights brought by this paper. Instead, the proposed algorithm just seems to be a straightforward combination of well-known ideas: it employs a standard low-rank factorization with factors denoted $P$ and $R$ (even though it claims in the abstract to propose a reformulation of this standard approach) and updates the factors in an alternating fashion. Worse still, it seems to claim that some obvious and widespread techniques are innovative, such as simply setting the observed entries to their given values along the iterations (see lines 201-202).\n\nW2) The paper claims multiple times to \"maintain smoothness across the reconstructed matrix\", but offers no concrete elements as to why this should be the case. In fact, based on what is promised multiple times (including in the abstract), one would expect a formalization of these ideas, and some sort of regularization or constraint being enforced for promoting smoothness. Instead, only quite vague and superficial comments are made, trying to imply that the simple, standard updates given in the paper are somehow capable of automatically promoting smoothness in the solution. In particular, in lines 216-217 it is said that the new estimate $M^{(k+1)}$ is \"smoother\" than $M^{(k)}$, but no formalization of what is meant, proof or even concrete heuristic arguments are given to support this claim.\n\n\nW3) The paper suffers from a significant lack of clarity. Many sentences are poorly written, contain unusual expressions, improper formalization, or misinterpret basic notions such as convergence. Some examples are:\n- \"converges iteratively\"\n- \"SRPCA is proved to improve in performance iteratively until convergence\" (simply meaning that the objective function is non-increasing along iterations)\n- Lemma 3.2 seems to mistake the convergence of an algorithm with its stopping. 
It claims to prove an upper bound on the number $K$ of iterations required for convergence, but it merely shows that the algorithm stops, as a consequence of the imposed stopping criterion.\n- The solution to the problem in (14) is denoted with the same symbol as the optimization variable.\n\nW4) The paper does not properly motivate its contents. It only vaguely mentions some standard difficulties faced by certain classes of algorithms, but does not explain in a clear way how exactly it proposes to overcome such difficulties. In particular, the alternating least-squares (ALS) approach is said to operate by randomly partitioning the observed entries into subsets (known as resampling), and then is criticized for ignoring \"the smoothness of the data set due to the random partitioning of the original matrix.\" Yet, the authors are only focusing on a particular version of this approach: there is no reason why ALS must be employed with such a random partitioning in the first place (see, e.g., arXiv:1312.0925 and arXiv:1411.8003, the latter one containing a discussion on resampling).\n\nW5) In the extension of Section 3.5, a parameter $\\alpha^* \\in [0,1]$ is introduced to interpolate the observed entries between their currently estimated and given values, instead of fixing them according to the latter. However, the authors propose a heuristic that amounts to setting $\\alpha^* = 1$ until the difference (iin Frobenius norm) between the model and the observations is small on those entries, and then switching to $\\alpha^* = 0$, at which point the algorithm is shown to stop. Therefore, the purported \"smoothing\" advantages brought by the interpolation are eliminated, and the introduced variant becomes equivalent to the previously proposed scheme. \n\nW6) There are many typos along the text, indicating that it was not properly revised prior to submission."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024srpca,\ntitle={{SRPCA}: Sparse Reverse of Principal Component Analysis for Fast Low-Rank Matrix Completion},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zNfdtV9ADQ},\nnote={under review}\n}"
},
"abstract": {
"value": "Supervised and unsupervised learning methods experience a decline in performance when applied to incomplete, corrupted, or noisy datasets. Matrix completion is a common task to impute the missing values in sparsely observed matrices. Given a matrix $\\mathbf{X} \\in \\mathbb{R}^{m \\times n}$, low-rank matrix completion computes a rank-$r$ approximation of $\\mathbf{X}$, where $r\\ll\\min\\\\{m,n\\\\}$, by only observing a few random entries of $\\mathbf{X}$. It is commonly applied for recommender systems, image processing, and multi-output collaborative modeling. Existing matrix completion methods suffer either from slow convergence or failure under significant missing data levels. \nThis paper proposes a novel approach, the Sparse Reverse of Principal Component Analysis (SRPCA), that reformulates matrix factorization based low-rank completion $(\\min_{\\mathbf{U},\\mathbf{V}}\\Vert\\mathcal{P}_{\\mathbf{\\Omega}}(\\mathbf{X}-\\mathbf{U}\\mathbf{V}^T)\\Vert_F^2)$\nto iteratively learn a single low-rank subspace representation by solving the convex optimization problem\n$\\min\\_{\\mathbf{V}}\\Vert\\mathcal{P}\\_{\\mathbf{\\Omega}}(\\mathbf{X}-\\mathbf{P}\\mathbf{V}^T)\\Vert_F^2$ under the principal component analysis framework, resulting in a significant convergence acceleration. SRPCA converges iteratively and is computationally tractable with a proven controllable upper bound on the number of iterations until convergence. Unlike existing matrix completion algorithms, the proposed SRPCA applies iterative pre-processing resets that maintain smoothness across the reconstructed matrix, which results in a performance boost for smooth matrices. The performance of the proposed technique is validated on case studies for image processing, multivariate time-series imputation, and collaborative filtering. SRPCA is also compared with state-of-the-art benchmarks for matrix completion."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"matrix completion",
"low rank",
"PCA",
"collaborative filtering",
"image inpainting",
"time-series imputation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/428187414791443d2e36ff927d2e2b379f0623ed.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/6f7879bbf09282b0d842098269b1487efe00ccb3.zip"
},
"title": {
"value": "SRPCA: Sparse Reverse of Principal Component Analysis for Fast Low-Rank Matrix Completion"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zOMa82W1HV | SQuBa: Speech Mamba Language Model with Querying-Attention for Efficient Summarization | main | Active | Summarisation;Speech;Mamba | applications to computer vision, audio, language, and other modalities | 3;3;5;5;6 | 5;5;4;3;3 | 2;2;2;2;3 | 2;2;2;2;3 | 3;2;3;3;3 | 4.4 | 4 | 2.2 | 2.2 | 2.8 | -0.931695 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I'm curious about the performance of advanced models (e.g., Whisper v3 and Llama 3) in building a cascade pipeline. Since this work aims to explore LLMs for speech processing, and LLMs are effective in handling long text and summarization tasks, how would these models compare?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is clearly structured and represents a valuable exploration of using LLMs for long speech processing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper explores the use of Mamba-based multimodal LLMs to process long speech segments. The authors also apply DPO to enhance alignment during the instruction fine-tuning stage. Their experiments focus primarily on the speech summarization task, showing that the model can successfully process long speech."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Although the paper explores long speech processing using LLMs, the motivation is not sufficiently compelling. Notably, LLMs excel at summarization, and long speech summarization could be effectively handled by combining an ASR model with a strong LLM. While ASR may introduce some errors, LLMs are generally robust enough to manage this task. Therefore, long speech summarization may not be the most suitable task for evaluating LLMs in handling extended long speech inputs.\n- The contribution is somewhat limited, as Mamba-based multimodal LLMs and DPO have both been explored in speech instruction tuning. This work primarily combines these two methods and tests them on the speech summarization task.\n- The experiments are insufficiently comprehensive; relying only on speech summarization does not robustly support the model’s capability with long speech. Furthermore, the LLM used here is not particularly strong.\n\nOverall, this paper lacks novelty and persuasiveness and would benefit from further development."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Q-former-like mechanism should be able to compress input of any length into fixed length. Hence I'm a bit confused about the decision to compress every 0.33s of audio into 2 query vectors. This differs from previous works including arxiv:2309.13963 and arxiv:2407.02005, which considered 30~100 vectors for every 30s of audio. In this way, the contextual information further than 0.33s will not be captured by the Q-former. I guess that the reason is to avoid the high-cost quadratic cross-attention but it will be better for the author to discuss that explicitly. Also, compressing a short sequence (only dozens of vectors, as each Whisper frame takes 25ms) into merely 2 vectors is rather simple and I doubt if the complicated Q-former will really outperform a much simpler one, e.g. pooling or convolution, which may also process information within such a short context well. More ablation studies will be necessary to justify this decision, by comparing with pooling or CNN, and comparing with different context lengths.\n\nThere are many other approaches to compress speech signals into \"token-like\" embeddings to be processed by LMs, e.g. HuBERT units, speech tokens, and neural audio codec, while Q-former is somehow similar to a kind of VQ, but with continuous features. Can you elaborate on the reasons why you chose Q-former? Do you think there is any specific advantage?\n\nI am particularly concerned with the unidirectional Mamba used in Q-Mamba, and I fail to find the motivation to apply Mamba to the sequence of query vectors. Trainable query vectors should be already capable of introducing positional information. Ablation studies (e.g. by removing this Mamba layer) should be necessary to justify this choice.\n\nI also have some questions regarding the use of DPO. What is the experiment without DPO in Table 4? 
Using supervised fine-tuning only?\n\nIf the issue w/o DPO is that the summary will be too detailed, has the author considered any other more straightforward solution, e.g. length penalty during generation, downsampling the input sequence, or upweighting EOS during training?\n\nIt is commonly believed that instruction fine-tuning leads to better alignment, but at the cost of flexibility and adaptability to specific downstream tasks in fine-tuning, while the authors use a instruction fine-tuned version of Mamba-2.8B as the base LLM. Is there any specific reason to use Mamba-2.8B-Zephyr instead of the original Mamba-2.8B model?\n\nWhat is the LLM used in the Cascaded model? The original Mamba-2.8B or the instruction fine-tuned one? Is it further fine-tuned to summarization?\n\nCan you elaborate more on the speedup of the model compared to the cascaded one? With both of them using Whisper and Mamba (though the inputs to Mamba are different), I'm curious about the source of the extra overhead in the cascaded pipeline. Also, it can be helpful to report the average input sequence length to the final LLM model as a reference to the expected computational costs.\n\nUsing only synthesized datasets is a weak point of the empirical evaluation, particularly when the labels are also synthetic. It can be necessary to also report the results on real datasets, e.g. SLUE-SUMM, and include more examples and human evaluations.\n\nIt can be interesting to report the ASR performance of the model after either of the two Alignment stages.\n\nIt will be better to also include the original transcript in Appendix C.\n\nIn Figure 3, is Whisper frozen in the Fine-tuning Stage?\n\nMinor issues:\nL210: Figure 4.1?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The model enables efficient speech summarization by combining pretrained Mamba and Q-former.\n* Empirical results are strong."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a sub-linear complexity speech summarization model by combining Q-former and pretrained Mamba. Segmented audios are processed by Whisper and compressed by Q-former with Mamba-processed query vectors, which are then fed to Mamba LLM to generate the summarization. A 3-stage training with different tasks is carried out, i.e. short-form ASR, long-form ASR, and summarization, accompanied by DPO. Empirical studies show better results and speed compared to cascaded models and a HuBERT+LLAMA E2E model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* Ablation studies are not sufficient to demonstrate the advantages of the proposed methods and to identify the impact of each component. Some design choices are yet to be well-motivated.\n* The method is more like replacing transformers in existing methods (esp. arxiv:2407.02005) with Mamba, which leads to doubt on the technical novelty.\n* Only one synthetic dataset is used.\n\nSee questions below for details."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "/"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Novel Architecture:\n\nInnovative combination of Mamba with querying-attention for speech processing.\n\nWell-motivated design choices for handling long-form speech.\n\nClear architectural improvements over existing approaches.\n\n- Strong Empirical Results:\n\nSignificant speed improvements (17x faster than cascaded baseline).\n\nCompetitive performance on standard metrics.\n\nComprehensive ablation studies validating design choices.\n\n- Technical Soundness:\n\nThorough theoretical foundation and clear mathematical formulation.\n\nWell-documented training process and implementation details.\n\nCareful experimental design with appropriate baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents SQuBa, an end-to-end speech summarization model that combines a Mamba language model with a novel querying-attention mechanism for efficient processing of long speech inputs. The key contributions are:\n\n- A query-attention Mamba projector that compresses acoustic features into compact semantic tokens.\n- Extension of Mamba-based LLM for speech summarization with a two-stage training process including bootstrapped DPO.\n- Empirical demonstration of competitive performance with significantly faster inference speeds compared to transformer-based approaches.\n\nThe model achieves this through:\n\n- Using Whisper encoder for speech features.\n- Novel Q-Mamba projector for efficient feature compression.\n- Pre-trained Mamba LLM (2.8B parameters) for generation.\n- Two-stage training: speech alignment followed by summarization fine-tuning.\n- Bootstrapped DPO for improved summary quality."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Limited Dataset:\n- Uses synthetic speech data for fine-tuning\n- Could benefit from evaluation on more diverse real-world speech datasets\n- Lack of cross-lingual evaluation\n\nArchitectural Constraints:\n- Fixed 30-second chunks due to Whisper encoder limitations\n- Query length choices could use more theoretical justification\n- Potential information loss in compression not fully analyzed\n\nEvaluation Metrics:\n- Limited human evaluation or qualitative analysis\n- No discussion of failure cases or limitations"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In Equation 1, what is h'(t) ? \n2. The \"ideal\" query length for speech transcription is likely not representative of representations necessary for speech summarization. Can the authors clarify why these ablations were done for the transcription task on Librispeech ?\n3. How does the speed of Scuba compare to that of the model by Shang et al ? \n4. The Whisper speech encoder is frozen, and it is not clear why this modeling choice was made."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper attempts to address an important challenge, i.e., summarization of longform audio through a cross-attention based temporal downsampling module. \n2. It applies the recently introduced DPO technique to speech summarization, and demonstrates improved ROUGE and METEOR scores from this."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper describes an approach to summarizing 6-minute-long audio recordings by combining the Whisper speech encoder with the Mamba LLM through a cross-attention based Mamba querying projector. The authors show that DPO improves ROUGE and METEOR metrics, and that the proposed model has a better ROUGE and METEOR score, and latency over the cascade model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I have serious concerns about the novelty of the proposed approach. From a modeling standpoint, the work is very similar to Shang et al. (2024). From a training method standpoint, the 2-stage fine-tuning approach involving speech recognition and speech summarization is well established in the field since Sharma et al. (2021), leaving only two differences: (a) having an ASR training stage over both short and long audio as opposed to just short audio, and (b) using DPO post hoc, another well established technique to improve ROUGE and METEOR numbers. All in all, it appears that there is little technical novelty in the paper. \n\n2. Validating the proposed approach on a single relatively shortform audio dataset (upto 6 minutes) comprising synthetic audio is not very convincing. Furthermore, the work is done on a custom dataset whose LLM-generated summary labels have not been validated for correctness, either through automatic or human evaluations. Since LLMs are known to hallucinate, it is hard to make a meaningful case using any numbers on this dataset. The authors should ideally consider evaluating on any real other dataset(s) with real audio. \n\nTo add more context, here is a paper [1] that used synthetic data for speech summarization but still reported a myriad of automatic and human evaluation metrics to validate that the data used was reasonable. Something similar to this might be more convincing than what is in the paper currently. \n\n3. The metrics used for speech summarization in this paper do not go far enough. It is well known that ROUGE and METEOR based evaluations for summarization are not all encompassing, and that the metrics have significant flaws. This again makes it hard to validate that the observed improvements correlate with summaries of higher quality. The authors could supplement these measures using human evaluations of coherence, consistency, factuality and relevance. \n\n4. 
Table 4 could be expanded to show the impact of the long audio transcription based alignment if any. \n\n5. The manner in which DPO is performed is not very convincing. The authors use the model generated responses as the non-preferred responses and the ground-truth summaries as the preferred responses. Do the authors validate that the model generated responses are in fact undesirable, and record metrics that demonstrate the same ?\n\n\n[1] J. -W. Jung, R. Sharma, W. Chen, B. Raj and S. Watanabe, \"AugSumm: Towards Generalizable Speech Summarization Using Synthetic Labels from Large Language Models,\" ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Seoul, Korea, Republic of, 2024, pp. 12071-12075, doi: 10.1109/ICASSP48485.2024.10447328."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "Refer to weaknesses"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to weaknesses"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Originality:\n1. The Mamba-based approach has not yet been utilized for speech summarization.\n\nQuality:\n1. Faster inference with better summarization performance against a cascaded and E2E baseline.\n\nClarity:\n1. The paper is mostly easy to follow.\n\nSignificance: Results could be significant to speech summarization community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an end-to-end abstractive summarization method that processes speech inputs directly. It utilizes a querying-attention Mamba projector to condense extended acoustic features into compact semantic tokens, which are subsequently fed into the Mamba Large Language Model (LLM). They further employ Direct Preference Optimization (DPO) fine-tuning to produce coherent summaries. Experiments on a TTS-synthesized speech summarization dataset demonstrate that this approach outperforms both a cascaded baseline and an end-to-end baseline."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Clarity: \n1. The biggest weakness to me is the lack of clarity on the baseline models. They use Whisper large v2 as the ASR model in the cascaded system, but it has a limit of 30 seconds. How do they use it to get ASR output? Do they feed every 30-second window of audio as input? Further, do they finetune ASR or LLM or are they used in a zero-shot manner? What are the results in both these scenarios? If LLM is finetuned, do they also use DPO finetuning? The paper should provide more details about the E2E speech summarization baseline in the main text to make the paper self-contained. \n2. It’s also unclear how the approach handles long speech sequences, which seems to be a central aspect of this work's novelty. The paper mentions chunking audio into 30-second segments, yet doesn’t address how contextual continuity is managed between chunks. Prior studies on streaming ASR (e.g., https://arxiv.org/abs/2107.09428) indicate that chunk boundary in the middle of token can result in generation inaccuracies. Clarifying whether any overlap is applied between chunks and providing additional discussion on this topic would improve the paper's depth and accessibility.\n\nSoundness: \n1. The evaluation is limited to one dataset, a TTS-generated synthetic speech summarization dataset. Including publicly available human speech datasets, such as SLUE_TED (https://huggingface.co/datasets/asapp/slue-phase-2) or AMI (https://groups.inf.ed.ac.uk/ami/corpus/), would provide a more robust assessment and ensure that the approach is tested on natural human speech data.\n2. Additional details on the synthetic dataset would be valuable, including whether it consists of single-speaker audio, and its quality (e.g., WER for the TTS output as evaluated by a pre-trained ASR model or via human relevance judgment).\n3. Further analysis is needed to pinpoint where the model outperforms a cascaded baseline. 
Does this improvement stem primarily from avoiding error cascading (that can potentially be addressed by improving the ASR system), or does the model also capture non-phonemic audio signals that enhance summarization quality?\n\n Significance and Originality: I feel that Mamba-based approaches have been shown to be useful for various speech processing tasks (https://www.isca-archive.org/interspeech_2024/miyazaki24_interspeech.html) and have been shown to be particularly efficient for long-form speech processing. The paper has limited novelty since it additionally verifies an expected conclusion that Mamba-based architecture is also useful for another speech processing task, namely speech summarization. Further, the lack of clarity in setup of baseline models make me question the improvements claimed in the work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024squba,\ntitle={{SQ}uBa: Speech Mamba Language Model with Querying-Attention for Efficient Summarization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zOMa82W1HV},\nnote={under review}\n}"
},
"abstract": {
"value": "Abstractive Speech Summarization (SSum) becomes increasingly difficult as the input speech length grows. To address this, we present SQuBa (Speech Querying Mamba Architecture), an end-to-end model designed explicitly for efficient speech summarization. SQuBa leverages a querying-attention Mamba projector to condense extended acoustic features into compact semantic tokens, which are subsequently summarized by the Mamba Large Language Model (LLM). The architecture’s computational complexity scales linearly with input length, enabling efficient handling of longer inputs. A two-stage training framework, complemented by bootstrapped Direct Preference Optimization (DPO) fine-tuning, empowers SQuBa to generate concise and coherent summaries. Experimental results demonstrate that SQuBa delivers competitive performance while significantly improving inference speed, making it ideal for real-world applications such as podcast and meeting transcriptions."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Summarisation",
"Speech",
"Mamba"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c82c28c31b6fc46fa036bffd97f0231774b30cb1.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "SQuBa: Speech Mamba Language Model with Querying-Attention for Efficient Summarization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zONMuIVCAT | Unified Parameter-Efficient Unlearning for LLMs | main | Active | Large Language Model Unlearning; Machine Unlearning; Influence Function | interpretability and explainable AI | 5;5;6;6 | 3;3;3;3 | 1;2;3;3 | 2;2;3;2 | 3;3;4;2 | 5.5 | 3 | 2.25 | 2.25 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "In the limitation section is mentioned a limited due to \"first-order Taylor expansion\", but I thought influence functions rely on second order expansion, which one is it?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper unifies three unlearning objective under a single simple framework.\nThe paper directly addresses the complexity concerns the framework seems to have in two simple ways: using fast Hessian-vector product, and using mini-batches. It then provides a convincing argument for why such problem formulation would converge using SGD."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper suggest a unified and feasible way to address multiple unlearning scenarios under a single framework. It first generalizes three unlearning objectives. Then, proposes to use influence-functions to approximate the influence of datapoints requested to be unlearned. It then proposed to use PEFT to avoid full finetuning of the model, by training each adapter separately."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper is a bit hard to read. I would suggest putting most equations in the appendix and focus on high-level explanation in the body of the paper. It is unclear exactly what exactly how adapters are used in Algorithm 1. The evaluation is a bit limited, consider comparing this method performance to other unlearning approaches. Evaluating efficiency in seconds can be a nice addition but the main evaluation should compare an asymptotic running-time complexity."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "see weakness. Also, how is the performance of the method on text-generation tasks? Could you evaluate your approach on some QA datasets?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The problem is important and interesting. The three unlearning tasks are practical and meaningful in real-world settings.\n\n2. The unlearning approach edits model parameters based on influence functions, which avoids finetuning or re-training the models\n\n3. The evaluations are comprehensive. \n\n4. The writing is clear and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduce a novel instance-wise unlearning method that utilizes influence functions to adjusting parameters for unlearning tasks. Specifically, the method can remove specific sample from the training dataset, adjust input tokens in user queries, as well as correct model response. Experimental results show that the method can perform unlearning tasks with high accuracy while ensuring efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My concern is mainly about evaluation datasets. \n\n1. Although the paper claims to address privacy concerns in LLMs, the authors primarily evaluated the method using tabular datasets for recommendation tasks and a multimodal dataset for a classification task. While these tasks are representative, they are relatively simple and straightforward. Although the authors applied prompt engineering to those data, the training data are still very similar. \n\n2. In real AI applications, LLMs are used for text generation more often, and the training data consists of documents from various sources and may contain sensitive information, such as bank accounts, addresses, and SSNs. Thus, this paper lacks evaluations in more practical and meaningful scenarios that would better demonstrate its effectiveness in addressing privacy concerns in real-world settings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1) How effectively can the proposed algorithm let the model forget the samples?\n2) How effectively can the proposed algorithm maintain the model performance on generative tasks?\n3) If a sample is also used in the pre-training, can it still be unlearned? \n4) How can we tell whether the unlearning (or failure to unlearn) is on the PEFT module but not on the pre-trained model?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1) The paper shows the motivation and derivation of the proposed algorithm/framework.\n2) The proposed algorithm demonstrates that it can effectively maintain good performance on different tasks, including recommendation and relation mining tasks.\n3) The proposed algorithm is also shown to have high efficiency compared to the existing algorithms."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies machine unlearning for parameter-efficient fine-tuning (PEFT) of large language models (LLM). The authors first propose a framework called LLMEraser for three different unlearning tasks: instance removal, query modification, and response correction. The core of LLMEraser is based on the influence function, based on which the algorithm can minimize the effect of the removed or modified samples. Some experiment results are presented to show the effectiveness of the proposed algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) All the experiments show how well the models can perform after unlearning a certain amount of training samples. However, it seems the paper does not present or compare how effectively the algorithms can let the model \"forget\" those training samples, which is the original goal of the unlearning algorithm.\n2) As PEFT is fine-tuning the model, it is unclear how to distinguish the influence of a sample when it is in the fine-tuning training set or the pre-training training set. When the sample was also used in the pre-training set, it is also unclear whether it is reasonable to require the PEFT to eliminate those \n3) The experiment settings in the paper do not include the generative tasks of LLMs.\n4) Some minor typos, such as \"handling handle\" near the end of page 9."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide a detailed analysis of estimation errors to demonstrate their impact on performance? Given the importance of precision in unlearning tasks, such an analysis would help establish whether the errors introduced by the method are indeed within an acceptable range and do not significantly detract from the efficacy of LLMEraser in high-precision unlearning scenarios.\n\n2. The LLM4Rec unlearning tasks are different from those unlearning tasks used in the baseline paper. Could the authors provide performance results on additional unlearning tasks more closely aligned with those proposed in other baselines? Additionally, how do the LLM4Rec tasks reflect the generalizability of LLMEraser across diverse unlearning scenarios? \n\n3. While LoRA was used in the experiments, it would be helpful to know the specific experimental setup, including the number of trainable parameters. Since computational complexity is directly related to the number of trainable parameters and rank values in LoRA, could the authors clarify these settings and present results across different ranks? Additionally, could they specify the sample sizes used in unlearning experiments, as sample size likely affects the overall complexity?\n\n4. While the method demonstrates computational efficiency, I am concerned about memory efficiency, particularly for broader applications. Could the authors provide an analysis of the memory cost and specify whether LLMEraser could run on a lower-memory GPU than an Nvidia A100? If the method is limited to high-memory GPUs, this could hinder its practical application. A comparison of memory usage with baseline methods is also important to clarify this point.\n\nI would be happy to engage with the authors to help improve the presentation of the method and evaluation, but my concerns are not insignificant. Clarifications would need to resolve my questions in order for my score to improve."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The paper is written clearly and is easy to follow, with clearly presented formulations.\n2. The taxonomy of unlearning tasks is well-defined, providing clarity on the types of unlearning scenarios addressed. \n3. Extensive experiments are conducted across diverse unlearning tasks with both LLMs and MLLMs, covering recommendation and multimodal relation mining applications. \n4. LLMEraser accelerates the computation of the inverse Hessian-vector-product in the influence function, enabling efficient implementations. This improvement is valuable for LLM applications, where computational efficiency is a growing concern."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces LLMEraser, a unified framework for parameter-efficient unlearning in large language models (LLMs), specifically tailored to address privacy and security concerns in domain-specific fine-tuning. The framework utilizes influence functions to perform instance-wise unlearning tasks such as instance removal, query modification, and response correction. LLMEraser allows unlearning without requiring full model retraining, and has demonstrated efficacy in preserving model performance across various unlearning tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While LLMEraser reduces the need for retraining, the memory-intensive computation of inverse Hessian-vector products remains demanding. This requirement may limit scalability, particularly for very large models or environments with limited GPU memory.\n2. The method’s reliance on first-order Taylor expansion in influence functions can introduce estimation errors. A lack of detailed error analysis makes it difficult to assess the impact of these errors, especially in tasks requiring high unlearning precision.\n3. Limited generalizability is a concern, as the LLM experiments primarily focus on LLMs for recommendation (LLM4Rec). Broader evaluation across more diverse LLM unlearning tasks would strengthen the claims.\n4. Key experimental details, such as PEFT configurations and the number of trainable parameters, are not fully provided. These details are essential for evaluating the method's complexity and efficacy across different settings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024unified,\ntitle={Unified Parameter-Efficient Unlearning for {LLM}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zONMuIVCAT},\nnote={under review}\n}"
},
"abstract": {
"value": "The advent of Large Language Models (LLMs) has revolutionized natural language processing, enabling advanced understanding and reasoning capabilities across a variety of tasks. Fine-tuning these models for specific domains, particularly through Parameter-Efficient Fine-Tuning (PEFT) strategies like LoRA, has become a prevalent practice due to its efficiency. However, this raises significant privacy and security concerns, as models may inadvertently retain and disseminate sensitive or undesirable information. To address these issues, we introduce a novel instance-wise unlearning framework, LLMEraser, which systematically categorizes unlearning tasks and applies precise parameter adjustments using influence functions. Unlike traditional unlearning techniques that are often limited in scope and require extensive retraining, LLMEraser is designed to handle a broad spectrum of unlearning tasks without compromising model performance. Extensive experiments on benchmark datasets demonstrate that LLMEraser excels in efficiently managing various unlearning scenarios while maintaining the overall integrity and efficacy of the models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Model Unlearning; Machine Unlearning; Influence Function"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/379d530a1f4cd8cfe45a0b082be1ab765b3b4e0f.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Unified Parameter-Efficient Unlearning for LLMs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zP8HygcAMY | Can LLMs Evaluate Complex Attribution in QA? Automatic Benchmarking Using Knowledge Graphs | main | Active | Large Language Model;Attributed Question Answering;Knowledge Graph | datasets and benchmarks | 5;5;5;6 | 3;4;5;4 | 2;3;3;3 | 2;3;3;3 | 3;3;3;4 | 5.25 | 4 | 2.75 | 2.75 | 3.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How do you verify the quality of converted natural language style questions?\n- What is the inter-agreement score of human annotations?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- CAQA uses KGs to generate complex QA benchmarks automatically, enabling scalability and minimizing manual annotation effort.\n- Different reasoning complexities are considered, highlighting LLMs' capabilities in handling logical relationships between facts.\n- The benchmark includes fine-grained attribution categories."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents CAQA (Complex Attributed Question Answering), a large-scale automatically generated benchmark designed to assess the attribution capabilities of QA systems, particularly Large Language Models (LLMs). CAQA leverages Knowledge Graphs (KGs) to create comprehensive attribution categories and to handle complex reasoning scenarios. The benchmark distinguishes between supportive, partially supportive, contradictory, and irrelevant evidence types and introduces reasoning complexity through different forms of evidence combination (e.g., union, intersection, concatenation)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The task setting seems very similar to NLI to me, more discussions are needed.\n- Lack of a few details about the human annotation process.\n- The distribution of the complexity is biased."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "See \"Weaknesses\"."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper introduces CAQA, a large-scale benchmark for evaluating complex attributions in QA.\n2. The CAQA dataset contains various new definitions (e.g., fine-grained attribute categories and attribution complexities), and the data construction process is automatic, considerate, and comprehensive.\n3. This paper contains comprehensive experiments. In addition to model performance on CAQA, it also includes fine-grained analysis, human consistency, and out-of-distribution data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Complex Attributed Question Answering (CAQA), a large-scale benchmark designed to evaluate complex attributions in question answering (QA). CAQA is automatically generated using knowledge graphs (KGs), includes a broader range of attribution categories along with intricate attribution reasoning scenarios, and is also aligned with human annotations. Experiments with two specifically developed evaluators and nine large language model (LLM) evaluators reveal that these models struggle to identify negative attribution categories and handle complex attribution reasoning in both zero-shot and few-shot settings but mostly perform relatively well in the fine-tuning setting."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This paper only considers GPT-3.5 and GPT-4 as closed-source LLMs, and some open-source LLMs used may be outdated (e.g., Mistral-7B has revolutionized various versions). Adding more diverse and latest models in experiments would have greater contributions and help to discover which LLMs perform best on this challenging task.\n2. There is a lack of comparisons with human performance on (a subset) of the dataset, which would better illustrate the performance gap and the challenge of the dataset.\n3. While the contribution of the paper centers on a new challenging benchmark, it would be much helpful if the authors can provide an error analysis, which will direct newcomers in future research."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Some discussion on the ambiguity (and actual errors) in the labeled categories would be useful (e.g., human annotator agreements on a sample). \n\nAlso would be good to discuss the lack of context consideration which is usually very important in real usage (e.g., the example in the paper \"Who plays Fruma Sarah in Fiddler on the Roof\" depends on which version of Fiddler on the Roof is being referenced)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The dataset is relevant for the important topic of answers with attributions from LLMs. Being able to carefully validate whether an answer actually follows from the sources is an important skill, and this dataset aims at helping with this.\n\nThe paper is well written, clearly describing the approach.\n\nThe use of the KG to create various incorrect attributions, together with using LLM to rewrite at text, seems quite effective.\n\nThe paper provides access to the full dataset for exploration which is truly helpful in assessing it.\n\nThe methods are tested on a more realistic, OOD, dataset."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces the dataset Complex Attributed Question Answering (CAQA), containing answers to questions with associated source attributions, where the attributions may or may not support the answer. The non-support attributions are divided into 3 labeled categories: Partially supported, Contradictory Irrelevant.\n\nThey evaluate how well different LLMs can classify Q+A+source into these 4 categories, finding that in many cases they struggle to do well, especially on distinguishing the non-supportive categories.\n\nThe CAQA dataset is constructed from existing KGQA datasets (GrailQA and WebQuestionsSP), making use of the associated knowledge graph to produce different types of non-supportive evidence (and using GPT-3.5 to turn KG triples into natural language sentences). The resulting dataset is quite big (137k train, 24k test), allowing for fine-tuning experiments as well. The fine-tuned models do very well in distribution, and they also do limited out-of-distribution evaluation on a subset of ALCE further annotated with these non-supportive categories, showing promising results there as well."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While breaking down the non-supportive cases into three subcategories can be helpful for understanding limitations, the boundary between them can be quite unclear. Also the prompt for the non-GPT models doesn't go into great detail (beyond some examples) on what each category means. For instance, the \"contradictory\" evidence is often for actual true facts, so they're not actually contradiction, it's just the \"wrong\" evidence.\n\nE.g., the answer \"The person who founded the United States Coast Guard also founded the United States Department of the Treasury.\" is presented as being contradicted by the source \"Alexander Hamilton is the founder of the United States Coast Guard and the Montgomery County Sheriff's Office, which is a government agency.\", but this isn't really a contradiction, it's more like missing evidence. A true contradiction should lead you to think the answer is actually false, if you trust the source.\n\nThe \"Partial support\" category also can be quite subjective, as in the case of \"The 2011 Estoril Open tournament event competition is the Men's Singles.\" being partially supported by \"The 2011 Estoril Open had a men's singles competition.\" (what's missing is apparently that \"2011 Estoril Open\" was \"tournament event competition\", but that's pretty much implied by the fact that they had a men's single compeition).\n\nBecause of this, it might also be useful to report the most important \"supported\" vs \"not supported\" scores.\n\nAnother concern is the simplicity of the dataset, with simple QA assertions attributed by short source sentences. How does good performance on CAQA transfer to more realistic settings. And can it be used to train better source attribution models as well? There is some exploration of this with the OOD ALCE dataset, but the effect (e.g., between Vicuna-13B and Vicuna-13B-finetuned) isn't as impactful as one might have hoped."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. How might CAQA be adapted to handle dynamic or temporal data in QA tasks?\n2, What specific types of biases were identified or considered when using LLM-generated prompts?\n3. Are there plans to include more diverse logical operations, such as negation, in future iterations of CAQA?\n4. Could the inclusion of human-in-the-loop evaluations further enhance the quality of the generated benchmark data?\n5, Extend the benchmark's applicability by including examples from various knowledge domains (e.g., medical, legal) to test the robustness of attributions in specialized contexts.\n6. The reliance on GPT models for generating natural language representations from KGs may introduce subtle biases. Addressing how these biases are minimized or discussing potential implications would strengthen the manuscript.\n7. Discussing how CAQA could be adapted for such tasks would add value especially how to address the challenges in more diverse, such as open-domain QA systems. ."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The use of KGs for automatic construction of the benchmark is novel, making the process scalable and adaptable.\nThe research tests various models and demonstrates the needs of fine-tuning for achieving robust performance.\nThe benchmark shows high consistency with human evaluations, supporting its credibility as an effective tool for future developments in QA systems.\nThe choice to test multiple LLMs, including state-of-the-art models like GPT-4 and LLaMA variants, provides a robust analysis of performance across different model scales and settings.\nThe inclusion of a wide range of attribution types and complexity levels sets a high standard for evaluating QA systems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This manuscript aims to bridge the gap in attribution evaluators' use of knowledge graphs by providing detailed categories. This study's experiment offers multiple configurations in zero-shot, few-shot, and fine-tuned contexts, demonstrating that the fine-tuning process can significantly improve performance. This benchmark aims to address the shortcomings of existing attribution evaluators, which face challenges with intricate attribution categories and sophisticated reasoning processes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The benchmark is tailored for KG-based QA tasks, which may not reflect the challenges present in more diverse, open-domain QA systems. \nThe reliance on GPT models for generating natural language representations from KGs may introduce subtle biases.\nThe rationale behind choosing specific complexity types (e.g., concatenation, intersection) could be expanded with examples illustrating real-world implications of these complexities in QA."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024can,\ntitle={Can {LLM}s Evaluate Complex Attribution in {QA}? Automatic Benchmarking Using Knowledge Graphs},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zP8HygcAMY},\nnote={under review}\n}"
},
"abstract": {
"value": "The attribution of question answering (QA), which is to get evidences for supporting the generated answer, has attracted wide research attention. The current methods for automatically evaluating the attribution, typically relying on Large Language Models (LLMs), are still inadequate, particularly in recognizing subtle differences between attributions, and in measuring complex attribution reasoning. Existing benchmarks, which are primarily based on manual annotations, suffer from limited evaluation settings with incomplete and coarse attribution categories and reasoning scenarios, hindering the evaluation and advancement of attribution evaluators. To address this gap, we introduce Complex Attributed Question Answering (CAQA), a large-scale benchmark automatically generated using Knowledge Graphs (KGs), containing more comprehensive attribution categories and complex attribution reasoning scenarios. Our experiments with two specifically developed evaluators and nine LLM evaluators reveal that they struggle in identifying negative attribution categories and handling complex attribution reasoning in both zero-shot and few-shot settings, but mostly perform relatively well in the fine-tuning setting. Moreover, all evaluators perform inadequately in fine-grained attribution identification scenarios. The experiments also demonstrate that CAQA is consistent with human annotations, and is promising for selecting and developing more effective attribution evaluators in QA."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Model",
"Attributed Question Answering",
"Knowledge Graph"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/44f117c0c2d0d66e6abedc661726e45e7a1b67f2.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Can LLMs Evaluate Complex Attribution in QA? Automatic Benchmarking Using Knowledge Graphs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPDpdk3V8L | Enhancing Clustered Federated Learning: Integration of Strategies and Improved Methodologies | main | Active | Federated Learning;Clustering | other topics in machine learning (i.e., none of the above) | 5;6;6 | 3;2;3 | 3;3;3 | 2;3;3 | 3;3;3 | 5.666667 | 2.666667 | 3 | 2.666667 | 3 | -0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N.A."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See the weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. A holistic FL framework incorporating different clustered FL methods for more comprehensive FL. The research problem is important, and this work contributes to fixing the shortcomings of the existing related works.\n2. Comprehensive experimental evaluation has been conducted to illustrate the effectiveness of the proposed method.\n3. The paper is well written with a clear demonstration of the motivations and problems for solving."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This manuscript proposes a holistic federated learning framework to enhance classification performance by grouping clients into clusters. The framework comprehensively integrates the hard and soft partitional clustering, and clustering with and without automatic cluster number determination. Comphrehensive theoretical and empirical evidence have been provided to illustrate the effectiveness of the proposed method. Moreover, the paper is generally well-written and easy to follow."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. This work focuses on using clustering techniques to enhance the classification accuracy of FL. The difference between this type of research and the fully unsupervised federated clustering should be discussed to avoid potential misunderstandings.\n2. The efficiency issue is listed as one of the challenges in Section 4.1. But only the final number of clusters is reported accordingly. More discussions about the time and space complexity of this work, or even corresponding evaluation results are preferable.\n3. The source code is not opened in the current version."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What is the overall objective of FL in a learning theoretical sense, i.e. how exactly would the generalization error which is targeted be expressed?\nWhat would be naturally occurring drift/shift in such scenarios and how does this match with the shift modelled in experiments?\nWhat are results if the clustering itself is evaluated (eg having ground truth on the data distributions)?\nHow personalized are the models? And how does this scale with the required number of data as regards valid generalization?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The article addresses the important problem of efficient federated learning in the presence of data shift. \nIt provides a very detailed experimental analysis. \nIt also takes some effort to substantiate the observations with theoretical insight. \nMoreover, the authors promise to release the code open source."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The contribution formalizes a general pipeline/framework for clustered federated learning, on the one hand via a cost function, on the other hand via algorithmic choices (as it is not easy to include the objective of an optimum clustering number into costs in a meaningful way). This leads to an improved variant, where the data per client can belong to different clusters, and extensions of at the moment rare soft clustering schemes. The benefit is evaluated in both, comparative studies and ablation studies. A comparably long appendix addresses how to compute an EM scheme based on the costs, how to design data, what happens for linear parts, and some more insight."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The proposal left me a bit puzzled, as the specific contribution is somewhat unclear. On the one hand, the contribution promises a general framework/principle how to model FL with clustering. Here, the costs are rather obvious, as is the four-tier modeling, given the existing work how to model clustering; hence I am not sure what exactly is the contribution, is it the specific way of implementation, or specific guarantees which can be given? In how far is this modeling surprising/challenging and what exactly is the contribution, please specify (eg is it the better implementation? It would help if you could either provide specific benefits which arise from there which would not have been possible without this abstraction, or to provide examples where it is not obvious that the method falls under this common framework. \n\nThe improved version allows the individual assignment of data of one client. Here an according EM scheme is derived (a bit lengthy but straightforward), soft clustering is considered (which seems also straightforward given the existing work on soft clustering and its algorithms). Personally, I find the definition of a new way to measure distances w.r.t. drift most interesting, albeit very shortly presented. Here references to existing technologies how to deal with drift are missing (such as decomposition of sets of data with drift into some where the drift is homogeneous, e.g. moment trees and Kolmogorov trees). I suggest to have a closer look at the (exhaustive) literature on learning with drift in the incremental setup. \n\nI find the presentation suboptimal as the main part of the work reads almost trivial in wide parts, whereas some important insight seems to be hidden in the appendix. It would help if the main take aways of the appendix would be highlighted. Moreover (as already said before), please more clearly highlight why the holistic framework is not trivial, and benefitial with specific non-trivial results/examples."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see weaknesses. Generally, I think this is a solid work. I will adjust the score based on author response."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The problem is well motivated and interesting. The algorithm proposed is novel, with solid theoretical analysis and extensive experimental studies. The paper is well structured and written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces HCFL, a holistic framework for Clustered Federated Learning (CFL) that integrates existing methods. HCFL+ builds on this by addressing key challenges in HCFL, improving the effectiveness. Extensive experiments show the effectiveness of the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "For clustered based FL algorithm, there is a recent work [1] to conduct clustering based on the inferred label distributions. The authors are suggested to discuss about this clustering strategy.\n\nCould the authors provide more experiments on a wider range of beta (e.g., from 0.1 to 1.0), to show the effectiveness on different levels of data heterogeneity?\n\nAlso, there are setups such as C=2,C=3 in [1]. Such settings are also suggested to be studied. If the proposed algorithm can perform well across various data heterogeneity partitions, the paper can be stronger.\n\n[1] Diao, Yiqun, Qinbin Li, and Bingsheng He. \"Exploiting Label Skews in Federated Learning with Model Concatenation.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 38. No. 10. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a unified framework for clustered FL algorithms and improve the techniques within the framework."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024enhancing,\ntitle={Enhancing Clustered Federated Learning: Integration of Strategies and Improved Methodologies},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPDpdk3V8L},\nnote={under review}\n}"
},
"abstract": {
"value": "Federated Learning (FL) is an evolving distributed machine learning approach that safeguards client privacy by keeping data on edge devices. However, the variation in data among clients poses challenges in training models that excel across all local distributions. Recent studies suggest clustering as a solution to address client heterogeneity in FL by grouping clients with distribution shifts into distinct clusters. Nonetheless, the diverse learning frameworks used in current clustered FL methods create difficulties in integrating these methods, leveraging their advantages, and making further enhancements. \nTo this end, this paper conducts a thorough examination of existing clustered FL methods and introduces a four-tier framework, named HCFL, to encompass and extend the existing approaches. Utilizing the HCFL, we identify persistent challenges associated with current clustering methods in each tier and propose an enhanced clustering method called HCFL$^{+}$ to overcome these challenges. Through extensive numerical evaluations, we demonstrate the effectiveness of our clustering framework and the enhanced components. Our code will be publicly accessible."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Federated Learning",
"Clustering"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0a4bcc8af50fe0f24c26cc90f2c8a60d79c1656a.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/e6dad37955ac4b05dbcb64015aee5bc59667bb9c.zip"
},
"title": {
"value": "Enhancing Clustered Federated Learning: Integration of Strategies and Improved Methodologies"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPHra4V5Mc | Feature Averaging: An Implicit Bias of Gradient Descent Leading to Non-Robustness in Neural Networks | main | Active | deep learning theory;feature learning;adversarial robustness;implicit bias | learning theory | 6;6;6;8 | 5;2;4;3 | 4;3;3;3 | 3;3;3;3 | 3;4;3;3 | 6.5 | 3.5 | 3.25 | 3 | 3.25 | -0.258199 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The feature average is clearly defined in two-layer networks - the linear input combinations. However, in the deeper layer, such a definition does not seem reasonable. How do we analyse features in this case? Can the authors provide one or more examples, conceptually or mathematically, about how features are averaged in deeper neural networks?\n\n2. In image tasks, reasonable and robust features are often local and isolated. However, there are other scenarios where the features are required to have global or long-term dependency. Such as time series, and natural language. In those cases, it seems like the real interpretable and robust features are some \"mixing\" of the original data. Therefore:\n a) How should we define the feature mixing in those cases?\n b) Would the theory developed in the paper be effective in those circumstances? If so, how to measure or validate? If not, should there be possible modification can improve the adaptability of the theory?\n\n3. There are researches that have out that, the neural network tends to learn low-frequency features (which are nonlocal and varied smoothly) more effectively than high-frequency features. Some connect such behaviour to the generalization capacity of over-parameterized neural networks. Do you think there is any relation between this theory and the ones developed in this paper? I want to hear about the comments of the authors. \nI list a few papers for reference:\nXu, Zhi-Qin John, et al. \"Frequency principle: Fourier analysis sheds light on deep neural networks.\" arXiv preprint arXiv:1901.06523 (2019).\nXu, Zhi-Qin John, Yaoyu Zhang, and Tao Luo. \"Overview frequency principle/spectral bias in deep learning.\" Communications on Applied Mathematics and Computation (2024): 1-38."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper gives very thorough theoretical study of the problem. The hypothesis, assumptions and derivation are reasonable and sound. The results matched with the intuition well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a theoretical analysis of the relation between feature dependency and adversarial robustness. The authors studied a two-layer ReLU network on the simulated data and derived that the gradient descent tends to learn averaged features instead of isolated/robust ones. The results were proved thoroughly. Then, the paper provides a test on the derived theory on real-world datasets, showing that by incorporating more precise training labels, the model can learn isolated features rather than mixed ones, therefore achieving better adversarial robustness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The theory developed in this paper is phenomenological. It can be further improved if systematic and quantifying criteria can be built to design an optimization method that can help improve the robustness."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "-Minor issues in notations: Big $O$ notation on line 185 is different from the others; $\\Theta$ notation is not defined.\n\n-Why do strong correlations indicate the existence of feature averaging? Is there a rigorous proof? How do you define the threshold of having a strong correlation?\n\n-Proposition 4.8 is used as an example of “adding more fine-grained supervision signals does not trivially lead to decoupled features and robustness”, but it does not follow the setup of your analysis, since $h=1$. How is this multi-class network obtained? Why is its parameter assumed to be the combination of $\\mu_s$?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "-This paper focuses on explaining the adversarial robustness of neural networks in classification problem, which of course is an important topic in ML/DL community.\n\n-This paper is generally well-written and logically organized with clear notations, assumptions, and detailed references to prior works with comparisons\n\n-This paper’s idea is intuitive: this paper characterizes the “approximate linearity” in networks under NTK assumptions as adversarial non-robustness in terms of Feature averaging."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper analyzed the training dynamics of gradient descent on a simplified two-layer ReLU networks in a binary supervised classification task, where the feature vector is sample from a gaussian mixture model with equal probability for K>2 components and the label are masked as 2 categories. The main result in section 4 showed that, under some regulatory assumptions and choices of hyper-parameters, the learned network turns to a feature averaging network, which has low generalization error but is not robust to perturbation of dataset."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Too many strong and unrealistic assumptions are used. First, the number of neurons $M=2m$ is assumed to be finite as the number of clusters of features in assumption 4.3. It is against the purpose of using the neural tangent kernel (NTK) theory to approximate infinitely wide neural networks. Thus, the result in this paper is limited since “training deep neural network is a highly non-convex and over-parametrized optimization problem”. Second, as the solution to adversarial robustness, “fine-grained supervision” suggests including the original cluster labels for all data points, which is extremely unrealistic, because the response is simply assigned from the cluster labels. It is not surprising that there is an improvement in adversarial robustness as the exact latent information is provided. Feature decoupling is a simple result as the network can learn each cluster’s mean given the cluster labels. \n\n- The evidence of feature averaging is shown in Figure 2 as the strong correlations/cosine value between cluster’s mean and network weights, but the scale of correlations is not consistent. In particular, the results from CIFAR-10 are not convincing, where the average cosine value can be lower than 0.1."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the fine-grained supervision case of the multi-cluster data, I understand that the binary classification using the multi-class network results in a robust solution. But, I wonder if the learned network would still be non-robust for the multi-class classification. \n2. It is not an ask to prove the following. I am only curious to understand and know the authors thought. The fine-grained supervision setting can also be seen as learning a more expressive/complex classifier (learn k classes to do binary classification). Then, can the authors comment on the adversarial min-max training? Would similar feature decoupling, ie the weight of each neuron is aligned with one cluster feature, happen naturally if one does adversarial training of the binary class data?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The theoretical analysis of the finite time gradient descent dynamics is thorough in the feature learning regime. \n* This paper is an interesting contribution in understanding the robustness of neural networks from optimization perspective. The analysis also aided in proving a conjecture. \n* The paper is well written and the presentation is clear."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the implicit bias of gradient descent, which the authors call 'Feature Averaging' by theoretically analyzing two layer ReLU network with only a learnable first layer, under a specific data distribution - binary data having multiple clusters with orthogonal cluster centers. \nUnder gradient descent, weights of neurons in the hidden layer converge to an average of the cluster centers, resulting in a non-robust network. Additionally, they also prove that with more fine-grained cluster information, the network can learn a robust solution for the binary classification problem."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The assumptions on the data (orthonormal equinorm and multi-cluster) is restrictive.\n* The proof is detailed and thorough for two layer ReLU with only one learnable hidden layer. Extending to deeper networks seems challenging."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Q1: Can the fact that the network reaches an \"averaging\" solution be deduced from the KKT equations in Frei et al. (2022)?\n\nQ2: The implicit bias of GD for this problem in 2-layer neural networks is undesirable for $\\ell_2$ robustness. What about $\\ell_\\infty$ perturbations? It seems that this bias would help for $\\ell_\\infty$ perturbations, in contrast to the feature decoupling solution $NN_{FD}$. I would appreciate your thoughts on this.\n\nQ3: Relatedly, would it be possible to measure robustness in the experiments on CIFAR10/MNIST against $\\ell_\\infty$ perturbations? If there is evidence for larger $\\ell_\\infty$ robustness when training with additional supervision, then this would make the authors' claims much stronger.\n\nQ4: Figure 3: How do you measure perturbation radius? If, for example, you normalise MNIST images to be between 0 and 1, then a perturbation radius of 2.0 does not make any sense. Can you mention the ranges of the input for MNIST and CIFAR10?\n\nQ5: In line 882, you mention that \"In image classification tasks, Ilyas et al. (2019) visualized both robust and nonrobust features.\". However, this is not true to the best of my knowledge. I believe there are 2 works that have provided such visualizations [1, 2].\n\n[1]. G. Goh. A discussion of ’adversarial examples are not bugs, they are features’: Two examples of useful, non-robust features. Distill, 2019.\n[2] Tsilivis, N. and Kempe, J. (2022). What can the neural tangent kernel tell us about adversarial robustness? Advances in Neural Information Processing Systems 35."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "This is a good paper, and it was a pleasure to read and review. I would like to highlight the following contributions:\n\n- Important topic: The interplay between the implicit bias of optimization and robustness is, in my opinion, an important area of study.\n- Finite time theoretical analysis of neural networks trained in the various settings: The paper rigorously analyzes the training trajectory of the networks. I enjoyed reading and learning about interesting techniques, such as the ones outlined in Section C.2. In general, the Appendix of the paper is exceptionally polished and the results are easy to follow and verify (to the extent that this was possible during the review process - the paper is 67pages long).\n- Interesting suggestion on providing additional supervision during training for improved robustness: I found this observation interesting and the experimental results seem to suggest that these ideas may generalize beyond the toy distribution considered in the paper's analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper theoretically studies the robustness of two layer neural networks under a specific data distribution that consists of $k$ clusters in $\\mathbb{R}^d$ and the target consists of deciding on which group of clusters the input belongs to (binary classification). The authors show that these networks tend to learn an \"averaging\" solution (i.e., the first-layer weights encode the averages of the clusters), making the networks susceptible to $\\Omega(\\sqrt{d / k})$ perturbations of the input (despite the existence of networks capable of tolerating $O(\\sqrt{d})$ perturbations). Motivated by this observation, they propose modifying the training process by using each example's cluster vector as the label, and they prove that this approach indeed yields optimally robust networks. Finally, the paper presents experiments with both synthetic data and image classification benchmarks that contextualize the theoretical findings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) The main conceptual observation does not appear to be new. To my understanding, Frei et al. (2022) showed that neural networks trained with gradient descent on this distribution are not robust to $\\ell_2$ perturbations of the input due to the implicit bias of optimization. In lines 153-171, the authors argue that their work differs from prior work because they do not explicitly analyze the properties of the KKT point of the max-margin problem and instead perform a finite-time analysis. Furthermore, they argue that convergence to the KKT points might be slow. However, we know from Liu & Li (2020) that these conditions are approximately satisfied as long as there is a positive margin. Thus, approximate results are conceptually consistent with the KKT analysis. Additionally, I believe that the fact that the network reaches an \"averaging\" solution can be deduced from the KKT equations in Frei et al. (2022)—see also (Q1).\n\n2) The paper argues that \"feature averaging is a principal factor contributing to non-robustness of neural networks\" (line 014). However, it focuses on one type of perturbation. While the authors provide evidence in this direction for $\\ell_2$ perturbations in a simple distribution (& experimental results in more complicated settings), it is not clear that this holds more broadly. Note that the model’s solution is exceptionally contrived, as all neurons (within each group) converge to the same solution. See also (Q2, Q3).\n\n3) While the paper is well-written, I found the discussion on prior work somewhat insufficient. I would appreciate a bit more clarity on what is novel about the first part of the contributions (i.e., the proof that networks reach feature-averaging solutions). Furthermore, in line 110, the authors mention the work of Min & Vidal (2024) and their conjecture for the first time.
It would be more appropriate to introduce this work and its conjecture earlier in the introduction, making it easier for readers to appreciate the contribution.\n\n4) A list of non-significant typos/grammatical errors I spotted:\n- line 091: features-> feature\n- line 107: exists-> exist\n- line 167: analyze -> analysis\n- line 261: neural -> neuron\n- line 893: echos -> echoes\n- line 1098: We -> we\n- line 1100: margin -> margins\n- lines 1307-1310 require rephrasing (they do not make sense),"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "In this paper, we provide a theoretical explanation for non-robustness in trained neural networks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024feature,\ntitle={Feature Averaging: An Implicit Bias of Gradient Descent Leading to Non-Robustness in Neural Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPHra4V5Mc},\nnote={under review}\n}"
},
"abstract": {
"value": "In this work, we investigate a particular implicit bias in the gradient descent training process, which we term “Feature Averaging”, and argue that it is one of the principal factors contributing to non-robustness of deep neural networks. Despite the existence of multiple discriminative features capable of classifying data, neural networks trained by gradient descent exhibit a tendency to learn the average (or certain combination) of these features, rather than distinguishing and leveraging each feature individually. In particular, we provide a detailed theoretical analysis of the training dynamics of gradient descent in a two-layer ReLU network for a binary classification task, where the data distribution consists of multiple clusters with orthogonal cluster center vectors. We rigorously prove that gradient descent converges to the regime of feature averaging, wherein the weights associated with each hidden-layer neuron represent an average of the cluster centers (each center corresponding to a distinct feature). It leads the network classifier to be non-robust due to an attack that aligns with the negative direction of the averaged features. Furthermore, we prove that, with the provision of more granular supervised information, a two-layer multi-class neural network is capable of learning individual features, which is able to induce a binary classifier with the optimal robustness under our setting. Besides, we also conduct extensive experiments using synthetic datasets, MNIST and CIFAR-10 to substantiate the phenomenon of feature averaging and its role in adversarial robustness of neural networks. We hope the theoretical and empirical insights can provide a deeper understanding of the impact of the gradient descent training on feature learning process, which in turn influences the robustness of the network, and how more detailed supervision may enhance model robustness."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"deep learning theory",
"feature learning",
"adversarial robustness",
"implicit bias"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b90fb22bf29ecec13a7835f702e8bc726ebe4c88.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Feature Averaging: An Implicit Bias of Gradient Descent Leading to Non-Robustness in Neural Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPPy79qKWe | RLEF: Grounding Code LLMs in Execution Feedback with Reinforcement Learning | main | Active | Large language models;automatic code generation;reinforcement learning;LLM agents | foundation or frontier models, including LLMs | 3;5;5;5 | 3;4;4;4 | 2;3;3;2 | 2;3;2;2 | 3;4;2;3 | 4.5 | 3.75 | 2.5 | 2.25 | 3 | 1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In the proposed method, when an incorrect solution is generated, the model immediately moves on to the next iteration. Could the authors explore the effect of incorporating feedback on the incorrect code before proceeding to the next iteration? How would this impact the experimental results?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The method effectively incorporates self-correction and self-refinement techniques into the RLHF framework, leading to significant sample reduction.\n- It demonstrates enhanced utilization of multi-step feedback, contributing to improved performance and efficiency in iterative code generation tasks.\n- The approach addresses a critical limitation of current LLMs by successfully leveraging execution feedback, providing a practical and innovative solution for complex code synthesis problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel approach aimed at enhancing the performance of Large Language Models (LLMs) in multi-turn code generation tasks. The primary challenge addressed is the difficulty LLMs face in leveraging execution feedback for iterative code improvement, a crucial aspect of achieving reliable outcomes in multi-step code generation. The authors propose an end-to-end reinforcement learning method designed to teach LLMs to effectively use execution feedback. The paper makes three key contributions: (1) the development of a reinforcement learning framework that grounds LLMs in execution feedback, (2) demonstrated improvements in performance on code synthesis tasks, and (3) practical benefits including significant sample reduction and enhanced utilization of multi-step feedback."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The method's novelty is limited, as it mainly adapts self-correction and self-refinement techniques to the RLHF framework. Similar approaches have already been explored, such as Anthropic's work on iterative code correction[1] and DeepMind's adaptation of self-correction in RLHF for improved training and inference efficiency[2]. Additionally, the core idea of using compiler feedback as a reward signal for RLHF lacks sufficient comparative analysis with contemporary methods that integrate RLHF with compiler feedback, such as RLTF, Execution-based Code Generation using Deep Reinforcement Learning, CodeRL, StepCoder, and $\\mathcal{B}$-Coder.\n2. The experiments are limited to the CodeContest dataset, with a relatively small number of validation and test samples. The paper should validate its effectiveness on more datasets, such as APPS, which is commonly used for training in RLHF+compiler feedback research.\n3. The paper claims that the multi-turn iterative approach of SFT performs significantly worse than RL (as mentioned in the appendix). However, self-improvement methods like critic+SFT have proven effective in code tasks. Providing the SFT and RL code as open-source could help validate this experimental conclusion.\n\n[1] Improving Code Generation by Training with Natural Language Feedback\n\n[2] Training Language Models to Self-Correct via Reinforcement Learning"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How robust is RLEF to feedback of varying quality beyond random feedback? For instance, how does the model perform if unit tests are incomplete or only cover trivial cases? How does the performance of RLEF change with different types or amounts of execution feedback? Is there an optimal amount or type of feedback for maximizing performance improvements?\n\n2. The paper sets a fixed turn limit in the multi-turn setup. How does this choice affect model performance, and could RLEF benefit from a dynamic turn limit based on feedback content or problem complexity? How sensitive is the model to the number of feedback turns, particularly with respect to diminishing returns after a certain number of turns?\n\n3. The paper mentions using a hybrid approach for value estimation (response-level) and policy optimization (token-level). Can you elaborate on why this approach works better than optimizing both at either the turn or token level?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The experimental results show general improvement when applying RLEF with Llama 3 series models on the CodeContests dataset, demonstrating the effectiveness of the proposed method.\n\n2. The authors demonstrate that Llama 3.1 models trained on CodeContests with RLEF can generalize to other datasets like HumanEval+ and MBPP+, especially the 70B model. This generalization capability makes the proposed method more appealing and suggests broader potential applications.\n\n3. The behavioral analysis post-RLEF training provides valuable insights into how the model learns to handle feedback differently. The detailed error analysis (e.g., fewer repetitions, more targeted edits, reduced reliance on random sampling) gives empirical weight to claims about the improved robustness of RLEF-trained models in multi-turn setup."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces RLEF, a reinforcement learning framework that enhances code synthesis for large language models (LLMs) by utilizing execution feedback during training. RLEF allows models to iteratively refine code solutions based on execution feedback from test cases, significantly improving solve rates on competitive programming tasks and reducing sample requirements. The authors benchmark RLEF with Llama 3 models on CodeContests and demonstrate state-of-the-art performance, with generalizability to HumanEval+ and MBPP+ benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The scientific novelty of RLEF is somewhat limited. Although it extends previous work by introducing a multi-turn setup, the core concept of using unit test feedback as a reward signal has already been proposed in the literature, such as in RLTF [1], which uses unit test feedback within an online RL framework for code generation. The primary advancement in RLEF lies in iterating on this approach by incorporating execution feedback as input text across turns, making this more of an empirical extension than a conceptual breakthrough. \nTo clarify the novel contributions, the authors could explicitly outline how RLEF builds upon and differs from RLTF and related works. Including a citation to RLTF would also better contextualize RLEF within the existing literature, helping to position its contributions more clearly.\n\n2. While the experiments on CodeContests show the effectiveness of RLEF, high-quality unit tests like those provided in CodeContests are hard to obtain in practice or from other data sources. The authors didn't study how the quality of the unit tests (beyond just random feedback) would affect the effectiveness of RLEF. For example, if the public unit tests only cover simple input cases, the execution feedback might struggle to find bugs, imperfections, or inefficiencies in the code, thus providing less useful feedback. This limitation might hinder the application range of this method. \nIt would strengthen the paper to include an analysis on RLEF's sensitivity to unit test quality. The authors could consider testing the method with varying qualities of unit tests, or provide a discussion on approaches to generate high-quality tests in real-world environments where comprehensive unit tests are scarce. Specific experiments that address this limitation would add valuable depth to the study.\n\n3. The paper’s presentation, particularly in the Method section, needs improvement. 
(a) The organization is overly condensed, and crucial methodological details are packed into dense paragraphs, making it challenging for readers unfamiliar with PPO or RL for code generation. Key adjustments in the RL setup are mentioned briefly without adequate justification, making reproduction difficult and potentially deterring readers from fully engaging with the methodology. Expanding on crucial areas, particularly the PPO implementation details and any task-specific adjustments, could improve clarity. Breaking down dense sections into distinct, digestible subsections would also enhance readability. (b) Additionally, the primary results section compares models across different n@k metrics with varying n and k values, which may not be intuitive for readers unfamiliar with this metric. A more consistent comparison framework or additional explanation of these metrics would improve clarity.\n\n\n[1] Liu, J., Zhu, Y., Xiao, K., FU, Q., Han, X., Wei, Y., & Ye, D. RLTF: Reinforcement Learning from Unit Test Feedback. Transactions on Machine Learning Research. (2023)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This method is very intuitive, and modeling reinforcement learning as an interactive task is reasonable.\n- The experimental results are good in training language models for coding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors introduce a new approach for LLMs coding abilities that utilizes execution feedback from generated code through reinforcement learning to enhance performance in code synthesis tasks. The core insight is that LLMs can effectively follow user instructions while leveraging execution feedback to identify and correct errors in the generated code. The authors conceptualize the code generation process as an interactive task, proposing an environment where LLM-based agents generate code and receive real-time execution feedback. Their reinforcement learning-based method, termed RLEF, optimizes the performance of LLMs through this interactive framework. The results demonstrate that RLEF improves the correctness of the generated code compared to existing methodologies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Considering previous methods for training LLM coding and reasoning capabilities with reinforcement learning [1][2], the innovation is limited, as they also used some specially designed reward functions.\n- There is no ablation on the reward function, which is important for the paper.\n- There is a lack of experiments on more models to verify the generality of the method.\n- It lacks some related work on training LLMs for reasoning and coding with reinforcement learning [1][2].\n\n[1] Coderl: Mastering code generation through pretrained models and deep reinforcement learning.\n\n[2] Training Large Language Models for Reasoning through Reverse Curriculum Reinforcement Learning."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "N/A"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written and clearly presented. I appreciate the clarity and organization in the presentation of the results.\n2. The proposed method appears to be effective, substantially enhancing code generation performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a novel method to enhance LLM code synthesis through the use of execution feedback. In contrast to existing works that use unit test feedback as binary indicators or fractions of unit test pass rates to improve code generation, the feedback in this work is provided as language descriptions, including error messages and unit test results. Additionally, the proposed method is iterative, with the model trained to self-correct its previous responses based on received execution feedback. Experimental results indicate that this approach significantly improves LLM code synthesis capabilities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Practical Application:**\n While effective, the proposed method seems highly constrained in its practical application. The reliance on unit tests for feedback is a significant limitation, as generating accurate unit tests for arbitrary user prompts is often as challenging as solving the problems themselves. This confines the method to specific OJ style problems, where unit tests are readily available, and cannot be trivially extended to more general user scenarios where unit tests are not available.\n\n2. **Advantage Computation:**\n The way how the authors compute the advantages seems to be weird. The authors claim the action space is defined on the token level, but the advantages for every token in the same turn is the same. In that sense, how does the critic be updated? Can the authors intuitively / empirically explain why they do this?\n\n3. **Effectiveness of Multi-turn Feedback:**\n The empirical success of multi-turn feedback is intriguing. It is unclear whether the iterative nature of the algorithm or the unit tests' guidance contributes more to this success. An ablation study could clarify this by training RLEF iteratively while providing intermediate/final feedback with only numerical values, avoiding specific details on failed unit tests and expected outputs. For example, assigning smaller reward signals at the end of each turn and a larger signal at the end of the episode based on whether the generated code is correct or the fraction of unit tests passed could yield insights.\n\n#### Overall Assessment:\nWhile the method shows promising empirical results, I am inclined to reject this submission due to its limited applicability and some missing crucial details and ablation results. However, I am open to revising my score if the authors can provide evidence demonstrating the method's applicability to real-world user scenarios and offer more detailed explanations about the algorithm's design and efficacy."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We apply reinforcement learning to ground LLMs in execution feedback for effective multi-turn code generation."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024rlef,\ntitle={{RLEF}: Grounding Code {LLM}s in Execution Feedback with Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPPy79qKWe},\nnote={under review}\n}"
},
"abstract": {
"value": "Large language models (LLMs) deployed as agents solve user-specified tasks over multiple steps while keeping the required manual engagement to a minimum. Crucially, such LLMs need to ground their generations in any feedback obtained to reliably achieve desired outcomes. We propose an end-to-end reinforcement learning method for teaching models to leverage execution feedback in the realm of code synthesis, where state-of-the-art LLMs struggle to improve code iteratively compared to independent sampling. We benchmark on competitive programming tasks, where we achieve new start-of-the art results with both small (8B parameters) and large (70B) models while reducing the amount of samples required by an order of magnitude. Our analysis of inference-time behavior demonstrates that our method produces LLMs that effectively leverage automatic feedback over multiple steps."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large language models",
"automatic code generation",
"reinforcement learning",
"LLM agents"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/84d5bdcceff45e8b4abef9ae9bd2ff30b62feccc.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "RLEF: Grounding Code LLMs in Execution Feedback with Reinforcement Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPRQ7wtwhb | Salutary Labeling with Zero Human Annotation | main | Active | Active learning;influence function | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;5;6 | 4;4;4;4 | 1;3;2;3 | 1;2;2;3 | 2;3;4;3 | 4.25 | 4 | 2.25 | 2 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1)\tIn addition to the logistic regression model used, is the proposed method suitable for more complex models? In Appendix E, a surrogate model is employed to compute the influence, but could the ResNet itself be directly used for influence calculation? \n2)\tSince the accuracy of the pseudo-labels depends on the quality of the validation set, how does the performance of the proposed method vary with different validation set sizes?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Solid experiments are conducted on both tabular and image datasets, integrating recent data selection methods from AL and SSL. The proposed method demonstrates promising empirical results without the need for human-annotated labels. In addition, the method is validated on a LLM fine-tuning task, which further underscores its potential for application in different domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a novel pseudo-labeling approach for unlabeled data using influence functions. The proposed method estimates the influence of each possible label on the validation loss for a given unlabeled data point and assigns the label with the most significant improvement in loss as its pseudo-label. Subsequently, a subset of the unlabeled data with the highest improvement in validation loss is selected to update the model. Extensive experiments are conducted to validate the effectiveness of the method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the method is framed in part within the active learning context, its approach—assigning pseudo-labels to unlabeled data via a self-training mechanism—seems more aligned with semi-supervised learning. The introduction could benefit from adjustments to reflect this alignment more accurately. Another concern is the limited technical contributions. It uses the influence function to score and pseudo-labeling the unlabeled data. Although this is an interesting application, it may not represent a substantial methodological advance. Furthermore, in the experiments, a query budget of 10 examples is set, yet only the first 10 rounds of performance are reported. Providing results for additional rounds or scenarios with a larger query budget would offer a more comprehensive evaluation of the method’s long-term effectiveness. In Appendix D, results from querying 1% of the data reveal that the proposed method underperforms relative to other baseline methods. This needs further exploration and explanation. My last concern is that the paper’s approach relies on setting aside 20% of the data for validation, which may be impractical for certain active learning settings, where labeled data is typically scarce. Performances on different sizes of validation set should also be explored."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "My main concern is the weakness 1. I do not understand the value of their new proposed task. I think this setting is similar to the unsupervised learning with the pseudo labels. In my understanding, I think the human annotation is helpful during training. This is the reason why we study active learning. If we fully abandon human intervention, this is totally another area which is unsupervised learning."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The motivation and paper writing are clear. \n2. The experiment is sufficient \n3. The method is fully automatic without human annotation"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes salutary labeling, which automatically assigns the most beneficial labels to the most informative samples without human annotation. Specifically, they utilize the influence function, a tool for estimating sample influence, to select newly added samples and assign their salutary labels by choosing the category that maximizes their positive influence."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. They do not discuss the difference with the unsupervised learning methods, such as \n\n[1] Self-paced Contrastive Learning with Hybrid Memory for Domain Adaptive Object Re-ID\n[2] Mutual Mean-Teaching: Pseudo Label Refinery for Unsupervised Domain Adaptation on Person Re-identification\n\nIf the human intervention is removed from active learning, it will be transformed to unsupervised learning that assigns the pseudo labels to the samples. Could you discuss the difference ?\n\n2. How to tackle with the situation that the selected labels are wrong? Could you discuss potential error correction mechanisms, or to analyze the impact of incorrect labels on model performance?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- The proposed approach appears closely related to training with noisy labeled data methods mentioned in the Weakness section. An analysis and comparison with this literature in the related work would be beneficial.\n- A detailed analysis of the assigned salutary labels would be interesting. It would be helpful to know if labels different from the ground truth sometimes enhance performance or if performance improvement primarily comes from refining noisy labels."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The *Salutary Labeling for Active Learning* framework was new to me. If we can effectively annotate data automatically without human annotator, it could demonstrate remarkable potential for machine learning as a whole.\n\n- Overall, the writing is well-structured and easy to follow, making it straightforward to understand the main concepts.\n\n- The proposed method consistently improved performance across nine datasets. This study also conducted detailed ablation studies to analyze the effectiveness of *salutary labels* through the influence function."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This study defines a task called salutary labeling, which involves selecting a subset of data from an unlabeled data pool predicted to be the most beneficial for training, similar to traditional active learning, and then using this selected data for model training. The key difference in the proposed salutary labeling approach is that, instead of using labels from human annotators, it assigns pseudo labels expected to improve performance on the validation set. Specifically, it measures the influence of each data point and assigns a salutary label that maximizes validation set performance. The data with the highest influence from the assigned salutary labels is then used for training. This method was tested on nine datasets and demonstrated improved results compared to traditional active learning approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The motivation behind combining salutary labeling with active learning is not fully clear to me.\n - The core motivation of active learning is to label only a small number of informative data points to reduce annotation costs. If automatic labeling without human annotation costs is feasible, applying salutary labels to all available data without the selection process in active learning should suffice.\n - In Algorithm 1, it seems that salutary labels are generated for the entire unlabeled pool during selection. Figure 5 also suggests that model performance improves as the number of salutary labels increases, so the necessity of sampling is unclear.\n - **Additional explanation on why the active learning framework remains effective despite not requiring human labor for labeling would help clarify my understanding.**\n\n- I am not fully convinced about how the salutary labeling task is novel compared to existing label-efficient tasks.\n - To me, the salutary labeling task appears to combine elements from various existing label-efficient tasks. If the goal is to automatically label training data, this could be viewed as a pseudo-labeling process in semi-supervised or self-supervised learning. On the other hand, methods for correcting label noise align more closely with the learning with label noise task [a, b, c].\n - In Appendix A, a key difference from other label-efficient learning tasks is described as a focus on active learning (line 836). 
However, as mentioned above, without a clear reason for combining salutary labels with the active learning framework, I remain uncertain about the task’s distinction from others.\n - **A clearer explanation on why salutary labels need to be combined with active learning and how this approach fundamentally differs from other label-efficient learning methods would be helpful.**\n\n- There seem to be potential fairness concerns in the comparative experiments with existing AL baselines.\n - The proposed experimental setup uses 20% of the dataset as a validation set and utilizes this validation set’s annotations for influence function calculations and labeling. If labeled data is essential for the proposed method, it would be fairer to allocate part of the training budget for this purpose.\n - Specifically, in Table 1, the budget sample size is set as low as 10, which makes it impractical to reserve 20% of the dataset as a validation set while only training on about 10 samples. This setup might give the impression that validation set labels are indirectly being used for salutary labeling of the unlabeled data.\n - As shown in Figure 5 of Appendix D, one potential reason for the reduced gain of the proposed method as the budget size increases may be the dilution of the validation set’s supervisory effect.\n - **The following additional experiments might strengthen the justification for salutary labeling:**\n - Using a smaller validation set (e.g., 1% to 5% instead of 20%).\n - Including the validation set size in the training budget for all methods (e.g., allowing AL baselines to train on the validation set or at least part of it).\n\n[a] Xiao, Ruixuan, et al. \"Promix: Combating label noise via maximizing clean sample utility.\" arXiv preprint arXiv:2207.10276 (2022).\n\n[b] Chen, Wenkai, Chuang Zhu, and Mengting Li. \"Sample prior guided robust model learning to suppress noisy labels.\" Joint European Conference on Machine Learning and Knowledge Discovery in Databases. 
Cham: Springer Nature Switzerland, 2023.\n\n[c] Liu, Sheng, et al. \"Robust training under label noise by over-parameterization.\" International Conference on Machine Learning. PMLR, 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.In the last paragraph of Section 4, the authors mentioned that the time complexity of salutary labeling is O(nd). However, the proposed salutary labeling algorithm need to calculate the influence estimation of every data point in each iteration of active learning. How much will this slow down the entire training process? Can the authors provide the running time comparison results of each method in Table 1?\n\n2.In the experiment, the author set active rounds R = 10 and query budget b = 10. When b and R are larger, is it impossible to prove that the proposed salutary labeling is effective?\n\n3.The first paragraph of Section 2 is too long and a little bit difficult to read. It should be adjusted appropriately.\n\n4.The legend in Figure 1 obscures part of the polyline and may need to be further refined."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.The writing is good and the supplementary materials are relatively sufficient.\n\n2.The authors propose a simple-sounding but effective active learning method which eliminates the need for human annotation. Judging from the comparative experimental results provided by the authors, this idea is effective."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose salutary labeling for active learning which is human annotation-free. They adapt the influence function to calculate the sample influence by assessing the impact of each sample across all possible labels and assigning the label that yields the greatest positive influence. The authors conducted experiments on different datasets to verify the effectiveness of the simple idea."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.In the last paragraph of Section 4, the authors mentioned that the time complexity of salutary labeling is O(nd). However, the proposed salutary labeling algorithm need to calculate the influence estimation of every data point in each iteration of active learning. How much will this slow down the entire training process? Can the authors provide the running time comparison results of each method in Table 1?\n\n2.In the experiment, the author set active rounds R = 10 and query budget b = 10. When b and R are larger, is it impossible to prove that the proposed salutary labeling is effective?\n\n3.The first paragraph of Section 2 is too long and a little bit difficult to read. It should be adjusted appropriately.\n\n4.The legend in Figure 1 obscures part of the polyline and may need to be further refined."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024salutary,\ntitle={Salutary Labeling with Zero Human Annotation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPRQ7wtwhb},\nnote={under review}\n}"
},
"abstract": {
"value": "Active learning strategically selects informative unlabeled data points and queries their ground truth labels for model updates. The prevailing assumption in the active learning paradigm is that the acquisition of ground truth labels optimally enhances model performance. However, this assumption may not always hold or maximize learning capacity. Moreover, ground truth annotations incur significant costs due to the need for intensive human labor. In contrast to traditional active learning, this paper proposes salutary labeling, which automatically assigns the most beneficial labels to the most informative samples without human annotation. Specifically, we utilize the influence function, a tool for estimating sample influence, to select newly added samples and assign their salutary labels by choosing the category that maximizes their positive influence. This process eliminates the need for human annotation. Extensive experiments conducted on nine benchmark datasets demonstrate the superior performance of our salutary labeling approach over traditional active learning strategies. Additionally, we provide several in-depth explorations and practical applications including large language model fine-tuning."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Active learning",
"influence function"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/be6fa69740004965645dc66e1c95ec688eb801af.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Salutary Labeling with Zero Human Annotation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPaTnGjgpa | Can Stability be Detrimental? Better Generalization through Gradient Descent Instabilities | main | Active | Gradient Descent;Generalization;Optimization;Stability | optimization | 3;3;5;5;5 | 4;4;3;4;2 | 2;2;2;2;3 | 1;1;3;2;2 | 2;1;3;3;2 | 4.2 | 3.4 | 2.2 | 1.8 | 2.2 | -0.612372 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "077 what is the role of this threshold $S(\\theta)$ is it on the sharpness? If so we don’t pick it but algorithm trajectory can enforce it, so the sentence “beyond which training is thought to destabilize” is misleading.\n\nIs the theoretical claim about diagonal linear network true for any matrix dimension and depth?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Authors study a new phenomenon about reorientation of eigenvectors of the Hessian during the instability phase of training NNs so that the trajectory can go to flatter regions, which seems new and interesting; even though intuitively it is not clear to me why this is necessary for going to flatter regions. Their claim oppose the claim in Damian et al (2023) that the gradient is well-aligned with the gradient of the sharpness during the instability phase."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Authors study a new phenomenon in training NNs which they call \"Progressive flattening\" during the instability regime when inverse of learning rate is larger than sharpness. In this regime, they claim, via empirical investigations and a theoretical result about a toy model for diagonal linear network (DLN), that loss goes to flatter regions by changing the eigenvector directions of the Hessian, while in the stable regime it seems the eigenvector directions are reinforced. Experiments show this regularization (starting with large learning rates) and gonig through these instability phases, also has a better effect on generalization They also challenge sharpness as a measure of generalization.\nMoreover, their results seem to contradict that of Damian et al 2023 about the behavior of the instability regime."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major concerns: \nThe relation between “growth of parameters” that the authors discuss with the progressive flattening and reorienting the eigenvectors, phenomenon is unclear. \n\nThe claims seems to oppose the sharpness largest eigenvector alignment with the gradient in the instability regime, so I think the experiments seem insufficient to back their claim, especially about reorienting the eigenvectors \n\nAuthors refer to reorienting the eigenvectors of Hessian as the major effect that algorithms goes back to stability, I can imagine reorienting the eigenvectors of the sharpness can have such effect, but the former is not clear to in why it should help. Authors seems not to explain this in their theorem for the toy diagonal model that. Additionally authors interchangeably also talk about the eigenvector of the sharpness Hessian instead of the Hessian of the loss, which is confusing which one is exactly the target of their claims.\n\nOther issues:\n033: is dependent —> depends\n054: in generalization —> in the generalization \n075: grow without bound —> grow unboundedly\nLine 087 definition of $\\alpha$ has an issue, there are two dots used after the two gradients\n092 last sentence not clear\n123: what is a “two-parameter” network?\n133: “sharpness hessian eigenvectors” -> you mean loss hessian eigenvectors?\n148: very unclear: what does “sharpest parameter” mean?\n157: “despite originating from same loss function” doesn’t make sense, are you referring to different coordinates of gradient being different? Also you are referring to $z$ as loss function, but isn’t that the link function in your notation?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* In Figure 5, the authors mark the 'stability threshold' using dotted vertical line. Is this stability threshold wrt initialization? \n* Can the authors contrast their work with existing literature referred above?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The analysis of Hessian eigenvectors rotation during instabilities is new, best to my knowledge.\n* The paper is clearly written and is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This research paper investigates the relationship between gradient descent instabilities and generalization performance in neural network training. Using toy model and experiments on small scales, they demonstrate that these instabilities cause rotations in the eigenvectors of the loss Hessian. Through various experiments on benchmark datasets like fMNIST and CIFAR10, the authors provide evidence supporting their claim that larger learning rates can significantly enhance generalization by promoting this implicit regularization effect."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The authors show that instabilities move training towards flatter regions of the loss landscape. However, This has been shown extensively show in prior works such [1, 4, 5, 6, 7].\n\n* The authors claim that the effect of large learning rates pertains late in training, which they dub it as progressive flattening. First, the phrase can be misunderstood as progressive decrease in sharpness, which has been studied in [4, 5]. I would suggest to use some other phrase for this phenomena. Second, I think the result directly follows from learning rate decay experiments from [9]. If the learning rate is decayed later in training, it would require more time to reach the new EoS threshold. If all models are trained for fixed training time, the sharpness will take more time to increase in the case where learning rate is decayed later in training. I would suggest the authors to train the models long enough such that all models reach the new EoS threshold and then compare the performance.\n\n* The authors claim that their empirical results reveal a clear phase transition where generalization benefits only emerge for\nlearning rates beyond the stability threshold. However, looking at Figure 5(b) the transition is not sharp, rather its continuous. I would not recommend using the phrase transition. Also, similar results are already shown in prior works [1, 4, 5, 6].\n\n* The authors claim that they identify that sharpness is not a great predictor of performance. However, this is already known in prior literature, such as [8]. In addition, authors claim that the rotation can be a better predictor than sharpness. But the experimental results are not conclusive. \nMoreover, my intuition is that such predictors can be misleading for very flat initializations (Figure 3. of [4]) which do not require instabilities to train at high learning rates. \nTherefore, I would predict there won't be any significant change in eigenvector rotation.\n\n* The authors claim that they identify that the effects of large learning rate are observed without progressive sharpening. Various prior works have already found that instabilities occur without progressive sharpening [1, 3, 4, 5, 6] and this result is not new. I would like to request the authors to highlight their new claims wrt these works. \n\n* The authors claim that they prove that for any network with depth > 1, instabilities cause rotation along the principle components of Hessian. This is only demonstrated theoretically for deep diagonal networks. This claim should be modified accordingly. \n\n* Similar models to the two parameter model studied in Section 3.1 is extensively analyzed in prior studies, such as [1, 2, 3]. \nIn particular, when z is quadratic (which is mainly considered in this work) and the global minima is at zero at origin $\\theta_1 \\theta_2 = 0$ has been analyzed in [1] to demonstrate the catapult effect. As discussed in [3], this model does not exhibit progressive sharpening (also can be seen in equations in [1]), this model does not exhibit progressive sharpening and therefore, this model does not capture real-world behavior well. \n\n[1] The large learning rate phase of deep learning: the catapult mechanism\nA Lewkowycz, Y Bahri, E Dyer, J Sohl-Dickstein, G Gur-Ari\narXiv preprint arXiv:2003.02218\n\n[2] Understanding Edge-of-Stability Training Dynamics with a Minimalist Example \nXingyu Zhu, Zixuan Wang, Xiang Wang, Mo Zhou, Rong Ge\nICLR 2023\n\n[3] Universal Sharpness Dynamics in Neural Network Training: Fixed Point Analysis, Edge of Stability, and Route to Chaos\nDayal Singh Kalra, Tianyu He, Maissam Barkeshli\nICLR 2024 BGPT Workshop\n\n[4] Why Warmup the Learning Rate? Underlying Mechanisms and Improvements\nDS Kalra, M Barkeshli\narXiv preprint arXiv:2406.09405\n\n[5] Catapults in SGD: spikes in the training loss and their impact on generalization through feature learning\nL Zhu, C Liu, A Radhakrishnan, M Belkin\narXiv preprint arXiv:2306.04815\n\n[6] Quadratic models for understanding neural network dynamics\nL Zhu, C Liu, A Radhakrishnan, M Belkin\narXiv preprint arXiv:2205.11787\n\n[7] A loss curvature perspective on training instabilities of deep learning models\nGilmer et al.\nICLR 2022\n\n[8] On the maximum hessian eigenvalue and generalization\nS Kaur, J Cohen, ZC Lipton\nhttps://arxiv.org/abs/2206.10654\n\n[9] Gradient descent on neural networks typically occurs at the edge of stability\nJ Cohen, S Kaur, Y Li, JZ Kolter, A Talwalkar\nInternational Conference on Learning Representations"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- The authors claim that instabilities along the top Hessian eigenvector cause the changes in $S$. Is there any evidence for this? It seems that the main changes in sharpness in Figures 3,12 come from the overall curve changing, not from the sharpness differing at different parts of the curve.\n- What is the precise definition for progressive flattening?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The loss landscape visualizations in Figure 3 and Figure 12 are a nice way to visualize the sharpening/flattening process"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the dynamics of gradient descent in the edge of stability regime. It begins by analyzing gradient descent on a two parameter model $L(\\theta) = z(\\theta_1 \\theta_2)$. It then shows that in realistic settings, the eigenvectors of the Hessian rotate during the EOS dynamics, and conjectures that these rotations are responsible for the edge of stability dynamics. They also propose a \"progressive flattening\" mechanism which argues that training with large learning rates for longer will lead to flatter solutions, and studies the connection between learning rates, sharpness, eigenvector alignment, and generalization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While Figure 2 supports the claim that the eigenvectors of the Hessian rotate at EOS, this is a correlational observation and doesn't support the claim of the paper that this rotation is relevant for stabilizing training, giving rise to the EOS dynamics, or exploring flatter regions of the loss landscape.\n- The paper does not precisely define \"progressive flattening\" – see my question below. In addition, the use of cross entropy acts as a strong confounder in these experiments, as the sharpness will converge to $0$ at the end of training. Therefore, a simple explanation for Figures 4,6 is that using a larger learning rate for longer will decrease the loss more, leading to a smaller overall sharpness. To establish progressive flattening as a distinct phenomenon, it would be valuable to repeat these experiments with MSE loss and train all models to near-zero training loss.\n- The paper mischaracterizes the progressive sharpening factor in Damian et al. 2023. Assumption 1 in this paper assumes $\\alpha > 0$ along their \"constrained trajectory,\" not the actual gradient descent trajectory. It is expected to be negative along the gradient descent trajectory since the sharpness decreases when self-stabilizing. Their experiments (Appendix E, top left) show $\\alpha > 0$ in all of the settings studied in Cohen et al. 2020.\n- This paper would benefit from a proper literature review. The only theoretical analyses of EOS cited are Damian et al. 2023 and Arora et al. 2022. There are many relevant missing papers, including papers that study EOS on diagonal linear networks [1,2,3], and [4] which observed a phenomenon possibly related to progressive flattening.\n\n[1] Chen and Bruna 2023: Beyond the edge of stability via two-step gradient updates\n\n[2] Even et al. 2024: (S)GD over Diagonal Linear Networks: Implicit bias, Large Stepsizes and Edge of Stability\n\n[3] Zhu et al. 2023: Understanding Edge-of-Stability Training Dynamics with a Minimalist Example\n\n[4] Kreisler et al. 2023: Gradient Descent Monotonically Decreases the Sharpness of Gradient Flow Solutions in Scalar Networks and Beyond\n\n\nMinor Points:\n- line 154: $theta$s -> $\\theta$s"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- I find the comparison to Damian et al. (2023) in the Related Work and Appendix D to be inaccurate. That paper proves that, during the unstable regime, 1) the parameters will oscillate and grow in the top eigenvector direction, and 2) via the third-order Taylor expansion, such oscillations will lead to an implicit regularization effect which decreases sharpness and drives the model to stability. This self-stabilization effect occurs whenever training is unstable, and does not require progressive sharpening. The progressive sharpening assumption only characterizes the fact that the sharpness increases during gradient descent for all learning rates, and hence training will eventually becomes unstable. To me, it seems that the \"progressive flattening\" mechanism presented in this work can be explained by the \"self-stabilization\" mechanism in Damian et al., (2023).\n\n- Line 244 \"Even after the instability is resolved, the similarity among individual eigenvectors fall while the subspace comparison remain largely similar\": This doesn't seem to be true in Figure 2, as after the instability the similarity is close to 1.\n\n- Much of the derivation in Section 3.1 is hard to follow. What are the quantities $\\gamma_\\Theta, \\gamma_{|\\theta_2^2 - \\theta_1^2|}$?Why is the stability limit $\\eta_{eos}$ defined to be twice the value of the asymptote?\n\n- In Figure 7, what are these quantities in the table? How is $\\rho$ defined? (i.e with respect to what is similarity calculated?)\n\nMinor comments/typos:\n- The descent lemma (line 68) does not necessarily require convexity.\n- Line 254: \"Figure E\" -> Figure 12."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- It is an important question to understand the dynamics of gradient descent in the large learning rate, or \"edge of stability\" regime.\n- The proposal of eigenvector rotation as a mechanism for reducing instability has not, to the best of my knowledge, appeared before in the literature.\n- The theoretical derivations in Section 3 appear to be sound."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies the \"edge of stability\" phenomenon, where neural networks can be trained with a larger learning rate than prescribed via the descent lemma. The central claim of this paper is that during instability, the eigenvectors of the Hessian rotate, thus moving the model parameters to a flatter region of the loss landscape -- the authors call this behavior \"progressive flattening.\" The paper provides theoretical support via a diagonal linear network derivation, and shows empirically that the top eigenvectors do rotate during periods of instability, and that large learning rates lead to improved generalization."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- My main concern with this paper is that it does not adequately support the claim that eigenvector rotation is the *cause* of instabilities being resolved. The paper provides evidence that during instability, the top Hessian eigenvectors rotate, and moreover afterwards the training becomes stable. However, there is no justification that such eigenvector rotation causes the return to stability. Justification for such return to stability has already been given in prior works such as the self-stabilization mechanism of Damian et al., (2023) or the catapult effect of Lewkowycz et al. (2020), and to me it appears that eigenvector rotation is itself caused by instability (yet goes away when other mechanisms cause a return to stability).\n- The definition of \"progressive flattening\" is not clear. My interpretation is that progressive flattening is the phenomenon that higher order derivatives of the sharpness are reduced during instability. However, this effect is not quantified nor easy to observe in the experiments in Section 3.2. \n- The justification in Section 3.3 is also insufficient. One alternate hypothesis to Figure 4 that I find more plausible is the following. First sharpness is constrained to $2/\\eta$ when training with learning rate $\\eta$. Later when $\\eta$ is decreased progressive sharpnening occurs, yet the degree of progressive sharpening is proportional to the loss gradient, which is smaller later in training. Therefore reducing $\\eta$ later leads to a lower final sharpness.\n- Many of the results in Section 4 on the generalization benefit of large learning rates are known in the literature. This benefit is because training with a learning rate of $\\eta$ implicitly constrains the sharpness to be at most $2/\\eta$; lower sharpness is believed to correlate with better generalization."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "For Figure 8:\n - Is 5 samples enough to estimate a rank correlation? Could you report the uncertainty in the rank correlation plot?\n - Why does the sharpness often have a high rank correlation with generalization performance, but a varying sign? (compared to rho which has a more consistent but smaller absolute correlation on average)."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper is well-written. In particular, the theoretical parts (sections 2 and 3) are clear and didactic. Similarly, while I'm not familiar enough with the literature to assess its exhaustivity, I found the related work well-written and easy to follow, giving context to the authors' work.\n\nI did not carefully check the computations but the theoretical parts seem rigorous.\n\nSection 4 provides rigorous experiments. We particularly appreciate that the authors try to empirically understand which specific factor of instabilities impacts generalization the most, and that they provide quite realistic experiments (section 4.4)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper tries to understand the instabilities during neural network training when using large learning rates and their impact on generalization.\n\nThe paper theorizes that unstable, high learning rates improve generalization by rotating Hessian eigenvectors, helping the model explore broader regions in the loss landscape. Using a Diagonal Linear Network (DLN) as a toy model, the authors show that unstable parameter growth leads to these beneficial rotations. They confirm this effect experimentally with an MLP on fMNIST, where instabilities cause rotations of the sharpest Hessian eigenvectors.\n\nThen, the authors directly study generalization. They empirically study the impact of large learning rates in different settings (MLP/VGG, CIFAR/MNIST), showing that increasing the learning rate way beyond the theoretical stability limit improves generalization. The authors extend their analysis to decreasing learning rate schedules and try to untangle the impact of different variables correlated with instabilities (learning rate, eigenvector rotation, sharpness), finding that all these variables impact generalization performance, though the eigenvector rotation seems to play a crucial role (compared to sharpness which is often studied in the literature)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Though it allows for interesting derivations, the DLN model is quite limited, and the paper will be stronger if it studies a more realistic model.\n\nI'm wondering how robust the conclusions in Figure 8 are (see questions).\n\n(Minor) Though I understand that it would require more compute that academic researchers might not have, experiments on harder tasks with more modern models would enrich the paper.\n\nSome figures are quite hard to read. In general, I would recommend increasing the font size, especially for Figures 5 and 6.\n\nFor Figure 2: it can seem that the $S(\\theta)$ plot is done in the lr reduction setting \n\nFor Figure 7, I find the 3d plot hard to read.\n\nFor Figure 8, it took me some time to understand what was represented at the top.\n\nFor Figure 9, it might be interesting to report the result for lr=6.4 to show that 3.2 is the last stable value.\n\nThe code is not currently available, though it seems that it contains interesting contributions (for instance a new Jax implementation of Hessian-Vector product).\n\n\n### Typos\n\n- Arora et al. (2022) demonstrated that gradient descent can lead to alignment between gradient the sharpest eigenvector of the Hessian \n- where exactly derivations are possible"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This work identifies the implicit regularisation effects of instabilities in gradient descent, attainable through the use of large learning rates."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024can,\ntitle={Can Stability be Detrimental? Better Generalization through Gradient Descent Instabilities},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPaTnGjgpa},\nnote={under review}\n}"
},
"abstract": {
"value": "Traditional analyses of gradient descent optimization show that, when the largest eigenvalue of the loss Hessian - often referred to as the sharpness - is below a critical learning-rate threshold, then training is ‘stable’ and training loss decreases monotonically. Recent studies, however, have suggested that the majority of modern deep neural networks achieve good performance despite operating outside this stable regime. In this work, we demonstrate that such instabilities, induced by large learning rates, move model parameters toward flatter regions of the loss landscape. Our crucial insight lies in noting that, during these instabilities, the orientation of the Hessian eigenvectors rotate. This, we conjecture, allows the model to explore regions of the loss landscape that display more desirable geometrical properties for generalization, such as flatness. These rotations are a consequence of network depth, and we prove that for any network with depth $> 1$, unstable growth in parameters cause rotations in the principal components of the Hessian, which promote exploration of the parameter space away from unstable directions. Our empirical studies reveal an implicit regularization effect in gradient descent with large learning rates operating beyond the stability threshold. We find these lead to excellent generalization performance on modern benchmark datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Gradient Descent",
"Generalization",
"Optimization",
"Stability"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/12415da43e1311d6a1358172ecac64373ec80a7a.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Can Stability be Detrimental? Better Generalization through Gradient Descent Instabilities"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPoW8CajCN | Fractal-Inspired Message Passing Neural Networks with Fractal Nodes | main | Active | graph neural network;message passing neural network | learning on graphs and other geometries & topologies | 3;5;6;6 | 4;3;2;3 | 1;2;3;3 | 1;2;3;3 | 2;3;3;3 | 5 | 3 | 2.25 | 2.25 | 2.75 | -0.866025 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In principle, a different sub-graph partitioning could be used to determine how to select subgraphs and assign additional nodes. Why is it sensible to use self-similarity as a criterion to select these subgraphs?\n\nI have questions regarding the equation (2-4) encapsulating message passing with fractal nodes: As far as I can tell, from eq. (2), there is only messge passing _within_ each subgraph $G_c$, as there is a subscript $c$ present for all \n\n$h^{(\\ell)}_{u,c}$\n\naggregated in the message function $\\Psi^{(\\ell)}$. Is this just a problem in the notation (I assume that the statement $u \\in \\mathcal{N}_\\nu$\n\n is supposed to mean that infact not only the intersection of $G_c$ and the neighbourhood $\\mathcal{N}_\\nu$ is relevant).\nFurthermore: As far as I can tell, there is no message passing between the fractal nodes. This information mixing only (potentially) happens in the last layer via the MLP mixer (c.f. eq. (8)). Why not also consider message passing on the coarse grained graph made up of all the fractal nodes?\n\nHow self-similar are the two graph structures (original graph and graph with fractal nodes)? It seems the METIS algorithm does not maximize self-similarity as its objective, but rather maximizes in-cluster connections while minimizing inter-cluster connections. This makes me wonder how adept the name fractal nodes truly is.\n\nIn Figure 5, why is there essentially no drop in accuracy until $r = 7$ with a stark drop (to zero) when $r = 8$? Could the authors comment on the origin of this abrupt change?\n\nCould you explain in more detail how the norm distribution experiment (at the beginning of Section 4.1) allows to draw conclusion about local and global information."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well structured. With its focus on the incorporation of long-range information distillation into message passing networks, it adresses a timely and important problem. The idea of aggregating local subgraph information into aggregate nodes is well founded. It is great to see that the proposed architecture is able to match or superseed transformer performance on graph level datasets. Beyond this, it is good to see that the authors did not only investigate their method solely in this setting, but in total investigate four different aspects of their proposed model in the experimental section. In particular the performance gain that the proposed fractal nodes bring in the synthetic expressivity setting of Section 5.2 is intriguing. The conducted ablation studies round off the paper well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents an approach to enhance Graph Neural Networks (GNNs) by introducing the concept of so called fractal nodes. Most common GNN architectures are based on the Message Passing Framework (Message Passing Neural Networks (MPNNs)). Such architectures face challenges in balancing the incorporation of global information with local information propagation mechanisms. In particular, MPNNs struggle with problems like over-smoothing and over-squashing, which hinder their performance in capturing long-range dependencies. Here, Graph Transformers provide a mechanism to incorporate long-range interactions. However, they can be computationally expensive and often overlook the local structures that MPNNs excel at capturing. To overcome these problems in MPNNs, the authors find inspiration in the fractal nature of many real-world networks, which exhibit self-similarity across different scales. The authors propose that this property can be leveraged to enhance information flow in GNNs: In order to do so, so called \"Fractal Nodes\" are introduced into given graphs: The proposed fractal nodes coexist with original nodes and each fractal node summarizes information from its corresponding subgraph while maintaining direct connections to the original nodes. This design allows for an aggregation of both local and global information.\nExperiments demonstrate that GNNs enhanced with fractal nodes achieve performance comparable to or better than state-of-the-art graph Transformers on various graph-level-task datasets. It is also shown that this approach shows certain benefits when tackling oversquashing."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I would argue that a main weakness of the experimental section is the lack of (real world) node level tasks. I am aware that The TreeNeighboursMatch task is a node level task, and I think that it is great that this is included. However, it would be good to see how significant the inclusion of the proposed fractal nodes is for standard node classification benchmarks. Are long-range interactions a problem here and do the proposed fractal nodes alleviate the problem if it exists in this setting?\n\nI am unsure how strong the connection to renormalization techniques is. From a heuristic perspective, I understand that a coarse graining nature is present in both the renormalization group in Physics and when summarizing subgraphs into single nodes. But are there any deeper connections between these two settings? If not, it might be good to make it clearer that this is only used as a heuristic comparison in the present paper.\n\n\n\n\nIt seems to be a missed opportunity that no (even preliminary) theoretical analysis of expressivity and mitigation of the oversquashing phenomenon was conducted. Could the authors add some details here?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"Yes, Discrimination / bias / fairness concerns"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The expressive power evaluation in Section 5.2 is impressive. However, given the absence of a formal proof regarding this expressive power, could you share the code detailing how the method achieved a score of 100 in CSL and SR25?\n\n2. Additionally, we were unable to reproduce the results shown in the paper using the provided source code.\n - Runtime (seconds per epoch) on a 3090 machine: 58s (our evaluation) vs 5.03s (paper) \n - 0.27, 0.252, and 0.2474 in 3 runs vs 0.2464±0.0014 (paper); Could the authors clarify the number of runs used to obtain the best result?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1. The method is straightforward with low complexity and seems to be effective based on the results obtained.\n\n2. The authors conducted a variety of benchmarking experiments and meticulously outlined the settings for each benchmarking experiment."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes 'fractal nodes' to enhance Graph Neural Networks, addressing the over-squashing. By integrating these nodes into GNN, the method improves long-range dependency modeling, achieving competitive performance with graph Transformers while maintaining computational efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed framework appears to lack novelty, resembling a combination of ClusterGCN[1] and VirtualNode methodologies. The use of Metis clustering, akin to ClusterGNN, and the introduction of fractal nodes that connect to all nodes within a cluster, essentially reflect a variation of the VirtualNode concept.\n\n2. The misapplication of LPF and HPF is evident. As per the definition of LPF (Equation 6), it represents the mean of every node feature within a cluster, failing to capture the graph structure or low-frequency graph signal components. Similarly, HPF (Equation 7), calculated as the original feature minus LPF, also fail in capturing high-frequency graph signal components. It is crucial for the authors to revisit the fundamentals of spectral graph theory to refine their understanding of these concepts.\n\n3. The analysis of the properties in Section 4 appears superficial. The L2 norm distribution of node feature merely indicates the smoothing of node features and does not inherently reveal whether local or global information is being captured. The claim that the proposed method is more balanced lacks rigor, it is essential to provide concrete metrics for comparing differences before making such assertions.\n\n4. The analysis comparing the proposed method to graph coarsening techniques and virtual nodes is inaccurate. Firstly, the fractal nodes are not dynamically generated during training, they are precomputed by METIS. Secondly, virtual nodes do not always utilize mean pooling as an aggregator, instead, they commonly function as regular graph nodes, sharing the same hidden space with other nodes in most scenarios.\n\n5. The author claim the core concept of 'fractal nodes for enforcing self-similarity,' yet upon reviewing the entire paper, the practical impact of 'self-similarity' on enhancing GNN performance remains unclear to me.\n\n[1] Wei-Lin Chiang, Xuanqing Liu, Si Si, Yang Li, Samy Bengio, and Cho-Jui Hsieh. 
Cluster-gcn: An\nefficient algorithm for training deep and large graph convolutional networks. In Proceedings of the\n25th ACM SIGKDD international conference on knowledge discovery & data mining, pp. 257–266,\n2019."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In Table 2, the experiments focus mainly on graph-level tasks. How does the method perform on node-level tasks, such as node classification on the ogbn-arxiv dataset? Additionally, can you discuss any potential challenges or modifications needed to apply the proposed method to node-level tasks?\n2. In Table 2, why does GCN+FN_M perform worse than GINE/GatedGCN+FN_M? Additionally, what factors contribute to the proposed method performing worse than GRIT and only comparably to Graph-ViT/MLP-Mixer and Exphormer on MNIST/CIFAR10? Can you explore specific architectural differences that might explain the performance gaps?\n3. When splitting graphs into subgraphs, how many nodes are contained in each subgraph in the experiments? Does each subgraph have a similar number of nodes, and how does the number of nodes in each subgraph affect performance? Is there any trade-off involved in choosing subgraph sizes?\n4. Lines 510–513 mention that 'the higher the number of fractal nodes, C, the better the performance.' What is the ratio of fractal nodes to the total number of nodes? Is there a threshold where increasing C leads to a decrease in performance?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper has a clear logical structure, making it relatively easy to read.\n2. The concept of using fractal nodes is interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces 'fractal nodes' to enhance MPNNs by capturing both local and global information efficiently. Fractal nodes address the over-squashing problem, allowing information to travel over long distances without significant loss, thereby addressing key limitations in standard MPNNs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The performance improvements of the proposed method in the experiments are not particularly significant.\n2. The paper could benefit from more in-depth discussion of the experimental results, beyond simply listing which method performs better."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "How sensitive is the method to different subgraph partitioning strategies? \nWhat criteria should be used to determine optimal partitioning?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Originality\n- The concept of fractal nodes is novel and well-motivated by real-world network properties\n- The approach offers a fresh perspective on handling the local-global information trade-off\n- Creative adaptation of renormalization concepts into neural network architecture\n\n2. Quality\n- Strong theoretical foundation drawing from established concepts in network science\n- Clear connection between fractal properties and the proposed solution\n- Comprehensive experimental validation including performance comparisons and ablation studies\n- Addresses known MPNN limitations (over-squashing) with a principled approach\n\n3. Clarity\n- Well-structured presentation with clear motivation and problem statement\n- Effective use of figures to illustrate concepts (especially Figure 1)\n- Logical flow from theoretical inspiration to practical implementation\n\n4. Significance\n- Provides an efficient alternative to computationally expensive graph Transformers\n- Maintains MPNN's computational advantages while improving its capabilities\n- Potentially applicable across various graph-based learning tasks"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel concept called \"fractal nodes\" to enhance Message Passing Neural Networks (MPNNs) by addressing their limitations in balancing local and global information processing. The approach is inspired by the fractal nature of real-world networks and renormalization techniques. The key innovation lies in creating fractal nodes that coexist with original nodes, where each fractal node summarizes subgraph information and integrates into the MPNN architecture. The method aims to improve long-range dependencies while maintaining MPNN's computational efficiency, presenting an alternative to graph Transformers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses\n\n1. Theoretical Analysis\n- The paper could benefit from a more rigorous theoretical analysis of why fractal nodes help with over-squashing\n- Limited discussion on the optimal number or size of fractal nodes\n\n2. Experimental Validation\n- The truncated content doesn't show complete experimental results\n- Need for more extensive comparisons with other approaches addressing similar limitations\n- Absence of ablation studies on the impact of different subgraph partitioning strategies\n\n3. Practical Considerations\n- Limited discussion on the computational overhead of creating and maintaining fractal nodes\n- Unclear scalability analysis for very large graphs\n- No discussion on potential limitations or failure cases"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024fractalinspired,\ntitle={Fractal-Inspired Message Passing Neural Networks with Fractal Nodes},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPoW8CajCN},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph Neural Networks (GNNs) have emerged as powerful tools for learning on graph-structured data, but they struggle to balance local and global information processing. While graph Transformers aim to address these issues, they often neglect the inherent locality of Message Passing Neural Networks (MPNNs). Inspired by the fractal nature of real-world networks, we propose a novel concept, '*fractal nodes*', that addresses the limitations of both MPNN and graph Transformer. The approach draws insights from renormalization techniques to design a message-passing scheme that captures both local and global structural information. Our method enforces self-similarity into nodes by creating fractal nodes that coexist with the original nodes. Fractal nodes adaptively summarize subgraph information and are integrated into MPNN. We show that fractal nodes alleviate an over-squashing problem by providing direct shortcuts to pass fractal information over long distances. Experiments show that our method achieves comparable or better performance to the graph Transformers while maintaining the computational efficiency of MPNN by improving the long-range dependencies of MPNN."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"graph neural network",
"message passing neural network"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/9d8c44503c6f4f34aabd8366a3d1ec16df9422eb.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Fractal-Inspired Message Passing Neural Networks with Fractal Nodes"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zPxlHOLxmh | From Counseling Transcript to Mind Map: Leveraging LLMs for Effective Summarization in Mental Health Counseling | main | Active | Large Language Models;Visual-based Summarization;Mental Health Counseling | foundation or frontier models, including LLMs | 1;1;3;3 | 5;4;4;4 | 2;2;2;1 | 1;1;2;2 | 3;2;2;3 | 2 | 4.25 | 1.75 | 1.5 | 2.5 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The authors used only 20 samples in their experiments; have they considered using larger sample sizes to enhance the representativeness of their results? Is future validation of a larger data set planned?\n\n2. In the field of consulting, where there is quite a bit of work on automated summarization [1][2][3], how can it be demonstrated that the presentation of mind maps reduces cognitive load?\n\n[1] Extraction and Summarization of Suicidal Ideation Evidence in Social Media Content Using Large Language Models\n\n[2] Utilizing Large Language Models to Identify Evidence of Suicidality Risk through Analysis of Emotionally Charged Posts\n\n[3] Aligning Large Language Models for Enhancing Psychiatric Interviews through Symptom Delineation and Summarization"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The integration of LLMs, specifically GPT-4o Mini, with PlantUML for generating mind maps from qualitative counseling data is a creative and original contribution. This approach moves beyond traditional text-based summaries by providing a visual representation of complex counseling sessions, which is underexplored in the mental health domain."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a web-based summarization tool that uses Large Language Models (LLMs), specifically GPT-4o Mini, to transform counseling session transcripts into visual mind map summaries. By leveraging the MEMO dataset of counseling conversations, the tool extracts key points from qualitative dialogues and organizes them hierarchically using PlantUML syntax. Human evaluations based on accuracy, completeness, conciseness, and coherence indicate that the generated mind maps effectively capture essential information. The tool demonstrates potential in aiding therapists to quickly recall session details and reduce cognitive load by providing a clear, visual overview of counseling sessions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited Sample Size and Representativeness\n\nThe study utilizes only 20 randomly selected samples from the MEMO dataset for the evaluation. This small sample size is not sufficiently representative of the diverse range of counseling conversations that occur in real-world settings. As a result, the findings may not generalize well to broader applications. To strengthen the validity of the results, it would be beneficial to include a larger and more varied dataset. This expansion would help in capturing a wider array of counseling scenarios and linguistic nuances, thereby enhancing the robustness of the tool's performance and its applicability to different contexts.\n\n2. Reliance Solely on Human Evaluation Lacking Reproducibility\n\nThe evaluation of the generated visual summaries is based entirely on human assessments from a small group of participants who are researchers in the field of Information Technology, not mental health professionals. This approach raises concerns about the reproducibility and objectivity of the results. Human evaluations can be subjective and may vary significantly between different evaluators. Incorporating quantitative evaluation metrics, such as adapted versions of ROUGE or BLEU scores for summarization tasks, could provide more objective measures of the tool's performance. Additionally, involving mental health professionals in the evaluation process would offer insights that are more aligned with practical therapeutic needs, thereby increasing the reliability and validity of the findings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Since the current contribution is low, I think the authors can add another analysis of multilinguality. Maybe they can analyse how different languages affect the mind map creation."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Interesting problem of visual summaries is explored. Mind maps are an effective and quicker way of information communication, so using such things to help therapist is a good idea.\n- It is good that the authors have developed a web based tool which can help people.\n- The paper is easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose a method to construct a summarised mind map of counselling transcripts. They use the MEMO dataset, from which the authors extract 20 samples and perform their analysis on them. They developed a web-based tool to showcase the generated mind maps.\n\nWhile the topic of visual summaries is relevant to explore, this paper tackles it from an engineering perspective rather than a research perspective. This might make a good demo paper in some conference, however, I am not so sure about this being a part of the research main track."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- No substantial contribution is there in this paper for it to be a part of main research track, maybe a better fit for demo track for some conference.\n- Only 20 mind maps evaluated by only 3 people with using just 1 LLM as the summariser might not be enough to be considered as a thorough investigation. I would suggest the authors to conduct a more detailed analyses by using more LLMs in the place of GPT-4o Mini to summarise the transcripts and then perform a comparison of different LLMs here.\n- In a nutshell, the paper does not propose a new problem, does not have a new dataset, does not propose any novel method, and does not contain any interesting observations or analysis. It is a very engineering pov paper, which is not a good fit for this venue."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The paper seems like very half-baked at this point and a lot of analysis is missing for this study to be of use to readers. \nHow do the visual models compare to textual models on information capture ? \nDid you ask therapists to review the mind-map models and get feedback ? What was the response like ?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The authors provide a web based tool that can be used by mental health professionals for building mind-map visual diagrams of past notes\n- They show that their tool can build reasonably good mind maps evaluated using human experts."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "With the increase in number of patients requiring mental health care, the cognitive load on mental health professionals has increased a lot. A major part of it is referring to notes from the past sessions. Due to visual information being easier to process and remember, the authors propose a visual diagram based approach to summarize past notes instead of just pure text based summaries. They use a simple LLM based prompting mechanism to take in the transcript and output the summary in a structured way that is parsed into a PlantUML mind map syntax. They also build a web tool that can be used for this."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper lacks novelty - as it is merely an application of using LLMs for a very specific use case. The prompting itself is also not smart or novel in any way as it just has a structured output that is parsed into a mind-map. I dont believe this would be of interest to many people.\n- There are no comparisons to text based summaries for this use case and it is not clear how these visual summaries could be more useful to a mental health professional. Several ablations are missing.\n- The contributions is really just the prompt and the web based tool.\n- There is also no comparison between the use of different kind of models / LLMs use"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Why was only a subset of 20 transcripts used for evaluation?\nWhy were only three “participants” used for evaluation, and what were their qualifications?\nCould additional evaluation metrics or objective measures be incorporated?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Originality:\nThe paper introduces the use of mind maps for summarizing counseling transcripts, which is an interesting shift from typical text summaries. Applying language models to generate these visuals in a mental health context is a practical adaptation, though it builds on existing summarization methods.\nQuality:\nThe method is clearly explained but lacks depth in certain areas. While the pipeline from prompt design to mind map visualization is understandable, the approach could benefit from a stronger methodological foundation or more comprehensive testing. Human evaluations are included, but the evaluation setup could be more rigorous to fully support the tool’s effectiveness.\nClarity:\nThe paper explains the problem and solution in a straightforward way, with enough detail to understand the main approach. Visual examples help clarify how the mind maps work, although some sections could be streamlined for readability.\nSignificance:\nAddressing the cognitive load for therapists is a relevant issue, and this tool could be helpful in simplifying their workflow. While the impact is practical, the approach is incremental and may inspire further exploration of visual summarization but is unlikely to be transformative on its own."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a tool designed to help mental health therapists by summarizing counseling sessions into mind maps. Using a lightweight language mode GPT-4o minil, the tool takes session transcripts and generates visual summaries that highlight key points in a structured, easy-to-read format. This approach aims to reduce the cognitive load on therapists by providing quick, organized overviews of each session, making it easier to recall important details. The study also includes human evaluations, showing that these visual summaries effectively capture the main elements of a session, potentially enhancing therapeutic practice by simplifying documentation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited Sample Size in Evaluation\nAlthough the authors mention that they selected 20 transcripts as a “preliminary” sample, the decision to limit the entire study to these 20 samples is problematic given that the original MEMO dataset contains 212 transcripts. Relying solely on such a small subset, when a much larger set is available, raises concerns about the representativeness of the findings. A larger, more diverse sample could provide a stronger basis for evaluating the tool’s reliability across different counseling scenarios and patient-therapist interactions. Expanding the sample size is essential to enhancing the credibility of the results and giving a fuller picture of the tool's performance.\n2. Insufficient Evaluation Criteria and Sample Size of Evaluators\nThe evaluation approach has two issues: (1) the limited expertise of the evaluators and (2) the small number of evaluators. Referring to the evaluators as “participants” implies they were not specially trained or highly qualified to evaluate counseling summaries, which could compromise the quality of feedback. A robust evaluation for this type of tool typically requires input from domain experts — such as mental health professionals — who can reliably assess criteria like accuracy, relevance, and therapeutic usefulness based on their experience.\nAdditionally, the study relies on only three evaluators, which is insufficient for a reliable, statistically meaningful assessment. This small evaluator pool, combined with limited experience in mental health, limits the confidence one can have in the evaluation results.\n3. Lack of Comparisons with Other Summarization Methods and Models\nThe paper does not compare mind maps with alternative summarization formats, such as text-based summaries or visual formats like concept maps. This absence weakens the justification for mind maps as the preferred format, as their claimed cognitive benefits remain untested. 
Additionally, the study only uses GPT-4o Mini, without comparing it to other language models. This limits the scope since other models might perform better or capture nuances differently.\n\nBy addressing these issues — expanding the sample size, involving a more qualified evaluator pool, and incorporating comparisons with other methods — the study would achieve a higher standard of rigor and offer stronger evidence for the proposed tool’s effectiveness."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024from,\ntitle={From Counseling Transcript to Mind Map: Leveraging {LLM}s for Effective Summarization in Mental Health Counseling},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zPxlHOLxmh},\nnote={under review}\n}"
},
"abstract": {
"value": "The increasing number of patients with mental health illness has heightened the cognitive load on therapists, making it challenging for them to provide personalized care that each patient requires. Summarizing counseling sessions can aid mental health practitioners in recalling key details. However, most existing research on summarization focuses primarily on text-based summaries which often require significant cognitive effort to read and interpret. Visual-based summary such as mind maps is proven to help enhance cognitive understanding by giving a quick overview of topics and content. Nevertheless, due to the complex nature of counseling which involves substantial qualitative data, generating visual-based summaries using traditional AI models can be challenging. With the recent advancements in Large Language Models (LLMs), these models have demonstrated the capability to perform tasks based on instructions and generate outputs in various formats. In this study, we develop a web-based summarization tool that serves as a pipeline in performing summarization of counseling transcripts into visual-based mind map summaries using LLMs. We conducted a human evaluation to validate the effectiveness of the generated visual-based summary based on criteria of accuracy, completeness, conciseness and coherence. Our findings show that our web-based summarization tool can effectively extract key points from counseling transcripts and present them in visual-based mind maps, demonstrating its potential in enhancing insights for therapists, ultimately simplifying the process of documenting counseling sessions."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Models",
"Visual-based Summarization",
"Mental Health Counseling"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/33477911d157483e340f884a4a8d9df61edde664.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "From Counseling Transcript to Mind Map: Leveraging LLMs for Effective Summarization in Mental Health Counseling"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zRsFAUQDRk | Class-wise Generalization Error: an Information-Theoretic analysis | main | Active | information-theoretic bounds;generalization error;class-bias | learning theory | 5;5;6;6 | 4;3;4;4 | 3;3;3;3 | 2;2;2;3 | 3;1;2;4 | 5.5 | 3.75 | 3 | 2.25 | 2.5 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Major questions/comments:\n\n1. Definition 1 looks like an adaptation of Eq 3 in Bu et al., 2020. In particular, the difference of two terms given by the population loss and the empirical loss remains. The difference is that Def 1 in the present work considers conditioning on a specific class y. Beyond this generalization to per-class analysis, **could the authors please highlight contributions compared to Bu et al., 2020 (and also Xu & Raginsky, 2017, Zhou et al., 2020)?** I am asking as a non-specialist reader.\n\n2. Theorem 1 in the present work is similar to Theorem 1 of Xu & Raginsky, 2017, except that Xu & Raginsky, 2017 considers no conditioning on a specific class y. Could the authors comment on the difference? \n\n3. The idea of “supersample” used in Def 3 seems to be inspired by Steinke & Zakynthinou 2020 (see Sec 1.2 therein). Could the authors comment on the new parts in the present work wrt to the original supersample idea?\n\n\n4. The notation $P(\\mathbf{W}|\\mathbf{S})$ is used in the paper to denote the distribution of learned weights $\\mathbf{W}$ given a training sample $S$. In Def 1, the defined class-wise generalization error involves $P_{\\mathbf{W}, \\mathbf{S}_y}$. This presumably means $P(\\mathbf{W} | \\mathbf{S}_y) P(\\mathbf{S}_y)$. What does $P(\\mathbf{W} | \\mathbf{S}_y) $ mean? How can a classifier learn from only data from one class $y$?\n\n5. L210. How does Def 2 depend on $P(y)$? Likewise how does Def 3 *not* depend on $P(y)$?\n\n6. L272: “if model parameters W memorize the random selection U, the CMI and the class-generalization error will be large”. I thought that U is a vector of Rademacher random variables introduced solely for analysis. Do you actually empirically sample U and use it to train W?\n\n\nMinor:\n* L1454 in the appendix. Log is missing when expanding the KL divergence.\n\n* Minor suggestion. I think CIFAR10 can be considered a solved problem (much like MNIST). 
I am aware that the paper has experimental results on CIFAR100 in the appendix. Test accuracy on CIFAR 100 of state-of-the-art models can be almost 100% these days. On the empirical side, it would be good to consider one more experimental setting where the model lacks capacity to tackle the problem. CIFAR 10 is too easy for a model class like ResNet 50. For instance, considering a ResNet 8 on ImageNet would be a good setting where the model struggles to learn well due to the lack of capacity. It would be good to check whether the proposed bounds hold in this setting as well. \n(Note that this is not a request for more experiments for the purposes of evaluating the paper. It is a suggestion to help strengthen this work. The authors need not try to conduct this experiment during the rebuttal. It’s not a major request.)"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper is theoretical in nature. The idea of pursuing generalization errors for each class is claimed to be new. If so (to be verified with other reviewers), then this research direction is original, The proposed information theoretical bounds appear to generalize the bounds studied in Bu et al., 2020, Xu & Raginsky, 2017, Zhou et al., 2020 and other works, which mostly study the standard generalization bounds (i.e., averaged over all classes, and are not class-wise).\n\nEmpirically it is verified that the results in Theorem 3 and Theorem 4 provide valid class-wise generalization bounds in both standard CIFAR 10 and a noisy variant of CIFAR 10. This demonstrates that the contributes bounds are widely applicable to several settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes characterizing classification generalization errors of each class separately. The motivation is that neural networks do not generalize equally for all classes. The paper demonstrates this phenomenon empirically on CIFAR10 and CIFAR100, and also notes that class-wise generalization depends on factors beyond the class itself. Main contributions are several variants of per-class information-theoretic generalization bounds. The paper notes that the mutual information between the model and the class data can be used to characterize (or upper bound) the class generalization error."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "(Recommended actions are clearly listed in the Questions section.)\n\n\n*Presentation:*\n\nWriting can be much improved. In particular, many theoretical results are not accompanied with a justification, or a practical implication. Writing appears to be tailored to specialists of this subject area. Sections and writing flow in the paper can also be improved. For instance, Theorem 1 is introduced only to be superseded by Sec 2.2 which, at the beginning of the section, states a limitation of Theorem 1. Def 2 (a definition of class-wise generalization errors) is introduced only to be superseded by Def 3 (see reason at L210). It is hard for me to form a coherent story. Same for Theorem 2 (class-CMI) which is introduced only to be commented on before introducing Theorem 3 that the bound in Theorem 2 is hard to evaluate (see L275). \n\n*Novelty:*\n\nThe contributed results, while generalizing bounds in existing results, are also heavily based on them. This raises a concern about the novelty. For instance, Def 1 looks like an adaptation of Eq 3 in Bu et al., 2020."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1.Motivation: Even though the authors reveal label-dependency of generalization bound, in addition to mentioned in the Sec 4.1, can you discuss how different labels affect the standard generalization error? Or can you explain the scenarios where class generalization must be considered?\n2.Essentiality: The derivation of Theorem 1 and Lemma 2 starts from KL divergence (or CMI) and does not exceed this scope. Do these conclusions reveal any more fundamental facts?\n3.Proof Details: (i) Regarding Equation 30, is there an inversion of the positive and negative signs? (ii) In response to the issue mentioned in Remark 2, the authors introduce a novel loss random variable. However, the bound presented in Equation 47 is also applicable to the expression preceding Equation 31. Consequently, in Theorem 3, the term$\\max\\left(\\mathbb{1}_{\\left\\{y_{i}^{-}=y\\right\\}},\\mathbb{1}_{\\left\\{y_{i}^{+}=y\\right\\}}\\right) $can be omitted, similar to the approach taken in Theorem 4. Do these facts have any impact on your findings?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Originality: The authors introduce a new concept - \"class-generalization error\", to address the issue that the existing generalization boundaries cannot capture the varying generalization performance across different classes.\nQuality: The authors have provided detailed proofs for each conclusion and conducted comprehensive numerical experiments.\nSignificance: The authors not only extend their theoretical findings to derive bounds for diverse scenarios but also shed light on label-dependent generalization bounds."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In order to address the issue that the existing generalization boundaries cannot capture the varying generalization performance across different classes, the authors introduce the concept of \"class-generalization error,\" and derive bounds based on KL-divergence and super-sample technique, validating their bounds in different networks.\n\nTheir theoretical tools extend to: (i) Deriving class-dependent generalization error bounds affecting standard generalization and tightening existing bounds; (ii) Providing tight bounds for subtask problems with test data subsets; (iii) Deriving bounds for learning with sensitive attributes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The notation in the formulas is not concise enough. For instance, in Eq 23., the parameters of $\\ell$ are in the order of w, x, y, whereas in Eq 54, they have become z, w. In addition, some formulas are not aesthetically pleasing, such as [] in Eq11.\n\nSome questions about the proof are discussed in Question part."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "— Could you address the comment above regarding Appendix C.2? I acknowledge that I may have missed something.\n\n— How does the approach with class-specific gradient noise in Appendix D.7 compare with an approach that adds gradient noise for every class?\n\n— There are some results in the PAC-Bayesian literature on generalization bounds for the confusion matrix (e.g. Morvant et al, “PAC-Bayesian Generalization Bound on Confusion Matrix for Multi-Class Classification”). How does your work relate to this literature?\n\n— Lemma 2: Does $V$ depend only on $W$ here or on $W$ and $z_{[2n]}$?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper begins by motivating the problem at hand in a very nice way, illustrating that classwise error discrepancies occur and do not behave in ways that are immediately obvious. The presented results are evaluated thoroughly, and interesting extensions are discussed. While the core of the derivations rely on standard techniques, there are some technical intricacies to be dealt with, and the present application is new."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the generalization performance of learning algorithms are studied on a classwise level—i.e., bounds are provided between the population loss for samples on a class and the training loss for samples from that class. These bounds are obtained using information-theoretic tools, i.e. the KL divergence and conditional mutual information. Empirical evaluations are performed to verify that classwise discrepancies exist in real applications, and that the proposed bounds correlate with them. Several extensions and connections to related problems (subtask, sensitive attributes) are presented."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Some more discussion on the topic of imbalanced data sets and how this affects classwise generalization error would be of interest. For instance, explicitly stating that the classes in CIFAR10 are balanced could be useful for the reader.\n\nThe discussion in Appendix C.2 does not seem to be accurate. Unless I’m mistaken, the following appears to be perfectly valid:\n\n$$ E_{\\hat Z_{[2n]}, U, W}[ \\frac{1}{n} \\sum_i \\ell(\\hat Z_i^{-U_i}, W) - \\ell(\\hat Z_i^{U_i}, W)] = \\frac{1}{n} \\sum_i \\big( E_{\\hat Z_{i}, U, W}[ \\ell(\\hat Z_i^{-U_i}, W)] - E_{\\hat Z_{i}, U, W}[ \\ell(\\hat Z_i^{U_i}, W)] \\big) $$\n$$ = \\big( E_{\\hat Z_{1}, U, W}[ \\ell(\\hat Z_1^{-U_1}, W)] - E_{\\hat Z_{1}, U, W}[ \\ell(\\hat Z_1^{U_1}, W)] \\big) = \\big( E_{\\bar Z, \\bar W}[ \\ell(\\bar Z, \\bar W)] - E_{Z, W}[ \\ell(Z, W)] \\big) $$\n\nwhich is the generalization gap. First step was linearity of expectation, second used the symmetry of the algorithm + iid (despite the altered process, the sample pairs are iid), and the last one used that $WW$ and $hat Z_1^{-U_1}$ are independent. The fact that the generalization gap is linear in the test/train losses mean that the $y$-specificity of the pairs is marginalized out.\n\nNow, if one were to consider bounds where the left-hand-side was given in terms of a function of train/test loss (e.g., the binary KL divergence as in the Maurer-Langford-Seeger bound), this would no longer hold.\n\nMinor:\n\nShould $n_y <n $ on line 136 be $\\leq$?\n\n“$n$ super-samples” on line 193: in the original paper from Steinke and Zakynthinou, the term “supersample” was used to describe the full set of $2n$ samples.\n\nCorollary 2 and Theorem 5 (e.g.): Would be nice to adjust the size of brackets.\n\nLine 1070: “requieres”"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- In the example of \"truck\", the authors show that their bound captures the behavior of the noisy case. Is it possible to explain the increase in the error w.r.t samples using the proposed bound?\n- In Remark 3, the authors mention that their bound is discrepancy-independent. However, the reason for this is not their bound, but the change of the definition from $L_E(W, S)$ to $L_{E_Q}(W, S)$. I would suggest the authors rephrase this part and perhaps break it down into two factors, one due to the change in the definition (still dependent on the discrepancy), and one the bound (discrepancy-independent) to avoid being misleading.\n- In [1], the authors provide a general framework for information-theoretic bounds which saves a lot of proof as long as the authors can show some regularity of the distributions. Since the authors' technique of proof follows the same techniques in the general generalization bounds, have authors considered using this framework?\n\n\n[1] Chained Generalisation Bounds, Clerico et al. COLT 2022"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well-written, and easy to follow. The authors have motivated their work nicely both at the beginning of the paper and the end by providing specific examples of some scenarios in which the class-wise generalization bound can be insightful like the sub-task problem as a specific case of distribution shift. The idea of caring about each class independently is new and in some cases tightens the pre-existing bounds."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies the class-generalization error bound as opposed to the more traditional case of expected generalization over the whole data distribution. The authors provide a new class-specific generalization definition and give multiple information-theoretic bounds based on the KL divergence (more classic), and the super-sample technique (maybe somewhat newer). They analyze the tightness of their bounds in some experiments on CIFAR10 and CIFAR100 showing the failure of the general generalization bounds and the success of their class-specific approach. Finally, the authors provide some examples of the applications in which the provided bounds can be useful and add new insights."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The technical contribution of the paper is limited. While the definition of class-wise generalization bound is new, the results are heavily based on the previous works [Xu and Raginksy 2017, Zhou et al 2022, Harutyunyan 2021, Steinke and Zakynthinou 2020, Clerico et al 2022, Wang and Mao 2023], and follow the same flow of proof, without any particular novelty. It would be useful if the authors mention the technical challenges their definition might propose to the problem and how it differs from the challenges one needs to overcome while taking the general generalization bounds.\nThe insight of some bounds being tighter than others again is well-studied in previous works. However, I agree with the authors that the tightness of class-wise generalization as opposed to general generalization bound in Corollary 1 is new."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper introduces and explores the concept of \"class-generalization error\" from an information-theoretic perspective."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024classwise,\ntitle={Class-wise Generalization Error: an Information-Theoretic analysis},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zRsFAUQDRk},\nnote={under review}\n}"
},
"abstract": {
"value": "Existing generalization theories for supervised learning typically take a holistic approach and provide bounds for the expected generalization over the whole data distribution, which implicitly assumes that the model generalizes similarly for all different classes. In practice, however, there are significant variations in generalization performance among different classes, which cannot be captured by the existing generalization bounds. In this work, we tackle this problem by theoretically studying the class-generalization error, which quantifies the generalization performance of the model for each individual class. We derive a novel information-theoretic bound for class-generalization error using the KL divergence, and we further obtain several tighter bounds using recent advances in conditional mutual information bound, which enables practical evaluation. We empirically validate our proposed bounds in various neural networks and show that they accurately capture the complex class-generalization behavior. Moreover, we demonstrate that the theoretical tools developed in this work can be applied in several other applications."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"information-theoretic bounds",
"generalization error",
"class-bias"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/be609e33d84f0663ec0e6aca4567da47599ab8c2.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Class-wise Generalization Error: an Information-Theoretic analysis"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zSHoaTNlmA | Segmentation using efficient residual networks with attention-fusion modules | main | Active | Segmentation;Attention mechanisms;Efficient residual networks | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 1;3;3;5 | 5;4;4;4 | 2;3;2;2 | 1;3;2;2 | 2;3;2;1 | 3 | 4.25 | 2.25 | 2 | 2 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the \"Weaknesses\" above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposed method improves the segmentation results based on the transformer architecture, which is lightweight."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a transformer with an encoder-decoder structure to fuse the global and local information from the image for semantic segmentation. The proposed method improves the semantic segmentation on multiple datasets, demonstrating its effectiveness."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It should be noted that IEEE CVMI has accepted the manuscript \"SERNet-Former: Segmentation by Efficient-ResNet with Attention-Boosting Gates and Attention-Fusion Networks.\" Although CVMI has not yet provided the official version of the accepted paper, the author has provided a GitHub repository, which indicates that CVMI has accepted the paper. Furthermore, the figures and experimental results in the GitHub repository with arXiv and CVPRW versions are the same as those in the paper submitted to ICLR. The author should clarify this.\n\n2. Apart from the above point, I find that this paper's presentation is of low quality. It lacks the motivation to propose a new method of fusing local and global semantics, which has been well-known for improving semantic segmentation performance. I suppose this motivation is presented in the introduction, which is missed in every part of the paper. Though the performances have been compared in the experimental section, I still cannot figure out why the proposed method yields better results. Furthermore, the presentation of the method lacks the necessary information. The critical Figure 2 fails to provide a clear illustration of the method. The relationship between Figure 2 and the equations is also unclear. This fact further disallows the reader to understand the insight behind the method.\n\n3. Though the proposed method improves the segmentation results, it still lags behind other methods on important datasets (see test set on Cityscapes in Tab 4).\n\nBased on the above points, I believe this submission fails to meet the ICLR standard and recommend its rejection."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I noticed that this paper has already been accepted by IEEE CVMI 2024, titled \"SERNet-Former: Segmentation by Efficient-ResNet with Attention-Boosting Gates and Attention-Fusion Networks.\"\n\nAdditionally, I have a question regarding:\n\n1. **What specific metrics were used to evaluate the performance of SERNet-Former compared to existing segmentation methods, and how do these metrics support the claims made by the authors regarding its effectiveness?**\n\n2. **Can the authors provide more details on the design choices behind the attention mechanisms used in SERNet-Former and how they contribute to the model's overall performance in segmentation tasks?**"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- **Innovative Approach**: The paper presents a novel method, SERNet-Former, which combines Efficient-ResNet with attention mechanisms, demonstrating a promising advancement in segmentation tasks.\n- **Comprehensive Experiments**: The authors conduct extensive experiments across various datasets, showcasing the effectiveness of their approach and providing a thorough comparison with existing methods.\n- **Clear Presentation**: The manuscript is well-organized, with a logical flow that makes the methodology and results easy to follow, enhancing the overall readability."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper \"SERNet-Former: Segmentation by Efficient-ResNet with Attention-Boosting Gates and Attention-Fusion Networks\" introduces an innovative segmentation framework that leverages advanced attention mechanisms within a robust network architecture. The authors provide comprehensive experimental results that validate their approach against existing methods, demonstrating its effectiveness across multiple datasets. However, the manuscript could be improved by addressing the limitations of the method, expanding the discussion of the ablation studies, and including a broader range of comparative benchmarks. Overall, this work represents a significant contribution to the field of image segmentation. \n\n**However**, I noticed that this paper has already been accepted by IEEE CVMI 2024, titled \"SERNet-Former: Segmentation by Efficient-ResNet with Attention-Boosting Gates and Attention-Fusion Networks.\" You can view the acceptance list for the conference at https://cvmi2024.iiita.ac.in/AcceptedPapers.php. After comparing the version in the GitHub repository (https://github.com/serdarch/SERNet-Former) with the version submitted by the authors to ICLR, it appears that there are only minimal differences between the two."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Limited Discussion on Limitations**: The paper could benefit from a more in-depth discussion of the limitations of the proposed method, particularly in relation to different types of data or specific segmentation challenges.\n- **Insufficient Detail in Ablation Studies**: While the authors present some ablation studies, additional detail on the impact of each component in the network would strengthen the understanding of their contributions.\n- **Comparative Analysis**: The comparison with state-of-the-art methods could be more robust, particularly by including more recent benchmarks to provide a clearer context for the performance claims."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "-"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "-"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "-"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Considering that this paper has already been accepted by IEEE CVMI, I think it should be rejected."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Why is the sigmoid function used as the activation function in the AbG module? Will it aggravate the gradient vanishing problem during training? Have you tried other activation functions?\n2. Dilation-based convolution is used in DbN module, why not use dilation convolution in encoder and decoder part?\n3. During upsampling, the image size changes from H/4, W/4 to H, W. Why not use progressive upsampling?\n4. From Table 3 and Table 4, your performances are not as good as InternImage and VitAdapter-L(test mIoU). What are the parameters and inference speed(e.g millisecond) of these two methods? How do they compare to yours?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.This paper has made useful explorations in the fusion of global and local information, bringing some inspiration to this field.\n2.Experimental results show that this method has some advantages"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes AbGs,AfNs to fuse global and local semantic information in segmentation. Attention-fusion networks are desined in the decoder part to improve the efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The writing of this article is poor. Many sentences are not clear and not easy to understand. Some sentences are too long and difficult to understand. e.g. line 125: The multi-scale problem in computer vision can be described as the discrepancy in integrating the different sizes of spatial and channel-based semantic information of an object acquired from the global and local contexts of segmentation networks.\nline 203:It is aimed at developing an encoder-decoder architecture with additional attention mechanisms to get efficient segmentation networks fusing semantic information from different contexts by regarding the multi-scale problem.\n\n2.The method in this article lacks insight, and many designs are tricky. e.g. Why are there two consecutive layers (AbM4, AbM5) in H/8 and W/8 resolutions? For another question, please refer to Question 2.\n\n3.The experimental analysis is not enough, and the ablation experiment is not very sufficient. More details can refer to Question 4.\n\n4.This paper seems to have multiple submissions,which was accepted by IEEE CVMI previously."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Unique efficient residual network with attention mechanisms and fusion networks are developed to overcome the increasing computational cost of fusing semantic information from global and local contexts of segmentation networks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024segmentation,\ntitle={Segmentation using efficient residual networks with attention-fusion modules},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zSHoaTNlmA},\nnote={under review}\n}"
},
"abstract": {
"value": "Fusing the global and local semantic information in segmentation networks is a challenging task with the problems of computational cost and long-range dependencies to improve state-of-the-art methods. Based on the recent success of transformers and attention mechanisms, this research utilizes an encoder-decoder architecture named SERNet-Former, integrating convolutional neural networks with attention-based algorithms for efficient segmentation. Accordingly, attention-boosting modules are employed together with the conventional residual networks to deal with the computational cost in deriving the feature maps of the global context in the encoder, generating the unique baseline architecture, Efficient-ResNet. The decoder network is developed with additional attention-fusion networks leveraging the semantic information from the global and local contexts. Attention-fusion networks also deploy additional convolution layers in the decoder part and improve the efficiency of the network in the one-to-one conversion of the semantic information. The challenging CamVid and Cityscapes datasets are used to develop and test the network and observe the improvements by the proposed methods applied to the residual networks. Accordingly, SERNet-Former sets state-of-the-art results (84.6 \\% mean IoU) on the CamVid and (87.3 \\% mean IoU) the Cityscapes validation datasets, and challenging results (84.8 \\% mean IoU) on the Cityscapes test dataset. The project repository will be shared with the reader."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Segmentation",
"Attention mechanisms",
"Efficient residual networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a13cafe0d239e4c3a166221d02e0ba87172a2ec7.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Segmentation using efficient residual networks with attention-fusion modules"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zSUXo1nkqR | TreeX: Generating Global Graphical GNN Explanations via Critical Subtree Extraction | main | Active | GNN Explainability;global-level;XAI;Explainable AI | interpretability and explainable AI | 3;3;3;3;5 | 4;3;3;5;4 | 2;2;3;2;2 | 2;3;2;1;2 | 3;2;2;2;2 | 3.4 | 3.8 | 2.2 | 2 | 2.2 | 0.133631 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Providing global explanations is a crucial aspect in the development of trustworthy GNN models.\n - The paper provides adequate background information through preliminaries and related sections.\n - The effectiveness of the proposed method is evaluated by multiple datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method for providing global explanations in GNNs, specifically targeting maximally powerful MPGNNs. The approach leverages clustering based on node embeddings within MPGNNs to explain substructure information at a global level. Its effectiveness is evaluated across various datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The methodological details in the paper are unclear. For example, while it mentions identifying graph substructures based on node embeddings, the specific approach is not adequately detailed, making it challenging to understand.\n - A significant issue is the limited evaluation measures, with insufficient justification provided. Various measures, such as sparsity, fidelity (+ and -), and fidelity\\delta [1], are commonly used and could be considered in the evaluation.\n - More comparisons with existing global XAI methodologies are needed. Methods like D4Explainer [2] and TAGE [3], which also can provide global explanations, would enhance the baseline comparisons.\n\nReferences:\n\n[1] Zheng et al., \"Towards Robust Fidelity for Evaluating Explainability of Graph Neural Networks,\" ICLR 2024.\n\n[2] Chen et al., \"D4Explainer: In-distribution Explanations of Graph Neural Network via Discrete Denoising Diffusion,\" NeurIPS 2023.\n\n[3] Xie et al., \"Task-Agnostic Graph Explanations,\" NeurIPS 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Has the authors considered a more basic baseline, where we can use an attention-based aggregator over last layer node embeddings to make the final graph level prediction? The, we can simply return the top-k subtrees based on the attention scores. I found this method to be more convenient, easier to implement, and more efficient to compute. I did not get how the proposed approach fundamentally differs from that simpler baseline."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The proposed method is intuitive and technically sound.\n- The experimental analysis is abundant."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a type of global GNN explanation methods, by extracting the subtrees incurred by the GNN message passing mechanisms. The paper argues that this can provide more intuitive local, class, and global level explanations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- First, the definitions for different types of GNN explanations are quite vague. This paper claims that GNN explanation methods can be categorized into local and global levels; it claims that instance-level explanation is local-level. Under this definition, the proposed method should be considered as local-level, since it only works in the instance-level graph graph classification task where all the subtrees are extracted from the graph to be predicted. \n- Second, the proposed methods only seem to work with graph-level prediction tasks and do not seem to work with node-level prediction tasks. I did not find the paper explicitly discussing that. The existing popular GNN explainability methods, such as GNN Explainer, can work with both node and graph-level prediction tasks. The paper should clearly emphasize its limitations.\n- A key idea of this paper - last layer node embedding represents the full L-hop subtrees, is a well-known result in the GNN research domain (for example, it has been taught throughout the Stanford CS224W course with millions of views on Youtube). The justifications in Section 4.2 look redundant to me. Furthermore, Theorem 4.2 and its proof are a direct application of the results from the GIN paper, which is also redundant. \n- Overall, I do not find the proposed method offers a new understanding of the explainable GNN domains, especially given that it can only work with graph-level prediction tasks, and the performance is only on par or even worse than existing methods (Table 1). I am happy to defend my opinion further if needed"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "I have some questions regarding the methodology for constructing concepts based on the last-layer embeddings. \n\nHow does the method ensure these clusters are reflecting meaningful, distinct structural concepts within the graph? Elaborated below.\nThe paper uses last-layer embeddings to construct concept. It is true that the last layer embedding for a specific root node aggregates the nodes within L-hops. However, this aggregation would overlook finer subgraph features (i.e. specific motifs or relational patterns[1][2]). Will this affect the explainability and how does the method address this limitation?\nAdditionally, how to validate the meaningfulness of these clusters? Regarding the clustering algorithm, given the similarities in last-layer embeddings, k-means may face issues that embeddings can be smooth or overlapping. As a result, it might not distinguish nuanced structural variations effectively. Did you evaluate whether k-means clustering accurately captures meaningful and distinct concepts in the last-layer embeddings? Do you consider alternative clustering approaches for finer clustering?\n[1] GNNExplainer: Generating Explanations for Graph Neural Networks\n[2] PGM-Explainer: Probabilistic Graphical Model Explanations for Graph Neural Networks"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed method is fast comparing to previous subgraph based method with last layer embedding after message aggregation, which makes it potentially more scalable comparing to subgraph based enumeration method.\n\nThe proposed method can cover both local and global concept extraction."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents TreeX, a method for GNN explainability. It extracts subtree information to construct local concepts, global concepts, and further generate rules for class-specific interpretations. TreeX is computationally efficient compared to previous subgraph-based methods, as it leverages message passing to capture subtree features, while still delivering meaningful concepts for interpretability."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The subtree features extracted by last layer embedding might not be able to fully reflect meaningful concept, and could ignore certain patterns that are useful for explainability. See questions for more details.\nThe evaluation is not sufficient, deeper experiments should be done on more architectures (GCN, GraphSAGE, GAN, etc), as different GNN architectures have varying expressive power and aggregation mechanisms. Besides, variations in last layer embedding quality across architectures could influence the interpretability in TreeX."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1. The authors currently evaluate the performance of the proposed method under different random seeds when splitting the data. Given that clustering algorithms are sensitive to initialization based on random seeds, how do the explanation results change under different cluster assignments resulting from different initialization?\n\nQ2. Why are the extraction processes for local concepts in Eq. 4 and global concepts (which are closest to the center of cluster $U_j$) from clusters different? Can you further discuss the motivation behind the proposed method?\n\nQ3. How can explicit local explanations be derived from the global rules in the form of presenting important subgraphs?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Providing local and global explanations is crucial for understanding predictive outputs at both individual and class levels. Figure 1 offers an insightful perspective on how each instance's prediction compares to class-specific patterns, making it easier to debug predictive outputs. Furthermore, extracting concepts based on subtrees presents a novel approach to mining meaningful patterns."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "TreeX introduces an explanation method primarily for graph classifiers, providing both local and global explanations based on the subgraph concept derived from their proposed subtree extraction method. Local concepts are extracted from subtrees in individual instance graphs. Final local concepts for each input graph are obtained by clustering through $k$-means. Global concepts are then extracted by aggregating and clustering these local concepts. To understand the impact of the discovered global concepts, the weight of each concept is optimized to represent the prediction of a certain class."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. The proposed method's heavy reliance on K-means clustering for both local and global concept extraction introduces several inherent weaknesses. Consequently, the proposed method exposes the weaknesses and limitations of clustering such as initialization issues, sub-optimal issues, the sensitivity of hyper-parameter $k$, and quality issues such as under-clustering or over-clustering. Due to this well-known limitation, the assignment of clusters is likely to be sensitive. Thus, explanations based on the clustering method may not be robust. \n\nW2. The process of providing final explanations in Figures 8, 9, 10, and 11 is not clear. Based on class-specific global rules, connecting concepts and subgraphs in the specific input graph. \n\nW3. Experiments are not thorough in several aspects as below. \n\n(1) A quantitative evaluation of global explanations is lacking, which is a key contribution of the proposed method. This aspect should be more thoroughly examined.\n\n(2) Global explainers in the graph domain such as XGNN [1] and GNNInterpreter [2] are not considered in the comparison.\n\n(3) No visualization of the local explanations. \n\n(4) AccFidelity is an easy metric to achieve decent performance as it counts the explanation as correct even when $\\hat{y}_i=0.51$ in binary classification tasks. Nevertheless, authors only use $Fid^{-}$, excluding the $Fid^{+}$ [3] which is the most common measurement in many literatures. \n\n(5) The time analysis focuses solely on explanation inference, omitting the process of concept extraction. This process likely incurs significant computational costs due to its use of k-means algorithms in twice and post-processing steps to extract final concepts. A more comprehensive time analysis would provide a clearer picture of the method's efficiency.\n\nMinor typo in line 352, e.g., In the he BA-2Motifs data.\n\nReference\n\n[1] Yuan, Hao, et al. 
\"Xgnn: Towards model-level explanations of graph neural networks.\" *Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining*. 2020.\n\n[2] Wang, Xiaoqi, and Han-Wei Shen. \"Gnninterpreter: A probabilistic generative model-level explanation for graph neural networks.\" *arXiv preprint arXiv:2209.07924* (2022).\n\n[3] Yuan, Hao, et al. \"Explainability in graph neural networks: A taxonomic survey.\" *IEEE transactions on pattern analysis and machine intelligence* 45.5 (2022): 5782-5799."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "P196: Should the $max$ operator or $top_k$ operator be in equation 4?\nP264: presentaions --> representations?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper made two innovative contributions:\n1. It providing a global explanations of GNNs on the dataset or class level in the format of subgraphs rather than nodes, language rules or prototype embeddings in the previous literature. \n2. Instead of subgraph enumeration or search, the paper proposed an efficient algorithm for subtree extraction, and using the root node embedding as subtree embedding."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed a method to explain message-passing GNNs via critical subtree extraction. It analyzes and extracts critical subtrees incurred by the inner workings of message passing. An efficient algorithm which doesn't require complex subgraph search is proposed to aggregate subtrees in the embedding space. As a result, we can make intuitive graphical explanations for Message-Passing GNNs on local, class and global levels."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have concern on the global concept extraction method outlined between lines 200-206. How can the method ensure that the spaces of embedding across $D$ graph instances are aligned? The authors should provide a detailed explanation of this process, as without it, I am skeptical about the possibility of clustering the $kD$ local graph concepts, because these concepts originate from $D$ distinct embedding spaces.\n\nIn the \"global rule generation for each class\" (line 220-), I doubt the method can always work in general situations. The global rules are generated through frequency-based analysis, and it may be challenging to ensure that these frequency-based features always possess the necessary discriminative power to distinguish the differences between classes.\n\nThe innovation of this paper is not significant. Please compare the proposed method with \n- Motif-driven Subgraph Structure Learning for Graph Classification (https://arxiv.org/abs/2406.08897)\n- STExplainer: Global Explainability of GNNs via Frequent SubTree Mining (https://openreview.net/forum?id=HgSfV6sGIn)"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce a subtree-based method to produce class-specific global-level explainability for Message Passing Neural Networks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024treex,\ntitle={TreeX: Generating Global Graphical {GNN} Explanations via Critical Subtree Extraction},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zSUXo1nkqR},\nnote={under review}\n}"
},
"abstract": {
"value": "The growing demand for transparency and interpretability in critical domains has driven increased interests in comprehending the explainability of Message-Passing (MP) Graph Neural Networks (GNNs). Although substantial research efforts have been made to generate explanations for individual graph instances, identifying global explaining concepts for a GNN still poses great challenges, especially when concepts are desired in a graphical form on the dataset level. While most prior works treat GNNs as black boxes, in this paper, we propose to unbox GNNs by analyzing and extracting critical subtrees incurred by the inner workings of message passing, which correspond to critical subgraphs in the datasets. By aggregating subtrees in an embedding space with an efficient algorithm, which does not require complex subgraph matching or search, we can make intuitive graphical explanations for Message-Passing GNNs on local, class and global levels. We empirically show that our proposed approach not only generates clean subgraph concepts on a dataset level in contrast to existing global explaining methods which generate non-graphical rules (e.g., language or embeddings) as explanations, but it is also capable of providing explanations for individual instances with a comparable or even superior performance as compared to leading local-level GNN explainers."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"GNN Explainability",
"global-level",
"XAI",
"Explainable AI"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/18278a746b5e12831cea5a991c3aeabe78422b24.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/ed8d1ad161622f82fb142d442d5d66f3a1a7a6ab.zip"
},
"title": {
"value": "TreeX: Generating Global Graphical GNN Explanations via Critical Subtree Extraction"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zSfeN1uAcx | To Code or Not To Code? Exploring Impact of Code in Pre-training | main | Active | code data;pre-training;code pre-training | foundation or frontier models, including LLMs | 3;6;6;8 | 4;3;4;4 | 2;3;3;3 | 2;3;3;3 | 3;3;3;3 | 5.75 | 3.75 | 2.75 | 2.75 | 3 | -0.080845 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In the sentence \"Figure 8 shows the results of xxx\" in Section 3.2, it seems that \"Figure 8\" should actually be \"Figure 3.\" May be a typo?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper addresses a crucial question: does code data improve performance on non-code tasks? This question has been relatively unexplored, and the paper offers a fresh perspective on the potential benefits of code data for models primarily intended for natural language tasks.\n\n- The methodology is rigorous, employing controlled experiments that examine various factors, such as code quality, proportions, and stages of pre-training. The large-scale experiments (up to 2.8B parameters) provide an in-depth understanding of how code data impacts model performance.\n\n- This work has significant implications for pre-training dataset design, suggesting that code data improves generalization and that high-quality synthetic code can be particularly impactful. This insight can guide future LLM development, especially in choosing and refining pre-training data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates the impact of incorporating code data into pre-training for large language models (LLMs).\nIt explores how code data influences various tasks, including natural language reasoning, world knowledge, and open-ended generation. \nThrough extensive ablation studies, the authors reveal that adding code data yields substantial benefits across tasks beyond coding. \nThe study emphasizes code quality and finds that synthetic, high-quality code data further enhances general performance. \nThe results imply that code data should be included in the pre-training process as it significantly enhances generalization across tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- For the scaling experiments, some key information could be explored further. For instance, does the trend observed in Figure 4 hold consistently across models of different scales? Additionally, how can the trends identified be extended or generalized to larger models?\n\n- I am also curious about the model's performance on mathematical tasks, as these, like coding, are highly logical and reasoning-intensive. Including a benchmark evaluation on mathematical tasks could provide valuable insight into whether similar gains extend to other logic-focused domains.\n\n- In terms of writing, certain critical details could be clarified. For instance, it may not be immediately clear in the text which model size (e.g., 470M or 2.8B) is being evaluated in figures such as Figures 4 and 5. Adding such context in the captions or main text would enhance clarity for readers.\n\n**Given the close relationship between scaling laws and research on dataset composition, there is still room to refine and expand upon some of the paper's exploratory findings.**"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The investigation into the role of code in LLM pre-training addresses a crucial aspect of model performance with good implications for both research and practical applications. The exploration of the impact of code data and strategies for leveraging it provides valuable insights into enhancing model effectiveness, contributing to a deeper understanding of LLM capabilities.\n\nI have two concerns:\n\n1. Limited Technical Contribution: The study primarily consists of empirical analyses without introducing new methodologies or frameworks. While the findings are good, the technical contribution could be perceived as limited due to the absence of novel approaches or theoretical advancements.\n\n2. Limited Scale: The analysis is restricted to models with 470M and 2.8B parameters, which may not capture the performance dynamics across a broader range of model sizes and architectures."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+ Important Area.\n+ Interesting Finding.\n+ Analysis of the Code Quality is Good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors examine the role of code data in the pre-training of large language models (LLMs), a practice that has become increasingly common, even for models not specifically designed for code. Despite anecdotal consensus among practitioners regarding the importance of code in enhancing the performance of general LLMs, there has been limited research analyzing its precise impact on non-code tasks. This study systematically investigates the influence of code data in pre-training on a diverse array of downstream tasks beyond code generation. The authors pose the question: “What is the impact of code data used in pre-training on a large variety of downstream tasks?” Through extensive ablation studies, they evaluate models ranging from 470M to 2.8B parameters across natural language reasoning tasks, world knowledge tasks, code benchmarks, and LLM-as-a-judge win rates. The results consistently demonstrate that code serves as a critical building block for generalization well beyond coding tasks. Improvements in code quality significantly enhance overall task performance, with findings indicating up to an 8.2% relative increase in natural language reasoning, a 4.2% boost in world knowledge, a 6.6% improvement in generative win rates, and a 12x enhancement in code performance when compared to text-only pre-training. The authors suggest that investing in code quality and retaining code during pre-training can yield substantial benefits across various tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Technical Contribution is Weak.\n- Limited Scale."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Given the observed performance drop on certain NL tasks, how does the study reconcile this finding with the broader claim of code data’s benefits for NLP tasks?\n\n2. Have the authors considered evaluating the pre-trained models on additional natural language tasks to better understand the generalization effects of code data?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "• The paper provides insights into the effects of code data on pre-training across a range of NLP tasks.\n\n• Experiments are conducted with multiple configurations, such as balancing code and text data, adding depth to the analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the impact of code data in pre-training for NLP tasks. The authors pre-train Transformer models with sizes ranging from 470M to 2.8B parameters with ablations of code and non-code data. The pre-trained models are evaluated on downstream tasks such as NL reasoning, world knowledge, and code benchmarks. Experimental results show that incorporating code into pre-training has a positive impact on non-coding tasks. In addition, improvements to code quality have an outsized impact across all tasks. In particular, compared to text-only pre-training, the addition of code results in up to relative increase of 8.2% in natural language (NL) reasoning, 4.2% in world knowledge, 6.6% improvement in generative win-rates, and a 12x boost in code performance respectively. The results suggest future investments in code quality and preserving code during pre-training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "• The findings are somewhat unsurprising, as incorporating diverse data sources (like code) in pre-training is known to improve model performance—a principle demonstrated by previous studies, including PaLM (Chowdhery et al., 2022), Gopher (Rae et al., 2022), and Bloom (Workshop et al., 2023). This paper extends these findings by analyzing code quality and proportion but does not significantly depart from established conclusions, such as the importance of data quality in pre-training (https://arxiv.org/abs/2207.05579).\n\n• Figure 4 suggests that including code data may actually reduce performance on some natural language tasks, which appears contradictory to the paper’s main claim. This inconsistency weakens the overall reliability of the conclusions.\n\n• The pre-training models used in this study are relatively small (470M and 2.8B parameters), which limits the generalizability of the findings. Larger models may be necessary to substantiate conclusions regarding the effects of code data on NLP pre-training."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Could the authors clarify whether the Balanced LM (50% text and 50% code) incorporates markup-style data in its training mix? The paper specifies that the Code-initialized model includes 80% code data and 20% markup-style data, but it remains unclear whether this type of data is also included in the Balanced LM."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- _Novelty_: This work is a first-of-its-kind, comprehensive study on how code data in pre-training affects LLMs across various natural language and coding tasks, offering new insights into data requirements for LLM pre-training.\n\n- _Comprehensiveness_: The authors carry out a broad set of experiments, exploring various scales of model size, data quantity, and training stages. Their work provides actionable guidance for future LLM development."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "## Summary:\nThe authors conduct a systematic investigation into the role of code data in the pre-training phase of LLMs, assessing the impact on three task categories: natural language reasoning, world knowledge, and code generation. They analyze three pre-training stages, including initial pre-training, continual pre-training, and a cooldown phase; evaluate the effect of code data’s proportion, quality, and properties on the model’s performance across the three task categories. Their results demonstrate that incorporating code data enhances model performance across all the pre-training stages, suggesting a balanced approach with optimized code data quality can boost the model’s ability in reasoning, general knowledge, and coding."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Details on High-Quality Code Data**: A significant portion of the paper’s conclusions, particularly in Section 3.4, emphasize the positive impact of incorporating high-quality code data on LLM performance across different task categories. However, the description of the high-quality code data is limited to that it is proprietary and synthetically generated. The lack of transparency about the dataset's characteristics, such as the source, data format, or complexity raises questions about what specifically constitutes \"high quality\" and how these qualities directly influence performance. More details on criteria for data quality would strengthen the paper and enable the research community to apply similar quality standards in future work.\n\n2. **Potential Bias in Generative Ability Evaluation**: The evaluation of generative ability relies on the LLM-as-a-Judge approach, specifically through win-rate scores on the Dolly-200-English dataset, which is designed to reflect open-ended, non-code tasks. The authors attribute improvements in generative quality to the inclusion of code data in pre-training, arguing that code data enriches the model’s ability to generate preferred outputs. However, using LLMs to evaluate other LLM outputs has known limitations[1], including potential biases toward verbosity, structured formatting, and positional advantage. Moreover, the inclusion of markup-style code may enhance readability and formatting, making responses more appealing to the LLM evaluator. To mitigate ambiguity, qualitative examples of output differences would be helpful in illustrating how the inclusion of code data contributes to generative improvements beyond format and readability alone.\n\n3. **Minor Issues**: \n - In Section 3.2, Figure 3 is referenced incorrectly as Figure 8, which could confuse readers. 
\n - The legend for Figure 8 lacks a label for the gray bar, which reduces clarity in interpreting the data presented.\n\n[1] Zheng, L., Chiang, W. L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., ... & Stoica, I. (2023). Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36, 46595-46623."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Exploring the impact of code data in pre-training"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024to,\ntitle={To Code or Not To Code? Exploring Impact of Code in Pre-training},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zSfeN1uAcx},\nnote={under review}\n}"
},
"abstract": {
"value": "Including code in the pre-training data mixture, even for models not specifically designed for code, has become a common practice in LLMs pre-training. While there has been anecdotal consensus among practitioners that code data plays a vital role in general LLMs' performance, there is only limited work analyzing the precise impact of code on non-code tasks. In this work, we systematically investigate the impact of code data on general performance. We ask “what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation”. We conduct extensive ablations and evaluate across a broad range of natural language reasoning tasks, world knowledge tasks, code benchmarks, and LLM-as-a-judge win-rates for models with sizes ranging from 470M to 2.8B parameters. Across settings, we find a consistent results that code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact across all tasks. In particular, compared to text-only pre-training, the addition of code results in up to relative increase of 8.2% in natural language (NL) reasoning, 4.2% in world knowledge, 6.6% improvement in generative win-rates, and a 12x boost in code performance respectively. Our work suggests investments in code quality and preserving code during pre-training have positive impacts."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"code data",
"pre-training",
"code pre-training"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/a3ea962bd1ab5e45464181f153dfdb8e44fbd870.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "To Code or Not To Code? Exploring Impact of Code in Pre-training"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zUD06a6leU | Spectraformer: A Unified Random Feature Framework for Transformer | main | Active | linearized attention;transformer;efficient transformer;kernel;random features | learning theory | 3;3;3;6 | 3;3;4;4 | 2;2;3;3 | 2;2;2;2 | 2;3;4;3 | 3.75 | 3.5 | 2.5 | 2 | 3 | 0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "Please see the weakness section, my ratings for the paper is based on the weakness (especially the second point)."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "-- The paper’s proposed unification exposes gaps which can be filled with novel combination of linear kernel methods\n\n-- The paper is well written and easy to follow\n\n-- On LRA benchmark the novel combinations show some promise"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the linearization of attention mechanisms in Transformers using kernel approximation. The authors propose to unify the wide range of work on linearizing the attention kernel in a unified framework. To that end they propose to look at the linear kernel methods as approximating the weight or the component function. In the process they propose new linear methods that fill the gap. Their empirical analysis showcases that other combination methods do perform slightly better than previous SOTA methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "-- Novelty: The formulation is somewhat of a repeat of chowdhury et al 2022’s formulation. Even though it is more complete with more component functions and weights. \n\n-- Empirical evaluation is insufficient. LRA dataset on its own is not sufficient to evaluate which combination works best. The LRA benchmark is old and doesn’t satisfy the the current requirements. IMHO for this paper to pass the acceptance threshold, I would want a much more thorough evaluation, on multiple benchmark dataset and multiple modalities. Then any conclusion made from the analysis would be useful for the practitioner.\n\n-- it is unfortunate that there isn't a clear winner or new novel recommendation that came out of the emperical analysis (which the author would recommend to a practitioner)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the extra computational and memory footprint corresponding to the additional parameters introduced in this mechanism ?\n2. The most general formula presented in the paper (Eq. 11) does not provide an unbiased estimation of the softmax kernel. Is there a subset of the instantiations coming from Eq. 11 that is a strict superset of the previously known low-rank linear attention mechanisms providing unbiased estimation of the softmax kernel and still providing unbiasedness ?\n3. The Authors say: \" Our empirical findings indicate that different kernels are good at different tasks and that kernel choice is fundamental to performant models\". More details would be important. What can we say about right kernels for particular applications (e.g. NLP, Vision, long-range-, short-range-attention tasks, etc.). Did the conducted experiments provide any additional insight to shed new light on that ?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The research on linear low-rank attention methods for Transformers is important for several practical reasons (fast inference, e.g. for on-device-deployment, etc.) and this paper aims to improve the existing methods in the field. The presented extension is sound and the idea to represent projections as learnable vectors rather than vectors sampled from a fixed probabilistic distribution is a neat idea. The experimental section presents a comprehensive comparison with several related methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a family of Transformers, called Spectraformers, introducing a new class of linear low-rank attention mechanisms. In Spectraformer, the nonlinear map defining the transformation of queries/keys combines various nonlinear functions (applied to the queries/keys projected via Gaussian or learned projections). This formulation is general enough (Eq. 11) to cover as special cases various mechanisms introduced before for the unbiased estimation of the softmax kernel (including in particular celebrated positive random features). The Authors test presented mechanism on three tasks from the Long Range Arena benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The conclusions are stated pretty vaguely, we read: \" Our empirical findings indicate that different kernels are good at different tasks and that kernel choice is fundamental to performant models\". This is not a particularly informative statement. Learning the projections rather than taking them from a fixed distribution might introduce additional computational costs. This should be discussed in depth in the paper. Finally, LRA is a pretty old benchmark for testing long-range-attention Transformers. The paper would benefit from applying more recent benchmarks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Based on Table 3, the performances seems significantly different depending on the combination of the component functions and a learnable weight matrix. Is there a guideline for selecting the best combination before training? \n\n\n\n\n\n* When employing $Spectraformer$, it is unclear how to pick the better combinated component functions and weight matrix that can outperform the standard attention."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* This work presents the framework generalizing the random-feature based attention method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The kernel approximation, based on Bochner’s theorem, enables the computation of linearized attention to be understood as a similarity computation using component functions $\\phi(\\cdot)$ and a learnable weight matrix $W$. Based on this interpretation, recent works have improved attention methods by presenting new component functions and parameterizations of the weight matrix.\n\nThis work further explores how various combinations of weight matrices and component functions, proposed in previous works, can improve the attention method, and refers to it as $Spectraformer$."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* This work recombines the component functions $\\phi(\\cdot)$ and a learnable weight matrix $W$ presented in existing random-feature attention method, and does not present new idea for improving attention. Thus, the novelty of this work itself seems marginal.\n\n\n\n* The benefits of exploring other combinations are not convincingly demonstrated. While it is possible that certain unexplored combinations of component functions and learnable weight matrices could improve accuracy, training time, or memory efficiency for some condition, this work does not clarify when or why certain combinations might be advantageous."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "In addition to the questions above, I have a few more additional questions. \n\n- Why for any $\\mathbf{W}$, $f$ = Trig a valid component function? (line 263)\n- In line 284, what does it mean “among combination with trig RF”? This is true for positive random features as well (see Sec 4.2 in [1])\n- Finally training the kernel to mimic the spikyness of softmax can lead to performance gains as observed in [2]. Does similar observations work in this setting? \n\n[1] Rethinking Attention with Performers. Choromanski et al. ICLR 2021\n\n\n[2] The Hedgehog & the Porcupine: Expressive Linear Attentions with Softmax Mimicry. Zhang et al. ICLR 2024"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Good overview and discussion about various random feature mechanisms for linearizing attention in Transformers.\n- Interesting observation that there is no clear random feature method excelling at all the tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Attention mechanism in Transformers have quadratic time complexity in the sequence length, which can create computational bottlenecks. Various methods have been proposed to address this, with kernel-based approaches for \"linearizing\" attention gaining significant interest in the ML community. In this work, the authors introduce Spectraformer, an unified framework for approximating and learning the kernel function in linear Transformers. The authors explore a range of sampling techniques and random features, demonstrating that different tasks benefit from different combinations of kernel methods, with no single method emerging as superior across all cases. The empirical results are evaluated using the LRA benchmark."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Please rewrite Sec 3.4 in terms of pseudocode. Please do not point to specific lines of code.\n- Writing needs to be improved, for example the concepts are mentioned before they are defined. Def 3.2 defines a valid component function even though this concept was mentioned in line 262-263.\n- QMC is explored in the context of shift-invariant kernels in [1] and also in general random features [2]. It feels incremental without any theoretical results as the authors are merely combining different matrices (that have been used to reduce variance) in some well-known modern random feature mechanism. It would be an extremely valuable work if the authors show that incorporating these matrices would lower variance over the orthogonal ones. \n- The empirical results needs to be stronger. Specifically : \n * How does these methods perform in simply approximating the softmax kernel. \n * We generally see a quality gap when training Performers from scratch compared to regular transformers, does any of this combination close the gap? For ex. see performance gap between Performer-ViT vs Regular ViT (Fig 8. in [3]).\n- As pointed out by the authors, different combinations work well for various tasks thus the paper should provide some guidance for practitioners on how to choose the right combination.\n\n[1] Quasi-Monte Carlo Feature Maps for Shift-Invariant Kernels. Avron et al. JMLR 2016.\n\n[2] Hybrid Random Features. Choromanski et al. ICLR 2022.\n\n[3] From block-Toeplitz matrices to differential equations on graphs: towards a general theory for scalable masked Transformers. Choromanski et al. ICML 2022."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Spectraformer is a generic random feature framework for kernelized attention, that systematically compares past works with new alternatives. Hence, it identifies a novel combination of algorithms outperforming existing random feature transformers."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024spectraformer,\ntitle={Spectraformer: A Unified Random Feature Framework for Transformer},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zUD06a6leU},\nnote={under review}\n}"
},
"abstract": {
"value": "Linearization of attention using various kernel approximation and kernel learning techniques has shown promise. Past methods use a subset of combinations of component functions and weight matrices within the random features paradigm. We identify the need for a systematic comparison of different combinations of weight matrices and component functions for attention learning in Transformer. In this work, we introduce $\\textit{Spectraformer}$, a unified framework for approximating and learning the kernel function in linearized attention of the Transformer. We experiment with broad classes of component functions and weight matrices for three textual tasks in the LRA benchmark. Our findings indicate that different kernels are good at different tasks and that kernel choice is fundamental to performant models. Our code is available at: https://anonymous.4open.science/r/spectraformer-8A97."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"linearized attention",
"transformer",
"efficient transformer",
"kernel",
"random features"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/997b95cb95f9a53fd3e5574ca082c944b48c2f95.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/58e87dd11d6fa1c3856b778765c9bc8fd5a8097c.zip"
},
"title": {
"value": "Spectraformer: A Unified Random Feature Framework for Transformer"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zUXejfUAbx | On Logical Extrapolation for Mazes with Recurrent and Implicit Networks | main | Active | implicit networks;topological data analysis;logical extrapolation;out-of-distribution extrapolation;limit cycles;dynamics;mazes. | transfer learning, meta learning, and lifelong learning | 3;3;3;5;5;6 | 3;3;3;2;3;4 | 3;4;3;2;3;3 | 1;2;1;2;2;3 | 2;3;3;2;3;3 | 4.166667 | 3 | 3 | 1.833333 | 2.666667 | 0.237915 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "* Have the authors tried reframing the problem to classification instead of segmentation? Say, the authors were to generate a maze classification dataset where the stimuli were either mazes with a solution and mazes without a solution (where the green and red dots are disconnected), would generalization still be hurt in the other dimensions evaluated? Since the authors are only basing their conclusions in one dataset (Maze solving), the presented results may be more compelling when discussed with converging evidence on both settings of the problem."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* Logical extrapolation is a key ability of human vision. The authors show that prior work in this area need more exploration and highlight that scaling difficulty in dimensions other than model size hurts the out-of-distribution performance of RNNs and INNs, both of which have been the kind of networks which have shown logical extrapolation. This is a really interesting contribution and warrants the need to further explore this unsolved problem.\n* This work includes source code for generating the various difficulty levels of mazes (deadend_start and percolation), this is great and promotes tackling a more holistic solution to logical extrapolation by the community.\n* The writing, figures and accompanying captions have sufficient detail and the paper is quite a straightforward read."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors question the ability of recurrent and implicit neural networks in logical extrapolation. The authors demonstrate that prior approaches to logical extrapolation using RNNs and INNs on maze solving fail when the difficulty scales along dimensions other than maze size (degree of the start of mazes, presence of loops in the maze). Additionally, the authors also present an analysis of the RNN/INN latent dynamics where they highlight key signatures of either model's trajectory in the hidden dimension space."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* **Loose connection between the TDA analysis and lack of logical extrapolation**: It felt to me that the TDA analysis of RNN/INN latent dynamics is disconnected from the issue of logical extrapolation in RNNs and INNs. My biggest feedback to improve this paper further would be to strengthen the connection between these two explorations in the paper's writing as I believe this would greatly help in gleaning the contributions clearly. \n* **Overall framing of the story**: The current writing sounds quite critical of RNN/INNs ability to perform logical extrapolation. These are currently the main class of models from prior art that show (any) extrapolation to greater difficulty settings, and I felt that the writing was potentially overly critical of these model architectures. This is a minor weakness in my opinion, yet I still find it concerning since we don't have another architecture class of neural networks that do a better job at extrapolating to out of distribution settings in solving mazes."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "In appendix C.3, it looks like predictions sometimes fail due to lack of exact match between prediction and solution by a few pixels, but not by a whole maze “square”. Does this failure mode occur often in the training data, and how much does this explain additional failures in test?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The use of topological data analysis is to study sequences of RNN latents is novel, to my best knowledge.\n2. The description of the experimental setup is clear and the main points in the paper are conveyed in a way that is easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper analyzes the behavior of maze-solving RNNs and INNs. They take models from prior work and evaluate their generalization performance on a dataset that consists of maze problems that are harder than these models’ original training data. Unlike the training data, the test data contains mazes of larger sizes, mazes where the start node is not a dead end, and mazes with cycles. They find that the models studied generalize to larger mazes but not in the latter categories. They also use topological data analysis to study the behavior of the latent data after more RNN/INN iterations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I believe that the depth of analysis does not meet the bar of an ICLR acceptance. \n\nThis paper can be seen as a paper about generalization or interpretability. When evaluated as a paper about generalization:\n\n1. The paper observes a lack of generalization but does not provide much surprising insight or propose new techniques to improve generalization. I believe the Bansal work already covers the claim about their model generalizing to increased maze size, so the new claims are about deadend-starts and cycles. The explanation for this lack of generalization is simple:\n\n“Both DT-net and PI-net are trained to find the unique path from start to end, but when the maze has even a single loop, there is no longer a unique path. When presented with a maze that does not have a unique solution path, both models fail (see Figure 3.2 and Appendix Subsection C.3), whereas a human might reasonably reinterpret the task (e.g. “find the shortest path”, or even “find a path”) and solve it.”\n\nThe problem setup changes so certain invariants are no longer respected and the problem may no longer be well-defined. That models can fail to generalize under these conditions seems like an intuitive and well-known fact, e.g. https://arxiv.org/abs/2210.01790, https://arxiv.org/abs/2105.14111.\n\n2. The lack of generalization seems specific to the models studied. The authors do not explore whether we can easily correct for this, for instance by training on a small sample of hard data. \n\n3. The maze dataset itself is not new, so the extent of the analysis is to run previously trained models on an open source dataset.\n\n4. Failure modes are not sufficiently explored.\n\nWhen evaluated as a paper about interpretability, the analysis is observational, mostly making statements about what the latents look like and doesn’t try to explain why the dynamics occur or what the mechanism is for the model to arrive at periodic latents. 
It’s well known that latents may only be spuriously correlated with model mechanisms and may not have much meaning on their own, which is why most interpretability work includes causal evidence. For example, it’s possible that the latents are periodic in a direction that isn’t read by the final projection matrix, in which case this finding isn’t very interesting. TDA seems like an intriguing direction but needs to be supplemented with further analysis to see whether it relates to the models’ lack of generalization (though it seems somewhat unrelated, given that the PI-net always converges to a fixed point yet still fails to generalize)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "More comparison of the model success in different extrapolation directions would be useful. Consider discussing the models in terms of the features the base models have vs what features the different extrapolation directions require. Perhaps:\n- Increasing maze size is “more of the same” with the model able to reuse existing features.\n- Deadend start = false increases the search space size, eliminating a few initial “forced” moves. A deeper/longer solution path, increases the compute required. Figure 5 reflects this.\n- Base models were trained with percolation = 0, and so learnt to find the unique solution. They were not trained to select one path out of several valid paths. So Figure 6 results seem unsurprising. To pick one out of several valid paths (e.g. the shortest) the model would need a feature that distinguishes between valid paths (e.g. measure path length, and prefer minimum path length). To learn this feature likely requires more model training."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This is an interesting paper.\n\nGood analysis of two models in 3 extrapolation dimensions. \n\nGood mathematical analysis of the latents of the models.\n\nContent is well presented."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors explores whether a recurrent neural networks (RNN) and a implicit neural networks (INN) exhibits logical extrapolation, finding they do with extrapolate well respect to maze size, but extrapolate less well with respect to dead-end-start and percolation (loop paths). PCA analysis of latents shows 3 distinct modes. Topological Data Analysis (TDA) supports this finding"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The paper would benefit from going deeper in a few areas. Refer Questions section\n\nThe PCA/TDA finding of one point, two point, two circles is very interesting. Understanding how these modes relate to model performance, algorithm or similar would extend this finding. Without some implications of this finding, it’s hard to say how important this finding is.\n\nErata:\n- Page 5. Text “mazes still satisfying this condition contribute” is ambiguous. Consider using “mazes with a start position degree of 1 contribute” if this is the meaning\n- Page 5. “See Figure 3.2 for a breakdown of accuracy by start position” seems incorrect. There are multiple references to this figure that seem incorrect.\n- Inconsistent capitalization of DT-Net as DT-net. Ditto PI-Net.\n- Page 7. “Table 4” links to Table 1. Check all links."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What is the significance that the models tested cannot extrapolate well along the new axes of dead-end start and percolation? Can we apply this to other extrapolative tasks?\n\nWhat is the significance of the convergence dynamics results on this task? Do similar patterns apply to other extrapolative tasks?\n\nCan the authors show theoretical results on the extrapolative abilities of models on this maze task?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "**Originality**\n\nWhile the paper considers a task and architecture found in prior works, they conduct new analyses which reveal additional information about the inner mechanics of the model.\n\n**Quality**\n\nOverall, the experiments are thorough and well-conducted. The authors consider a number of ways to both extrapolate the task and analyze the latent dynamics.\n\n**Clarity**\n\nOverall, the paper is well-written and the figures are well-illustrated.\n\n**Significance**\n\nThis paper will likely have some significance to researchers studying extrapolation on the specific maze task studied."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies RNNs and INNs on extrapolative maze-solving tasks. The authors show empirically that prior results on networks do not generalize when the task is made more difficult on a new axis. The authors also study the dynamics of the RNNs and INNs and show that the dynamics either converge to a fixed point or a cycle."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall, the paper seems quite similar to prior work on the maze extrapolation task, and the new analyses do not seem very significant. Certainly, the authors show new results, but the broader significance to extrapolative tasks is not clear. I would encourage the authors to consider at least one other extrapolative task. Another way to improve the significance of the paper is to include theoretical results.\n\nClarity-wise, some text in figures is too small (namely, 2, 3, 4, 5, 6, 7). I encourage the authors to increase figure font size throughout."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The paper would benefit from addressing the identified experimental issues with a proposed solution and performance evaluation, which would enhance the impact of the work. Additionally, careful correction of referencing errors throughout would improve the paper's overall presentation."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This work provides evidence that challenges the claims of previous research by setting up new tests where prior work fell short, underscoring the importance of establishing limitations on earlier findings.\n\n2. The paper is well-structured across its five sections, presented with direct and concise wording, making it easy to read and follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates \"logical extrapolation\" in previously published neural network models, specifically focusing on Recurrent Neural Networks (RNNs) and Implicit Neural Networks (INNs). Prior authors defined \"logical extrapolation\" as the model's capacity to add shared-weight layers during testing, enabling it to address more complex problems than those encountered during training. In this work, the authors tackle the challenge of connecting a start and end node within maze-solving tasks. Unlike previous studies that succeeded on extrapolating maze size, this paper demonstrates that top-performing models from prior research struggle to extrapolate when additional complexity arises across different dimensions. For example, slight distribution shifts—such as allowing multiple neighbors for start nodes, or introducing cycles into previously cycle-free mazes—impede the model's ability to extrapolate.\n\nThe authors conduct a straightforward yet insightful topological analysis of the network's latent space dynamics, illuminating the underlying behaviors and convergence patterns of these networks. Findings reveal that DT-NET, the highest-performing RNN model, does not converge to a single latent point, whereas PI_NET, the top-performing INN model, does. This dynamic is explored through principal component analysis, residual analysis, and homology grouping, uncovering a complex and partially unexplored latent behavior in DT-NET. Examining network dynamics may lead to more interpretable and robust extrapolation strategies utilizing recurrence.\n\nThe paper concludes that the extrapolation capabilities of DT_NET and PI_NET are significantly influenced by the specific axis of complexity introduced. These findings challenge previous assertions about general logical extrapolation and suggest further research into why certain extrapolation dimensions are more manageable for neural networks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The primary reason for rejection is that the paper does not solve or propose any new solutions; it merely highlights where previous work falls short, without suggesting a new model or method to address the identified issues. In my view, experiments without any novel proposals are insufficient for a conference of this caliber.\n\n2. Testing on only 100 mazes (line 237) appears limited, especially given that prior work typically evaluates on a much larger sample size, often 1,000 to 10,000 mazes. I recommend that the authors consider increasing the test set size to provide more robust insights into model accuracy or provide a justification for why 100 mazes would be adequate for reliable performance assessment in comparison to these larger sample sizes.\n\n3. Figure 7's legends are very small, making them difficult to read. I suggest increasing the font size of the legends to match or be close to the main font size of the paper for improved readability. Additionally, Figures 5 and 6 are not referenced in the text, and Figure 3.2 is misreferenced twice (lines 253 and 258), where Figures 5 and 6 should be referenced instead, please double check all figure references in the text.\n\n4. Figure 5 suggests that only a few mazes were tested with four start neighbors, leading to an imbalanced distribution. I recommend testing more mazes with four start neighbors for a more balanced analysis or explaining why this case is less common or important. Additionally, the y-axis label “probability” is misleading, as it actually represents counts of correct and incorrect instances. A more accurate label, such as “Count” or “Number of Instances,” would improve clarity.\n\n5. Line 355 references Table 4, although it should refer to Table 1, as this is the only table in the paper, please double check all references in the text."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Generalizability Beyond Maze-Solving: The study currently focuses on maze-solving as a test bed for logical extrapolation. How do the authors view the transferability of their findings to other logical reasoning or extrapolation tasks? Would they consider additional experiments or theoretical discussions on the applicability of their results beyond maze-solving?\n\n2. Choice of TDA for Latent Dynamics Analysis: The use of Topological Data Analysis (TDA) provides unique insights, but could the authors clarify why TDA was chosen over other possible analysis techniques (e.g., standard trajectory or spectral analysis)? Additionally, have the authors considered other types of topological structures that may emerge in the latent space besides fixed points and cycles?\n\n3. Relation Between Latent Dynamics and Model Performance: The paper presents different latent dynamics (e.g., fixed points, cycles), but the connection to model performance remains somewhat abstract. Could the authors elaborate on how these dynamics influence logical extrapolation? For example, are certain behaviors (e.g., two-loop cycles) generally associated with better or worse generalization?\n\n4. Handling of Out-of-Distribution Shifts: The experiments show that both RNNs and INNs struggle with out-of-distribution shifts, such as deadend starts and percolation. Have the authors explored any methods (e.g., fine-tuning, architectural changes) to improve robustness to these shifts? Insights on potential adjustments would be valuable.\n\n5. Explanation of TDA in the Main Text: TDA is an advanced method that may be unfamiliar to some readers. Would the authors consider adding a high-level explanation of TDA, perhaps with intuitive examples, to make the analysis more accessible? This could help readers better understand the significance of Betti numbers and their relation to latent dynamics.\n\n6. 
Evaluation with Additional Network Architectures: The paper evaluates RNNs and INNs but does not compare them to simpler architectures. Could the authors include results for models like CNNs or MLPs on the same maze tasks? This would clarify how unique RNNs and INNs are in their extrapolation abilities and whether these findings are specific to recurrent or implicit architectures."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Originality: The paper introduces new difficulty dimensions for maze-solving (e.g., deadend starts and maze percolation) beyond simple maze size, providing fresh insights into neural network generalization. The use of Topological Data Analysis (TDA) to explore latent space dynamics is also novel in this context and adds an interesting layer to understanding neural networks’ internal behaviors.\n\n2. Quality: The experiments are well-structured, covering a variety of maze configurations. The analysis of latent dynamics using TDA methods, such as Betti numbers, is thorough and demonstrates different behaviors (e.g., fixed points, cycles), adding depth to the results.\n\n3. Clarity: The paper is clear and well-organized, with effective visual aids that support understanding of key results. The explanation of TDA concepts is accessible, which is helpful for readers unfamiliar with this method.\n\n4. Significance: This study highlights the limits of RNNs and INNs in extrapolating maze-solving tasks under varied conditions, offering valuable insights into neural network robustness and generalization. The methods and findings could inspire further research into network behaviors in complex problem-solving tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the logical extrapolation capabilities of recurrent neural networks (RNNs) and implicit neural networks (INNs) in the context of maze-solving tasks. The authors investigate whether these network architectures, when trained on simpler instances of a task (e.g., small mazes), can generalize to more complex instances (e.g., larger or differently structured mazes). Their main contributions include:\n\n1. Demonstrating that while both RNNs and INNs can extrapolate to larger mazes, their generalization is less effective when maze complexity increases through factors other than size, such as changes in the starting point configuration or the introduction of cycles.\n\n2. Introducing new difficulty axes, such as a deadend start and percolation parameters, which significantly impact model performance, revealing limits to previous assumptions about logical extrapolation.\n\n3. Providing insights into the latent dynamics of RNNs and INNs through topological data analysis (TDA). The authors observe various limiting behaviors, such as convergence to fixed points or periodic cycles, and suggest these dynamics are essential for understanding the generalization potential of such networks.\n\nOverall, the study underscores the need for careful consideration of difficulty dimensions in neural extrapolation tasks and suggests that topological tools may help improve understanding and robustness of extrapolative models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited Applicability: The study focuses solely on maze-solving tasks, which may limit the generalizability of its findings. To strengthen the paper’s relevance, it would be beneficial to test RNNs and INNs on other logical extrapolation tasks or provide more justification for why maze-solving is a suitable proxy for broader reasoning abilities.\n\n2. Complexity of TDA Analysis: The use of Topological Data Analysis (TDA) adds depth but may be challenging for readers not familiar with these tools. Including a simplified explanation or visual summary in the main text, or moving detailed technical aspects to an appendix, could improve accessibility and help readers better grasp the significance of TDA results.\n\n3. Out-of-Distribution Performance: Although the study introduces new dimensions of difficulty (e.g., deadend starts, percolation), the models show limited ability to generalize effectively in these out-of-distribution scenarios. This indicates that the models may require further improvements or modifications. Exploring architectural adjustments or fine-tuning strategies to enhance generalization across these dimensions could improve the paper’s impact.\n\n4. Interpretation of Latent Dynamics: While the paper presents examples of limiting behaviors (e.g., fixed points, cycles) in latent space, the connection between these dynamics and the models' generalization performance is somewhat limited. Providing a clearer theoretical analysis or practical insights on how these dynamics relate to extrapolation ability would make the findings more actionable and valuable.\n\n5. Evaluation Depth: The evaluation could benefit from more in-depth comparisons with other types of neural networks beyond RNNs and INNs. Testing simpler architectures, such as CNNs or MLPs, on the same maze tasks would help contextualize the benefits and limitations of RNNs and INNs, strengthening claims about their suitability for logical extrapolation."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We study out-of-distribution extrapolation using a maze dataset. We show that the extrapolating ability of existing models is less robust, and less interpretable, than previously thought."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024on,\ntitle={On Logical Extrapolation for Mazes with Recurrent and Implicit Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zUXejfUAbx},\nnote={under review}\n}"
},
"abstract": {
"value": "Recent work has suggested that certain neural network architectures---particularly recurrent neural networks (RNNs) and implicit neural networks (INNs)--- are capable of _logical extrapolation_. That is, one may train such a network on easy instances of a specific task and then apply it successfully to more difficult instances of the same task. In this paper, we revisit this idea and show that (i) The capacity for extrapolation is less robust than previously suggested. Specifically, in the context of a maze-solving task, we show that while INNs (and some RNNs) are capable of generalizing to larger maze instances, they fail to generalize along axes of difficulty other than maze size. (ii) Models that are explicitly trained to converge to a fixed point (e.g. the INN we test) are likely to do so when extrapolating, while models that are not (e.g. the RNN we test) may exhibit more exotic limiting behaviour such as limit cycles, _even when_ they correctly solve the problem. Our results suggest that (i) further study into _why_ such networks extrapolate easily along certain axes of difficulty yet struggle with others is necessary, and (ii) analyzing the _dynamics_ of extrapolation may yield insights into designing more efficient and interpretable logical extrapolators."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"implicit networks",
"topological data analysis",
"logical extrapolation",
"out-of-distribution extrapolation",
"limit cycles",
"dynamics",
"mazes."
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/c31765813686b2df48fd92843ad049c28951ecec.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/2088630b3485c0518f0d2e4d00d70684c2de8704.zip"
},
"title": {
"value": "On Logical Extrapolation for Mazes with Recurrent and Implicit Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zUlK1qMIcE | Active partitioning: inverting the paradigm of active learning | main | Active | Partitioning;Pattern-recognition;Clustering;Active learning;Modular networks | transfer learning, meta learning, and lifelong learning | 3;3;3 | 4;4;4 | 2;2;2 | 2;2;1 | 2;2;3 | 3 | 4 | 2 | 1.666667 | 2.333333 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please see weakness above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The claims of the paper are easy to understand (though I dont quite believe them, see below)\n* The experimental results one of the datasets was interesting to read"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose to partition the dataset by using predictions from multiple models.\n\nDuring training, each sub-model is allowed to submit their predictions for all points in the datasets. The datapoints are then assigned to the sub-model with the best performance, and the sub-model is trained only these datapoints. As training proceeds, the hope is that the process induces specialization in the models, which is then translated into a partitioning. There is some connection to active learning, where datapoints are chosen for which the model is most uncertain about; whereas here, the datapoints are assigned to the model with best performance.\n\nExperimental results are reported on 6 datasets, 3 of which are unidimensional datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "TLDR; I dont think the contributions of the paper meet the conference bar.\n\n* There are lots of existing work on MOEs, this paper feels like re-inventing them from scratch. There is minimal mention to existing literature, no comparisons.\n\n* The experimental results are quite unconvincing. The scale of the datasets are just too small. Why not have larger capacity models which can learn more. The scale of the datasets + model sizes (the latter I suspect is also small), makes me question if partitioning the dataset is needed at all. \n\n* Even if we assume that partitioning is required, why not compare with simpler baselines like run clustering algorithm first, and then train independent models on the clusters?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "No"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Interesting new paradigm. Even though it's similar to the ideas of mixture of experts which are well-studies in current LLMs era, the idea of applying multiple experts and partitioning datasets are interesting in active learning literatures.\n2. The number of datasets in experiments section is impressive, including 2 two-dimensional datasets and 22 datasets from UCI Machine Learning Repository."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper discusses a new learning paradigm called active partitioning, aiming to improve model performance by leveraging competition among models. The key idea is to separate and detect distinct functional patterns within datasets by rewarding models that provide the best predictions for specific data points with additional training on those points. This encourages each model to specialize in certain patterns, allowing the datasets to be divided into specialized partitions. Unlike traditional active learning, which focuses on training models based on their weaknesses to minimize training data, active partitioning emphasizes strengthening models' specialties. The approach is tested on datasets with distinct patterns (e.g., stress and strain data), showing how models can learn different partitions. The results demonstrate improved performance, with a 54% reduction in loss compared to single models handling the entire dataset, validating the effectiveness of active partitioning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of related works: The author mentions mixture of experts algorithm in Section 2.2. There is a rich body of related works regarding applications of mixtures of experts on LLMs [1, 2, 3].\n\n2. Lack of theoretical justifications. Most of partitioning experiments have theoretical guarantees and more theoretical understandings would be helpful in understanding this algorithm.\n\n3. Datasets are too simple and small scale. Code is not open-sourced. Datasets selected are mainly from UCI Machine Learning Repository where most of them are low-dimensional and small scale in terms of datasets size. Since there are no theoretical justifications, experiments should not be limited to regression tasks. \n\n4. Ablation study of network architectures. The tasks should be not limited to regressions settings and more experiments regarding various network architectures should be discussed. The authors claim active partitioning paradigm is better than active learning but many active learning algorithms have experiments showcasing there optimality across multiple networks architectures. For instance [4] performs experiments across network architectures including networks similar to LeNet and ResNet-18.\n\nReferences: \n1. Gross, Sam, et al. Hard mixtures of experts for large scale weakly supervised vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6865–6873, 2017.\n2. Zhou, Yanqi, et al. \"Mixture-of-experts with expert choice routing.\" Advances in Neural Information Processing Systems 35 (2022): 7103-7114.\n3. Riquelme, Carlos, et al. \"Scaling vision with sparse mixture of experts.\" Advances in Neural Information Processing Systems 34 (2021): 8583-8595.\n4. Ding, Zixin, et al. \"Learning to Rank for Active Learning via Multi-Task Bilevel Optimization.\" The 40th Conference on Uncertainty in Artificial Intelligence."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. I would like to know how much time the active partitioning and training of modular model will cost compared to training a single model.\n\n2. I wonder whether the competing models used for active partitioning can be directly used to combine a modular model."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The writing in this paper is easy to understand, and the use of flowcharts and other visuals makes it easier to grasp the core methods and concepts.\n\n2. The authors provide pseudocode and detailed parameter settings in the paper, and the code is included in the supplementary materials, ensuring the reproducibility of the work."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces an algorithm that leverages competition between models to partition datasets based on distinct functional patterns. Unlike traditional active learning, which focuses on minimizing data for weak areas, this approach amplifies the strengths of models, promoting specialization. The modular models, consisting of multiple expert models each focused on learning a specific partition, demonstrate significant improvements over a single model."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The number of dataset partitioning baselines compared is insufficient. In the related work section, the authors discuss other dataset partitioning methods, while the authors did not compare active partitioning with any of these methods. The authors may supplement the baselines or explain why there is no comparison between them.\n\n2. The modular model tends to underperform compared to a single model when the split dataset using active partitioning exhibits one coherent pattern or when multiple patterns have significant overlap. A related work [1] that first attempts to solve a problem with a single network and handles the unsolved portion(s) of the input domain recursively seems to be superior to the proposed method.\n\n3. The authors claim that the novelty of this work lies in “the development of a flexible partitioning method through the competition of entire models,” but what advantages does this approach offer compared to previous dataset partitioning methods? The motivation behind the proposed method needs further elaboration and clarification.\n\n4. The authors may provide a more detailed introduction to active learning and elaborate on how the proposed method invests in the paradigm of active learning.\n\n5. The paper is lack of detail in the dataset partitioning phase. The authors mention that competing models might exhibit differences, such as using ‘wider neural networks or smaller learning rates’ for different patterns, but they do not provide concrete details on how model diversity is implemented during this stage. It would be beneficial to elaborate on how these variations are chosen and how they impact the effectiveness of the partitioning process.\n\n6. The authors sometimes mix in-text and parenthetical citations throughout the related work section, such as \"Wu et al.adapted ... (Wu et al., 2004).\"\n\n[1] V Scott Gordon and Jeb Crouson. Self-splitting modular neural network-domain partitioning at boundaries of trained regions. 
In: Proceedings of the 2008 IEEE International Joint Conference on Neural Networks, pp. 1085–1091, 2008."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose an algorithm which partitions a dataset along functional patterns, based on competition and inverting the active learning paradigm."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024active,\ntitle={Active partitioning: inverting the paradigm of active learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zUlK1qMIcE},\nnote={under review}\n}"
},
"abstract": {
"value": "Datasets often incorporate various functional patterns related to different aspects or regimes, which are typically not equally present throughout the dataset. We propose a novel, general-purpose partitioning algorithm that utilizes competition between models to detect and separate these functional patterns. This competition is induced by multiple models iteratively submitting their predictions for the dataset, with the best prediction for each data point being rewarded with training on that data point. This reward mechanism amplifies each model’s strengths and encourages specialization in different patterns. The specializations can then be translated into a partitioning scheme. The amplification of each model’s strengths inverts the active learning paradigm: while active learning typically focuses the training of models on their weaknesses to minimize the number of required training data points, our concept reinforces the strengths of each model, thus specializing them. We validate our concept -- called active partitioning -- with various datasets with clearly distinct functional patterns, such as mechanical stress and strain data in a porous structure. The active partitioning algorithm produces valuable insights into the datasets’ structure, which can serve various further applications. As a demonstration of one exemplary usage, we set up modular models consisting of multiple expert models, each learning a single partition, and compare their performance on more than twenty popular regression problems with single models learning all partitions simultaneously. Our results show significant improvements, with up to 54% loss reduction, confirming our partitioning algorithm’s utility."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Partitioning",
"Pattern-recognition",
"Clustering",
"Active learning",
"Modular networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/15f9bf8eb572a80ace6c615a38d42122b7adfa11.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/8b7d89d9409dbb7ca6a69fb413df0eac57268c28.zip"
},
"title": {
"value": "Active partitioning: inverting the paradigm of active learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zUrdd5NRLH | GROD: Enhancing Generalization of Transformer with Out-of-Distribution Detection | main | Active | OOD detection;learning theory;transformer models | interpretability and explainable AI | 3;5;5;5;6 | 3;4;3;4;4 | 2;2;2;3;3 | 1;2;2;3;3 | 2;2;2;3;3 | 4.8 | 3.6 | 2.4 | 2.2 | 2.4 | 0.666667 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* The main motivation for this study is unclear, given that existing methods already achieve strong results on the OOD detection benchmarks considered. For example, [8] achieves competitive performance on CIFAR10 vs. CIFAR100 without using additional information (i.e., without pre-trained models). Thus, the necessity of the proposed pipeline remains uncertain.\n\n* The authors should clearly outline their contributions over similar works [1-7], detailing the limitations of previous approaches and supporting these claims with comprehensive experiments.\n\n* The authors are encouraged to explore a broader range of architectures and models rather than focusing solely on ViT-B16.\n\n\n* Tiny ImageNet has overlap with both CIFAR-10 and CIFAR-100. How do the authors justify considering these datasets as ID and OOD?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The experimental results cover multiple modalities, including both text and image data.\n\n* The study provides both theoretical analysis and experimental validation to support the proposed pipeline."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces GROD, an approach to enhance transformers' OOD detection performance by incorporating synthesized OOD data. GROD leverages a Probably Approximately Correct (PAC) theory framework, proposing a learnable criterion for transformers that improves their ability to recognize OOD instances. By integrating OOD misclassification penalties into the loss function and generating synthetic outliers through PCA and LDA projections, GROD establishes a more robust boundary between in-distribution and OOD data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* (A) The authors should explore additional architectures, including MLP-based and CNN-based models, and explain how their method would apply to these. While the study clarifies that it focuses on transformers, it should explicitly address the pipeline’s compatibility with different architectures and provide a discussion on potential adaptations.\n\n* (B) The study includes only a limited number of transformer-based architectures, specifically ViT-B16 and BERT.\n\n* (C) The datasets used in this study are relatively small (e.g., CIFAR vs. SVHN). Larger and higher-resolution benchmarks (e.g., ImageNet, Texture) should be considered to show the contribution.\n\n* (D) Several studies have incorporated synthetic sampling strategies for OOD detection [1,2,3,4,5,6], but there is a lack of comparison with these methods.\n\n* (E) The primary idea of the pipeline shows similarities with [7].\n\n\n* (F) Using a large pretrained model, such as ViT-B16, for the relatively small Tiny ImageNet dataset raises the issue that the pipeline may rely on extra information seen by the backbone during pretraining rather than on the proposed pipeline itself.\n\n\n[1] Lee et al., \"Training Confidence-Calibrated Classifiers for Detecting Out-of-Distribution Samples,\" ICLR 2018.\n\n[2] Kirchheim et al., \"On Outlier Exposure with Generative Models,\" NeurIPS ML Safety Workshop, 2022.\n\n[3] Du et al., \"VOS: Learning What You Don’t Know by Virtual Outlier Synthesis,\" ICLR 2022.\n\n[4] Tao et al., \"Non-Parametric Outlier Synthesis,\" ICLR 2023.\n\n[5] Du et al., \"Dream the Impossible: Outlier Imagination with Diffusion Models,\" NeurIPS 2023.\n\n[6] Chen et al., \"ATOM: Robustifying Out-of-Distribution Detection Using Outlier Mining.\"\n\n[7] \"Fake It Till You Make It: Towards Accurate Near-Distribution Novelty Detection.\"\n\n[8] Deep Hybrid Models for Out-of-Distribution Detection"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above weakness"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper establishes a PAC learning framework for OOD detection applied to transformers, providing necessary and sufficient conditions for learnability and error boundary estimates. The approach of generating synthetic outliers using PCA and LDA projections is innovative and contributes to the robustness of the model.\n\n2. GROD enhances the generalization capabilities of transformers, leading to improved performance on both ID and OOD data across different tasks and data types. The algorithm achieves SOTA results in OOD detection for both NLP and CV tasks, outperforming other prevalent methods.\n\n3. The paper includes extensive experiments and ablation studies that validate the effectiveness of GROD and provide insights into hyperparameter tuning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a PAC learning framework for OOD detection of transformer networks. And it also propose a novel approach GROD to improve the OOD detection performance, including a loss function that penalizes the misclassification of OOD data, and a method for generating synthetic outliers. The GROD algorithm is evaluated across various OOD detection tasks in NLP and CV, demonstrating state-of-the-art performance regardless of data format."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The pre-training of GROD is conducted on the ImageNet-1K dataset, whereas OOD detection is evaluated using the CIFAR dataset. Some categories overlap, such as dogs and cats, which seems unreasonable.\n2. In line 147, \"Feat() represents extracting CLS tokens,\" which implies that the GROD algorithm utilizes the CLS token for feature extraction. While it is true that many transformer-based models do not necessarily require a CLS token, reducing the generality of the algorithm.\n3. How is the scalability of GROD algorithm? If it work well on other transformer-based pretrained backbone?\n4. Can the GROD algorithm be adapted to other types of deep learning architectures beyond transformers (e.g. , ResNet)? It seems to be only related to the input feature."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "+ \"Learnability\" is repeatedly used without a concise definition in layman’s terms, which could be clarified for a broader audience.\n+ How do the authors interpret the performance of LDA-based inter-class OOD generation in enriching OOD representation? More specifically, what are the primary limitations observed when using PCA-only projections, and how might these affect model robustness?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The theoretical foundation provided, especially the PAC learning framework for OOD detection, is a noteworthy contribution that bridges theoretical gaps in understanding transformers learnability for OOD tasks.\n- The paper provides thorough experimental validation across multiple OOD detection tasks for both NLP and CV, showing GROD’s adaptability to various data formats, which is admirable.\n- Key terms and concepts, such as the PAC learning framework and the GROD algorithm, are introduced clearly, though some technical sections might benefit from additional explanation for accessibility.\n- The theoretical insights, especially the derived conditions and error bounds for learnability in transformers, could pave the way for future advancements in OOD detection frameworks for transformers, making it a valuable reference for ongoing research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a novel algorithm, GROD, aimed at enhancing OOD detection in transformer networks, which is a timely and innovative addition to current research. By combining PCA and Linear Discriminant Analysis (LDA) projections for OOD data synthesis, it proposes an original approach to address limitations in existing OOD detection methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "+ The GROD algorithm involves several hyperparameters (e.g., the scaling parameter for Mahalanobis distance, LDA cluster dimensions) that require fine-tuning. \n+ While GROD achieves a balance between computational efficiency and performance, its iterative processes, including OOD data synthesis and Mahalanobis distance calculation, may not scale well with significantly larger datasets (e.g., ImageNet) or models. This limitation could restrict its deployment in real-time applications where processing speed is crucial.\n+ There are missing citations in the manuscript. For example, the paper introduces generative models, but generative-based methods [1, 2, 3] are missed without corresponding details in the bibliography. \n\n[1]: Kong, Shu, and Deva Ramanan. \"Opengan: Open-set recognition via open data generation.\" In ICCV. 2021.\n\n[2]: Wang, Qizhou, et al. \"Out-of-distribution detection with implicit outlier transformation.\" In ICLR, 2023.\n\n[3]: Zheng, Haotian, et al. \"Out-of-distribution Detection Learning with Unreliable Out-of-distribution Sources.\" In NeurIPS, 2023."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Comparison with Standard Outlier Exposure (OE) Baseline:\nHow does GROD’s approach to OoD data generation compare to traditional Outlier Exposure (OE) methods, particularly when using standard Gaussian noise or other simple forms of synthetic OoD data?\n\n *Suggested experiment*: Implement a baseline comparison between GROD and OE methods (e.g., Gaussian noise or straightforward OE with diverse datasets). This experiment could involve evaluating performance differences in OoD performance and computational efficiency.\n\n2. Impact of GROD’s Data Generation Methodology:\nDoes GROD’s use of PCA and LDA projections for generating synthetic OoD data significantly outperform simpler methods?\n\n *Suggested experiment:* Conduct an ablation study comparing GROD’s synthetic OoD data generation method to simpler techniques like Gaussian noise or uniformly random OoD data. Evaluate performance of using the proposed technique but instead of the proposed data generation use Gaussian noise at the input.\n\nIf the authors can provide sufficient evidence for the benefits of the proposed OoD data generation method over using simpler techniques and provide result on comparison with OE I am willing to increase my score."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "**1. PAC Learning Framework:** The paper is based on theory, and makes sufficient contribution to the theory of OoD and PAC learnability of OoD however only for transformer architectures.\n\n**2. Computational Efficiency:** Despite the complexity of GROD, the overhead is during training and inference cost is relatively inexpensive.\n\n**3. Ablation Studies:** Ablation studies provide insights into key parameters that control the performance on GROD, which could help guide future work in OOD detection using transformers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The GROD paper introduces Generate Rounded OoD Data (GROD), designed to improve the generalization of transformer models in Out-of-Distribution (OoD) detection. This method leverages synthetic OOD data generated using PCA and LDA projections to refine decision boundaries during training, aiming to enhance performance on both in-distribution (ID) and OoD data. GROD is supported by a PAC learning framework and validated through experimental results demonstrating its state-of-the-art performance in OoD tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**1 . Baseline Comparison with Outlier Exposure (OE):** While the authors propose a synthetic OoD generation approach, they do not include a comparison to Outlier Exposure (OE) methods. A comparison, especially with traditional OE using Gaussian noise, would be valuable in demonstrating GROD’s necessity and superiority. OE [1] proposed using additional OoD data which is used to train/finetune a model to better OoD detection performance, a similar to the idea presented in this paper.\n\nCitations of Related OE Work: The paper does not cite several relevant studies in the Outlier Exposure space, which is a significant oversight given that GROD’s fundamental methodology aligns closely with existing OE methods that use OoD data during fine-tuning. See citations [1, 2].\n\n**2. Evaluation of Synthetic OoD Data:** While GROD’s OoD data generation is sophisticated, it is unclear if the benefits of PCA and LDA projections over simpler alternatives like Gaussian noise have been adequately evaluated. Including a comparison experiment would strengthen claims about the effectiveness of GROD’s approach in OoD data synthesis.\n\n[1] Hendrycks, Dan, Mantas Mazeika, and Thomas Dietterich. 2019. \"Deep Anomaly Detection with Outlier Exposure.\" In Proceedings of the International Conference on Learning Representations. https://openreview.net/forum?id=HyxCxhRcY7.\n\n[2] Zhu, Jianing, Yu Geng, Jiangchao Yao, Tongliang Liu, Gang Niu, Masashi Sugiyama, and Bo Han. 2023. \"Diversified Outlier Exposure for Out-of-Distribution Detection via Informative Extrapolation.\" In Proceedings of Advances in Neural Information Processing Systems, vol. 36, 22702–22734."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper establishes the OOD detection learnability of the transformer model.\n2. This paper considered both NLP and CV scenarios."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper establishes the OOD detection learnability of the transformer model via PAC learning theory. The GROD is proposed to enhance the detection performance of transformer-based models for both CV and NLP tasks, which generate virtual OOD samples for fine-tuning. GROD first identifies the boundary ID samples by PCA and LDA and synthesizes the fake OOD by Gaussian mixtures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Why do the maximum and minimum values projected by PCA and LDA are considered boundary points? Further analysis of the intrinsic mechanism of PCA and LDA is needed.\n2. As claimed in line 149, LDA is selected to guarantee the robustness of generated OOD, but it is only utilized when the number of ID classes is small as defined in Equation (4). Does this mean that the generated OOD samples are not robust with large-scale ID datasets?\n3. The baseline NPOS adopts a similar OOD synthesis pipeline, which first identifies boundary ID samples and then generates OOD samples via Gaussian sampling. The superiority of GROD against NPOS should be explicitly stated and the generated OOD samples of the two methods can be statistically compared to further distinguish GROD.\n4. The notations are confusing, e.g., line 144 indicates the feature space is $\\mathbb{R}^{n\\times s}$, however, line 168 defines another $n$.\n5. The experiments are insufficient to prove that GROD achieves SOTA performance. Since the authors leverage the OpenOOD benchmark, more far-OOD datasets, such as Textures, Places-365, and MNIST, can be tested to validate GROD's performance."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024grod,\ntitle={{GROD}: Enhancing Generalization of Transformer with Out-of-Distribution Detection},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zUrdd5NRLH},\nnote={under review}\n}"
},
"abstract": {
"value": "Transformer networks excel in natural language processing (NLP) and computer vision (CV) tasks. However, they face challenges in generalizing to Out-of-Distribution (OOD) datasets, that is, data whose distribution differs from that seen during training. The OOD detection aims to distinguish data that deviates from the expected distribution, while maintaining optimal performance on in-distribution (ID) data. This paper introduces a novel approach based on OOD detection, termed the Generate Rounded OOD Data (GROD) algorithm, which significantly bolsters the generalization performance of transformer networks across various tasks. GROD is motivated by our new OOD detection Probably Approximately Correct (PAC) Theory for transformer. The transformer has learnability in terms of OOD detection that is, when the data is sufficient the outlier can be well represented. By penalizing the misclassification of OOD data within the loss function and generating synthetic outliers, GROD guarantees learnability and refines the decision boundaries between inlier and outlier. This strategy demonstrates robust adaptability and general applicability across different data types. Evaluated across diverse OOD detection tasks in NLP and CV, GROD achieves SOTA regardless of data format. The code is available at https://anonymous.4open.science/r/GROD-OOD-Detection-with-transformers-B70F."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"OOD detection",
"learning theory",
"transformer models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/2192fc7295fa5f51867a5da33e867e0fd55ace62.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "GROD: Enhancing Generalization of Transformer with Out-of-Distribution Detection"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zUtl4kJa0C | Revisiting Critical Learning Periods in Deep Neural Networks | main | Active | DNN;Critical Learning Period | other topics in machine learning (i.e., none of the above) | 3;5;5;6 | 2;3;3;3 | 2;2;3;3 | 1;3;2;3 | 2;3;2;3 | 4.75 | 2.75 | 2.5 | 2.25 | 2.5 | 0.927173 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weaknesses part."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The authors present a well-supported critique of FI-based explanations, followed by a thorough theoretical framework for understanding CLPs from an optimization standpoint.\n- The paper is generally clear, with a logical flow that makes the theoretical ideas accessible.\n- Provides new theoretical framework for understanding CLPs"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper revisits and challenges the existing understanding of Critical Learning Periods (CLPs) in Deep Neural Networks. The authors demonstrate that the prevalent explanation of CLPs based on Fisher Information (FI) dynamics and model memorization is inaccurate. Traditionally, FI has been used to explain CLPs by correlating high FI values with increased model memorization during early training stages. However, the authors argue that such interpretations are flawed, as FI merely indicates sensitivity to training data and is influenced by noise rather than accurately representing memorization or CLPs. Through theoretical analysis and empirical results, the authors introduce a new metric - the effective gradient, to demonstrate that early training epochs disproportionately impact final performance due to their effect on model optimization dynamics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. All experiments were conducted only on CIFAR-10 dataset, without validation on larger-scale datasets like ImageNet. Also, there is no exploration of domains beyond image classification. Testing on more complex datasets and more types of defective data could further validate the generality of the effective gradient metric. Do the authors expect their findings to generalize to larger datasets or other domains?\n\n2. The calculation of effective gradients, as discussed, relies on access to the full gradient, which is challenging to obtain in real-time during training. Although the paper acknowledges this limitation, it lacks concrete solutions or approximations that could make the effective gradient metric feasible in practical scenarios. How would the authors propose to calculate effective gradients during actual training, rather than post-hoc? Or is it true that effective gradient can only be used as a post-training analysis tool and not for actual guidance in training? Could the authors discuss any existing methods for approximating full gradients that might be applicable, or outline potential research directions for developing such approximations?\n\n3. The paper claims to \"shed light on enhancing model performance and robustness through such periods\" in the abstract. Could the authors provide examples or guidelines on how this understanding of CLPs could be used to improve training strategies?\n\n4. What are the computational requirements for calculating effective gradients in large-scale training scenarios? Could you provide a complexity analysis? Or could you provide an estimate of the additional computational overhead (e.g., in terms of time or memory) required to calculate effective gradients for a typical training run on your experimental setup?\n\nI'd be happy to raise the score if the author could address my concerns."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
                "value": "- Why is mutual information (MI) a good metric to characterize memorization? Especially in light of prior works that show other metrics may be more useful. An intuitive notion of memorization is to perfectly reconstruct input from embeddings. However, it's not clear if the representations considered in the study can perfectly construct the input.\n- Please consider discussing more works on memorization in the paper as noted in Weaknesses (No need for a full list as the literature appears to be vast but important works such as those on noisy label memorization and/or noisy input memorization would be useful to general readers)\n- The arguments in Section 2.2 and 2.3 may need to be simplified or expanded so that a general audience may appreciate the points raised by the authors especially in light of prior work (Jastrzębski et al.)[https://arxiv.org/abs/2012.14193] that appears to suggest trace of FI is a good metric for memorization\nOverall, my decision to not support acceptance at this point is due to the concerns noted above and in the Weakness section. I look forward to a discussion with the authors to clear up any misunderstandings on my part."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper studies CLPs which is an phenomenon that is of interest to the research community (as evidenced by citations to Achille et al.'s work)\n- Training dynamics as quantified by mutual information (MI) and Fisher Information (FI) is interesting in the setting covered in the paper\n- The paper proposes effective gradient, a new metric to explain CLPs. A new metric is commendable especially given its convergence behavior (Theorem 2) but please see weakness for concerns"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies the validity of existing explanation via Fisher Information (FI) metric to quantify critical learning periods (CLPs) phenomenon. The paper uses mutual information (MI) between the input and neural representation as an alternate metric for memorization and studies the dynamics of MI and FI. Empirically, the paper shows that CLPs can extend beyond memorization phase as quantified via MI. The paper introduces a new metric called Effective Gradient that is optimization-based that shows better sensitivity to training under noisy (or defective) inputs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
                "value": "- The paper does not define memorization clearly but starts using this term. This vague use can be very confusing to the reader. A suggested place to consider to define memorization is at the start of Section 2. \n\n- The paper uses mutual information (MI) to measure memorization in Section 3.2. It would be beneficial to define this even before the mathematical preliminaries in prior subsections (also see first point). More importantly, there is no justification on why MI is a good definition of memorization in neural networks. A quick read of one of the references (Kleinman et al.)[https://arxiv.org/abs/2010.02459] appears to suggest that other/better metrics are preferred over MI to quantify memorization. The paper could benefit from a discussion of why MI is preferred in the current work over other metrics.\n\n- (continuing with memorization metrics) There is vast literature that appears to define and use memorization measures some of which relate to noisy labels while others may relate to noisy inputs. The paper would benefit if the paper includes a proper discussion of prior work to help both new and experienced readers to understand the current work's viewpoint\n\n- There are other references that talk about memorization and Fisher Information (FI) that needs to be discussed in the paper. See (Jastrzębski et al.)[https://arxiv.org/abs/2012.14193] for instance. These prior works suggest that trace of FIM is a good enough memorization measure. A discussion on preferring MI over other proposed measures could be useful.\n\n- Certain sections on the paper are hard to follow. For instance it is hard for me to pick up the explanation in Sections 2.2 and 2.3 on FI and memorization. \n\n- The paper proposes effective gradient. However, this metric is non-measurable so an approximation is used.\nHowever, the paper uses an approximation that relies on knowing the final solution (weights) which severely reduces the usefulness of this quantity both in concept as well as practice. The paper notes that the effective gradient is \"practically inaccessible\". However, the lack of access to effective gradient is not just in practice but also in theory in the case of online learning-like settings. A clarification on what settings effective gradient applies to would help clarify claims made in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "(1) Since defective data is also \"new\" data, how can one distinguish between the different impacts introduced by \"new\" data versus \"defective\" data? Did the authors conduct any experiments or have any insight on what the effective gradients would look like when training on new but high-quality data (where there is a domain shift but no concerns about data quality)?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "(1) This paper develops solid theories and methods, supported by experiments, to demonstrate that the CLPs period may not necessarily align with, nor be explained by, certain patterns in FI dynamics.\n\n(2) The proposed metric of effective gradients offers meaningful insights into the training process of deep learning models optimized by stochastic gradient descent, making it a great fit for investigating and explaining CLPs. Key findings derived from effective gradients provide valuable and novel perspectives on understanding CLPs, and it may effectively inspire future research that aligns with the interests of the ICLR community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper revisits the critical learning periods (CLPs) in training deep networks and challenges the existing, prevalent Fisher Information (FI)-based explanations. The authors first highlight the misalignment between CLPs and FI dynamics due to defective data in the early training stages. To enhance the understanding of CLPs, the authors propose a novel metric called effective gradients and investigate its relationship with CLPs. Experiments on various popular deep learning models support the proposed theories. Several key findings are summarized based on the experimental results, and providing readers with novel insights into CLPs, along with other detailed discussion and analysis."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
                "value": "(1) The experiments lack statistical analysis of the variance in metrics and model performance during CLPs, especially given that it is the \"stochastic\" gradient descent that lies behind the research problem and motivates the proposed novel metrics. The authors are encouraged to add variance plots to the figures and conduct statistical tests to help readers better evaluate the consistency of the key findings.\n\n(2) The explanation of experiment results regarding the relationship between FI dynamics and the memory phase seems questionable. In both Fig. 1 (b)(c), there is a slight initial increase followed by a decrease in FI at the beginning of training, although the change is not as large as in the defective training periods. Do these small changes of FI, at the beginning of the training, align well with the existing explanations of memorization and forgetting phases? Can one then simply conclude that \"the surge in FI is a consequence of defective training\"? The authors are encouraged to provide more explanations and discussion on this.\n\n(3) Given the challenge of precisely defining CLPs in the training of deep learning models, the authors are encouraged to provide a clear problem setting for the CLPs that this paper aims to investigate and address, ideally in the first paragraph of the introduction. Personally, I found myself a bit lost in Sections 2.2 and 2.3, trying to understand what CLPs should be without considering FI dynamics, and then gained a much clearer understanding when I reached the discussion section, particularly the paragraph, \"Critical learning periods are not time windows with clear boundaries.\" Additionally, the authors mentioned \"The CLPs refer to the phenomenon where ...\" in Section 4.2 but did not provide references there.\nIn summary, previewing some key findings (like potentially a new \"definition\" and \"understanding\" of CLPs) in the early part of the introduction would help strengthen the writing and clarify the definition of CLPs in this paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper considers an important problem of understanding learning dynamics and critical phases of neural networks. \n- The manuscript is well-written and largely builds arguments nicely.\n- The two main arguments that FI is a symptom of a learning phase not the cause as well as effective gradients as better CLP explainer are well supported, both by theory and empirical evidence. \n- The findings of the submissions shed light on learning phases. The may help train models more efficiently, e.g. by understanding at what training phase data has to be cleaner, which has implications for data curation but also augmentation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The submission investigates critical learning periods (CLP), in which models can be permanently impaired by defective data. The authors refute the previous explainer that relied on FI and related the impairment to memorization. Instead, they develop 'effective gradients' as a metric to identify updates in which the model learns meaningful information. They demonstrate that effective gradients are well-suited to explain CLPs, while FI cannot."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While I appreciate the notion of the effective gradient, the derivation of it takes up a lot of space in the paper. Effectively, this could have been a simple statement of 'alignment between current gradient and $(w_final - w_i)$. As far as I can tell, the takeaway from the theory is that the effective gradient converges 0 while the actual gradient doesn't, hence while the effective gradient is large the model can be tripped up by bad data? In that case, it might help to start with a motivation / summary of the argument rather than building the argument up from the parts.\n- The effective gradient provides good grounding for the experiments in this paper, but it's only available for post-hoc analysis. It seems to me that it might be correlated to per-step loss reduction, albeit not perfectly. Metrics like these might be measurable while training and provide actionable insights at what training phase to be extra careful about data quality. \n- The experimental evaluation clearly supports the main argument of the paper. However, some details are unclear. For instance, the 'optimum' used to compute the effective gradient, is it the same between the different models, or do they each get their own, individual optima? Also, the claim that models with defective training reach a different basin would benefit from more support, e.g. by computing the mode connectivity between the clean/defective solution. \n- The paper is generally well-written and carries a clear message. However, there seems to be some unnecessary math like on page 2 the KL. In the interest of clarity, I would recommend dropping it unless it helps make the argument."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024revisiting,\ntitle={Revisiting Critical Learning Periods in Deep Neural Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zUtl4kJa0C},\nnote={under review}\n}"
},
"abstract": {
"value": "Deep neural networks (DNNs) exhibit critical learning periods (CLPs) during early training phases, when exposure to defective data can permanently impair model performance. The prevalent understanding of such periods, primarily based on the interpretation of Fisher Information (FI), attributes CLPs to the memorization phase. However, our theoretical and empirical study exhibits that such explanations of CLPs are inaccurate because of the misunderstanding of the relationship between FI and model memorization. As such, we revisit the CLPs in DNNs from the information theory and optimization perspectives, gaining a better and more accurate understanding of CLPs. \nWe visualize model memorization dynamics and observe that CLPs extend beyond the memorization phase. Additionally, we introduce the concept of the effective gradient, a novel metric able to quantify the actual influence of each training epoch on the optimization trajectory. Our empirical and theoretical analyses reveal that the norm of effective gradients generally diminishes over training epochs and eventually converges to zero, highlighting the disproportionate larger impact of initial training on final model outcomes. Besides, this insight also clarifies the mechanism behind permanent performance degradation due to defective initial training: the model becomes trapped in the suboptimal region of parameter space. Our work offers novel and in-depth understandings of CLPs and sheds light on enhancing model performance and robustness through such periods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"DNN",
"Critical Learning Period"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3d3c1ab2024b89f6a274a00fbff7d7786e0a5017.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Revisiting Critical Learning Periods in Deep Neural Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zV2cgXk2aY | Sentinel: Multi-Patch Transformer with Temporal and Channel Attention for Time Series Forecasting | main | Active | Transformer;Time Series Forecasting;Attention mechanism | learning on time series and dynamical systems | 3;3;3;5 | 4;4;4;3 | 2;2;2;2 | 1;2;2;2 | 2;2;2;3 | 3.5 | 3.75 | 2 | 1.75 | 2.25 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Encoder-Decoder Design Choices: Why does the model use Multi-Patch Channel attention in the encoder and Multi-Patch Time in the decoder? The ablation study in Table 3 provides limited insights—could more analysis clarify the necessity and effectiveness of this design choice?\n\n2. Simultaneous Temporal and Inter-Channel Modeling: Given the importance of both temporal and inter-channel dependencies, could mechanisms be explored that allow these relationships to be learned simultaneously rather than sequentially?\n\n3. Error Bars for Figure 3: Including error bars in Figure 3 would enhance the clarity of the trend and allow for a better understanding of variability in the results.\n\n4. Further Analysis of Multi-Patch Attention: Additional analysis, particularly regarding the effectiveness of multi-patch attention, would provide valuable insights into its utility and potential limitations."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well-written, with a clear presentation of both methodology and results.\n2. Experimental results are competitive and close to state-of-the-art (SOTA) performance.\n3. The code is made available, which facilitates reproducibility.\n4. Targeted Design for Temporal and Inter-Channel Dependency Modeling: The focus on addressing both temporal and inter-channel dependencies in time series data highlights a well-considered model design."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Sentinel, a transformer-based model for time series forecasting that effectively captures dependencies across both temporal and inter-channel dimensions. It further proposes a novel multi-patch attention mechanism to replace the standard multi-head attention in the Transformer architecture. Experimental results suggest the model’s effectiveness in capturing these relationships and forecasting time series."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Novelty: The contributions appear to be incremental. The multi-patch attention mechanism provides a marginal improvement over existing architectures, and the modeling of both temporal and inter-channel dimensions in time series is also not a new idea.\n \n2. Justification of Multi-Patch Attention: The rationale behind why multi-patch attention performs better than traditional multi-head attention is not fully explained. An analysis of the root causes for its effectiveness, ideally with theoretical insights or visualizations, would strengthen the contribution.\n\n3. Performance Compared to SOTA: The proposed model does not achieve state-of-the-art performance compared to the provided baselines."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The final projection layer flattens the embeddings of all tokens and maps them to the output, right? So, even though a decoder is used, a separate model still needs to be trained for each length?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. its ability to capture both temporal and inter-channel dependencies crucial for multivariate time series forecasting.\n2. by utilizing a patching process, Sentinel introduces a new attention mechanism that replaces traditional multi-head attention splitting. This shifts the focus from \"heads\" to \"patches,\" integrating more naturally into the Transformer architecture.\n3. through ablation studies, the paper demonstrates the contribution of the proposed components to the overall predictive performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The author proposed an encoder-decoder model structure that can simultaneously capture temporal relationships and variable relationships. Additionally, the author employed methods such as multi-patch to enhance the model's performance, achieving comparable performance on benchmark datasets by addressing both dependency types effectively."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The main weaknesses of the article lie in its novelty and results.\n* The methods used in the article mostly have already been proposed by others, and the simple stitching together of ideas makes the article lack novelty.\n* The experimental results of the article are mostly the second.\n* The main experiments in the article are limited. It might be worth considering adding short-term experiments and incorporating new datasets. For example, there are many new datasets available here: https://huggingface.co/datasets/Salesforce/lotsa_data.\n* Lack of sensitivity analysis of parameters."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- About the method: The approach of modeling the channel and time dimensions separately lacks novelty. The proposed multi-patch attention mechanism does not convincingly demonstrate effectiveness compared to the standard Transformer structure, nor does it show significant performance gains.\n\n- About main experiments: The primary experiments do not show a clear advantage over existing methods, lacking of recent baselines such as TimeXer and TimeMixer.\n\n- About analytical experiments: Since the authors claim that a key innovation of the paper is the multi-patch attention mechanism’s application to the channel and time dimensions, detailed analytical experiments are necessary. For instance, visualizations showing whether the model has learned meaningful attention patterns could substantiate the rationale and effectiveness of the design.\n\n[1] TimeXer: Empowering Transformers for Time Series Forecasting with Exogenous Variables\n\n[2] TimeMixer: Decomposable Multiscale Mixing for Time Series Forecasting"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper considers both channel and temporal dimensions in the modeling of time series.\n\n- This paper proposes a multi-patch attention mechanism to replace the standard Transformer multi-head attention by focusing on patch-level information in time series."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes an encoder-decoder architecture for time series forecasting that models the channel dimension and temporal dimension separately. It introduces a multi-patch attention mechanism to replace the standard multi-head attention to enhance the prediction performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The motivation of the method lacks clarity, and its novelty is limited. Furthermore, the performance of the proposed method does not show a clear advantage over other baselines, and there is a lack of validation of the rationale behind the design of individual model components."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "What are the advantages of the proposed method compared to CARD and other methods? For example, does it offer better efficiency or improved performance with limited data?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. A useful insight is that the authors link multi-head splitting to multi-patch splitting, seamlessly introducing their new attention mechanism.\n2. I agree that the ablation study is reasonable.\n3. The paper includes comparisons with numerous baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposed a Transformer-based model designed for multivariate time-series forecasting. The model introduces a multi-patch attention mechanism that provides two sets of patches, one is from temporal direction and other is from inter-channel direction. Subsequently, the use decoder and decoder to model them separately.\nThis paper is very clear and easy to understand."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main concern is that the proposed method does not demonstrate sufficient power, as evident from the experimental results. CARD clearly outperforms the proposed method on numerous datasets. This undermines the authors’ claim, as Sentinel introduces more complexity than CARD with added components—such as “specializing the encoder in capturing contextual information through the channel dimension.” As a result, the contribution may not be sufficient, as integrating inter-channel information effectively could likely boost independent Transformers (PatchTST).\n\n2. Although the authors review many recent papers, they fail to clearly explain their model’s advantage over existing approaches.\n\n\n3. Compared to similar ICLR papers, the experiments seem insufficient. For instance, CARD, iTransformer, and PatchTST conducted more extensive experiments, either across more datasets or with more comprehensive experimental settings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024sentinel,\ntitle={Sentinel: Multi-Patch Transformer with Temporal and Channel Attention for Time Series Forecasting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zV2cgXk2aY},\nnote={under review}\n}"
},
"abstract": {
"value": "Transformer-based time series forecasting has recently gained strong interest due to the ability of transformers to model sequential data. Most of the state-of-the-art architectures exploit either temporal or inter-channel dependencies, limiting their effectiveness in multivariate time-series forecasting where both types of dependencies are crucial. We propose Sentinel, a full transformer-based architecture composed of an encoder able to extract contextual information from the channel dimension, and a decoder designed to capture causal relations and dependencies across the temporal dimension. Additionally, we introduce a multi-patch attention mechanism, which leverages the patching process to structure the input sequence in a way that can be naturally integrated into the transformer architecture, replacing the multi-head splitting process. Extensive experiments on standard benchmarks demonstrate that Sentinel, because of its ability to ``monitor\" both the temporal and the inter-channel dimension, achieves better or comparable performance with respect to state-of-the-art approaches."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Transformer",
"Time Series Forecasting",
"Attention mechanism"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/f65a45cce81f7e4d7444c234f2867eda8e799fd4.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/7aa550def6f16a44238999a3d953fbcaab9c72fe.zip"
},
"title": {
"value": "Sentinel: Multi-Patch Transformer with Temporal and Channel Attention for Time Series Forecasting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zV6D212c7Q | Masked Cross-attention Adapters Enable the Characterization of Dense Features | main | Active | image features;image backbones;ViT;instance segmentation | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;3;5 | 4;4;4;5 | 2;2;2;3 | 1;2;2;2 | 2;1;2;3 | 3.5 | 4.25 | 2.25 | 1.75 | 2 | 1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "The ablations show that a feature dimension of 8 performs better than a dimension of 16. Why only report a change in one direction? How did a feature dimension of 4 perform? Similarly, without the second cross-attention layer MAXA performs worse. Does a third cross-attention layer improve performance?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The ability to perform a meaningful cost-effective evaluation of backbones for their dense prediction capabilities is currently lacking, and as a result, only a limited number of studies offer meaningful evaluations. The proposed method is well-motivated and clearly described, making it easy to follow. The authors provide a solid introduction to the problem and include an informative related work section. Additionally, the ablation studies demonstrate that the advantages of certain design choices are consistent across multiple tasks and different pretrained backbones."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work introduces the Masked Cross-Attention Adapter (MAXA), a method designed to provide a cost-effective approach for probing transformer backbones for their dense prediction capabilities. The method employs a masked cross-attention readout layer that uses positional encodings as fixed query vectors. This is followed by a second unmasked cross-attention layer and a deconvolution network. Additionally, each attention head incorporates a learnable locality bias term during cross-attention. The authors evaluate MAXA's performance using multiple pretrained backbones (such as MAE, DINO, and DINOv2) across tasks including instance awareness, semantic segmentation, and monocular depth estimation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The relevance of a cost-effective dense prediction evaluation largely depends on its high correlation with currently optimal but more resource-intensive evaluation techniques. However, the experiments provided are insufficient to establish confidence in this correlation. While the authors evaluate MAXA across multiple tasks and backbones, they do not present a statistical analysis of its correlation with fine-tuned results, nor do they quantify the trade-off between training cost and performance compared to state-of-the-art techniques. Such context is necessary to make the presented evaluations meaningful.\n\nThe choice to follow simple FPNs and rely solely on information from the last layer appears questionable. Although Vision Transformers (ViTs) maintain the same resolution across all layers, it is doubtful that the final layer alone contains all the necessary information without fine-tuning the backbone. For instance, MAE demonstrated that linear probing performance of ViTs is not always a reliable indicator of fine-tuning performance. Furthermore, [1] showed that employing cross-attention readouts from every layer leads to significant performance improvements compared to using simple FPNs.\n\nminor: Lines 260-270 contain some incomplete sentences, such as \"SAM2 also good.\".\n\n[1] Chen, Zhe, et al. \"Vision Transformer Adapter for Dense Predictions.\" The Eleventh International Conference on Learning Representations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Please see weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Comprehensive Benchmark for Dense Vision Tasks: The paper makes a valuable contribution by introducing a comprehensive benchmark specifically designed for evaluating the dense prediction capabilities of pre-trained vision encoders, addressing a notable gap in the current landscape of benchmarks that primarily focus on classification tasks. \n- Methodological Rigor: The authors employ a rigorous methodology, including the use of masked cross-attention adapters (MAXA) to enable fair comparisons across different encoders. The choice of MAXA is well-justified, as it allows for fast training and evaluation. \n- Clarity and Insightful Presentation: The paper presents a clear and well-organized set of experiments, covering a diverse range of pre-trained models and dense tasks. The results are presented in a readily understandable and comparable manner, providing valuable insights into the relative strengths and weaknesses of different encoders for dense prediction tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores vision adapter for pretrained ViTs on dense visual tasks like Segmentation and Depth Estimation. The paper detailedly benchmark the performance with the tasks and also ablates some adapter designs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Limited Insight into Learned Representations: While the benchmark effectively compares the performance of different encoders, it lacks deep analysis regarding the specific representations learned by each encoder. Simply stating that \"DINOv2\" achieves the highest numbers isn't sufficient; the paper would benefit from a more in-depth investigation into the characteristics of the learned representations that contribute to performance differences. \n- Overlooking Architectural Biases: The paper does not explicitly address how architectural biases in different encoders might contribute to their performance on dense tasks. A discussion on this aspect would be valuable, as it could help disentangle the effects of pre-training from those inherent to the encoder architectures. \n- Potential Bias from MAXA: Although the authors justify the use of MAXA, the paper could be strengthened by exploring whether the findings hold consistent with other adapter methods or lightweight dense heads. This would provide further validation of the results and address potential concerns about biases introduced by the specific choice of MAXA. \n- Missing Comparisons with Key Adapter Methods: The paper lacks a direct comparison with other relevant adapter methods, such as ViT-Adapter and FeatUp. Including these methods in the evaluation would offer a more complete picture of the adapter landscape for dense prediction tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Is there more discussion on the reasons behind DINOv2's superior performance?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- MAXA can adapt to downstream tasks with only a small number of additional parameters (less than 0.3%).\n- Since the backbone network is frozen during training, MAXA achieves faster training speeds."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The article introduces a new method called Masked Cross-Attention Adapters (MAXA) for evaluating and characterizing the performance of different visual feature extractors (backbone networks) in dense prediction tasks. MAXA employs a cross-attention mechanism that enables effective feature extraction and dense prediction without relying on the size and resolution of the backbone network's output. This method introduces a learnable masking radius, allowing the model to adapt to the locality of various features, thereby achieving fast training while maintaining a low number of parameters."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Although MAXA has been evaluated across three main task categories, these categories may not fully cover all possible visual tasks.\n- There is a lack of comparisons with other fine-tuning methods, such as Adapters Strike Back.\n- The novelty is relatively weak."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "As raised the weakness section, it is highly recommended to include experiments with Hybrid CNN-Transformer architecture like ViTamin [6] and ConvNet architecture like CLIP-ConvNeXt [7]. These experiments are essential, and I would consider increasing the score if they are added during the rebuttal phase.\n\nThe authors should also address the major concerns and minor concerns detailed in the weakness section."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- (1) The idea is simple and easy to read.\n \n- (2) This research has potential to become a standard for evaluating the dense awareness of frozen features.\n \n- (3) The authors explore various aspects of the capabilities of frozen features, providing a solid basis for evaluating the adapter's effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents the Masked Cross-Attention Adapter (MAXA), a lightweight solution for making dense predictions on frozen vision backbones. By utilizing cross-attention, MAXA decouples the encoder output's resolution from the final prediction, filling a gap in the evaluation of feature extractors for dense tasks such as segmentation and depth estimation. The authors assess multiple vision backbones along three dimensions: instance awareness, local semantics, and spatial understanding, with DINOv2 emerging as the top performer across all tasks. The study emphasizes MAXA’s effectiveness in characterizing dense features while requiring minimal parameters and training effort.\n\nOverall, the concept is straightforward and well-presented. The authors explore various aspects of the capabilities of frozen features, providing a solid basis for evaluating the adapter's effectiveness. However, the paper falls short due to a limited literature review and insufficient experimental depth, which may impact its chances of acceptance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Major Weaknesses\n\n- (1) The literature cited is outdated. For instance, the authors state, “At the other end of the spectrum, using complex dense task heads, for example, Faster R-CNN (Ren et al., 2015) for object detection, adds a large number of parameters and introduces its own inductive biases.” However, Faster R-CNN is nearly a decade old. The authors should clearly differentiate their approach from more recent works like ViTDet [1], ViT-Adapter [2], and Segmenter [3] in both the introduction and related work sections, as these studies also focus on developing lightweight dense task heads. Although ViTDet is briefly mentioned in the “Experiment Design” section, this reference is insufficient for establishing the distinction.\n \n- (2) Lack comparison with zero-shot dense prediction using frozen features. Zero-shot segmentation using frozen features [4, 5] from foundation models has been extensively studied. These models [4, 5] demonstrate strong segmentation performance with training-free dense heads.\n \n- (3) The experimental comparison is insufficient. In the CLIP setting, the authors focus solely on ViT-based architectures (SigLIP for example), whereas ConvNet-based or hybrid architectures might be more appropriate for dense tasks. It is highly recommended that the authors include experiments with Hybrid CNN-Transformer architecture like ViTamin [6] and ConvNet architecture like CLIP-ConvNeXt [7]. These additional experiments are crucial, and I would consider raising the score if they are incorporated during the rebuttal phase. \n \n\n\nMinor Weaknesses\n\n- (1) In Figure 1, it is unclear how the M(q, \\sigma) is generated.\n \n- (2) Section 3 mentions that “spatial queries Q of size (HQWQ, 16)”. It is unclear why the query dimension is chosen to be 16.\n \n- (3) Concerns regarding reproducibility arise due to the unclear descriptions. 
Section 3 mentions that “This is realized through a small CNN operating on the output of all queries using transposed convolutions to increase spatial size.”. Please specify the details of \"small CNN\". CNN usually refer to Convolutional Neural Network consisting with convolutional layer and non-linear activation layer. Please specify the type and number of layers, and the kernal size of each convolution operator.\n\n\n[1] Li Y, Mao H, Girshick R, et al. Exploring plain vision transformer backbones for object detection[C]//European conference on computer vision. Cham: Springer Nature Switzerland, 2022: 280-296.\n \n[2] Strudel R, Garcia R, Laptev I, et al. Segmenter: Transformer for semantic segmentation[C]//Proceedings of the IEEE/CVF international conference on computer vision. 2021: 7262-7272.\n \n[3] Chen Z, Duan Y, Wang W, et al. Vision transformer adapter for dense predictions[J]. ICLR, 2023.\n \n[4] Sun S, Li R, Torr P, et al. Clip as rnn: Segment countless visual concepts without training endeavor[C]//Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024: 13171-13182.\n \n[5] Wang F, Mei J, Yuille A. Sclip: Rethinking self-attention for dense vision-language inference[C]//European Conference on Computer Vision. Springer, Cham, 2025: 315-332.\n \n[6] Chen J, Yu Q, Shen X, et al. ViTamin: Designing Scalable Vision Models in the Vision-Language Era. CVPR. 2024. https://huggingface.co/jienengchen/ViTamin-XL-384px\n \n[7] https://huggingface.co/laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024masked,\ntitle={Masked Cross-attention Adapters Enable the Characterization of Dense Features},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zV6D212c7Q},\nnote={under review}\n}"
},
"abstract": {
"value": "Learning meaningful representations is a core topic of deep learning. Throughout the last decade, many strategies for learning image representations have been proposed involving supervision and self-supervision and various data sources. \nIn most current work, evaluation is focused on classification tasks while neglecting dense prediction tasks, possibly because linear probing is more challenging in the latter case.\nFurthermore, dense prediction heads are often large and come with specific inductive biases that distort performance measurement further.\nIn this work we propose masked cross-attention adapters (MAXA), an adapter method that is capable of dense predictions independent of the size and resolution of the encoder output. This allows us to make dense predictions using a small number of additional parameters ($<0.3 $%) while allowing for fast training using frozen backbones.\nUsing this adapter, we run a comprehensive evaluation assessing instance awareness, local semantics and spatial representation of a diverse set of backbones. \nWe find that DINOv2 outperforms all other backbones tested - including those supervised with masks and language - across all three task categories. \nCode is available at https://to.be.released."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"image features",
"image backbones",
"ViT",
"instance segmentation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/d5fe288b845b6c740fd1c010b1e442b22ed537cd.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Masked Cross-attention Adapters Enable the Characterization of Dense Features"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zVagbJLgkP | VipAct: Visual-Perception Enhancement via Specialized VLM Agent Collaboration and Tool-use | main | Active | Visual Language Model;Visual Perception;Language Agent | applications to computer vision, audio, language, and other modalities | 3;5;6 | 4;5;3 | 3;3;3 | 2;3;3 | 3;3;2 | 4.666667 | 4 | 3 | 2.666667 | 2.666667 | -0.327327 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "My questions are in the weakness. I will raise my score if my questions are answered."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The VIPACT framework’s use of an orchestrator agent that coordinates with specialized agents and vision expert models stands out, as it improves VLMs' performance on fine-grained visual perception tasks by enabling collaborative reasoning. This structured, modular approach allows for flexibility and extensibility, making it adaptable for a wide range of tasks.\n2. By employing System-2 reasoning, VIPACT goes beyond traditional VLM capabilities, integrating intermediate reasoning steps that are more complex and contextually rich. This approach enhances VLMs' ability to manage detailed visual information, which is crucial for tasks requiring in-depth visual analysis.\n3. The paper includes extensive experiments across multiple benchmarks, showing clear performance gains over state-of-the-art methods in diverse visual tasks. These experiments demonstrate the framework's effectiveness and generalization ability, especially in tasks that are inherently challenging for current VLMs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a novel framework aimed at enhancing vision-language models (VLMs) in handling fine-grained visual perception tasks. The VIPACT framework incorporates an orchestrator agent for task management, specialized agents for tasks like image captioning, and vision expert models for detailed visual analysis. This multi-agent approach enables the VLMs to collaborate, utilize specialized tools, and aggregate evidence, leading to improved performance on benchmarks involving intricate visual tasks. Experimental results demonstrate VIPACT's superiority over state-of-the-art models, with ablation studies underscoring the importance of each component. The paper suggests VIPACT as a scalable, flexible system for real-world visual applications"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. VIPACT relies heavily on models like GPT-4o for their advanced instruction-following and function-calling abilities. While the framework is adaptable, current results may not generalize effectively to other VLMs lacking these specific capabilities, restricting the framework's accessibility and broader applicability.\n\n2. In MMVP benchmark, how does the proposed model compared with other vision foundation model like llava, internvl, eagle and so on? Eagle also use multi-expert collaboration, can the authors compare between them?\n\n3. Did the authors use other models instead of GPT4-o? like gemini or internvl? If the performance of the proposed model mainly reply on GPT4-o, I will think the contribution is not enough."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Can the authors put an average column in Table 2 to better show the improvements of the method?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is mainly well-written and easy to follow. \n2. The paper proposes a new multi-agent framework for fine-grained visual perception tasks. \n3. The proposed framework is effective and improves the performance of two datasets over existing baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces VIPACT, a novel framework designed to address fine-grained visual perception tasks, where current Vision-Language Models (VLMs) often underperform. VIPACT utilizes a collaborative multi-agent approach that integrates a central orchestrator agent with specialized agents and vision expert models to achieve more precise and detailed visual understanding.\n\nContributions:\n1. Multi-Agent Collaboration Framework: VIPACT employs an orchestrator agent that manages task analysis, planning, and evidence aggregation, coordinating specialized agents to tackle specific tasks like image captioning and depth estimation. Specialized agents provide focused capabilities (e.g., object detection, depth estimation), enhancing VLM performance in tasks requiring detailed visual analysis.\n2. Comprehensive Benchmarking: VIPACT outperforms state-of-the-art baselines on challenging visual perception benchmarks, including Blink and MMVP.\n3. Ablation and Error Analysis: Through ablation studies, the paper highlights the importance of each component in VIPACT, while error analysis reveals specific limitations of current VLMs in spatial reasoning and fine-grained visual tasks, underscoring areas for further improvement in multi-modal perception systems."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The framework is tested on only one LLM. Testing on more, e.g., Claude / Gemini, would be more convincing and show the generalization of the framework.\n2. The method proposed has limited contribution to the community. There have been multiple papers proposing/applying \"LLM with visual tool use\" to solve vision tasks. The multi-agent framework has also been verified to be effective on various downstream tasks. \n3. The performance of the proposed framework does not seem significant enough. On the Blink dataset, the improvement over CoT is less than 10 %. The improvement in the MMVP dataset is marginal."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What if the authors replace the visual input to the orchestrator agent with a detailed description of it? Can this textual input achieve a satisfactory performance compared with using the original image? The authors may conduct an ablation study comparing performance across three conditions: 1) original image input, 2) detailed textual description input, and 3) both image and textual description as input. This would help isolate the unique contributions of visual versus textual information to the orchestrator agent's performance.\n\nThe reviewer would like to change the rating if the concerns are all resolved."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The authors utilized a VLM-based programmatic framework to perform difficult vision tasks challenging for most existing VLMs.\n2. The description of the ViaAct pipeline is clear and easy to understand.\n3. The experiments are detailed and comprehensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes VipAct, a VLM-based agent framework capable of performing challenging visual perception tasks by decomposing the tasks into sub-tasks which specialized agents and expert models then overtake. The VipAct framework is designed as a hierarchical pipeline, with a VLM as the orchestrator agent and specialized VLMs and expert vision models as the tools called by the orchestrator. Unlike existing visual programming methods (e.g., VisProg and MM-ReAct), ViaAct takes the vision task as its input and the input image. This innovation enables VipAct to decompose the high-level task into visually grounded and feasible subtasks that can be submitted to expert models. Ablation studies justify the introduced components, i.e., multiple agents, visual input, and vision experts."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. It is still unclear how including the input image in the prompt to the orchestrator agent improves the fine-grained visual perception capability, although the experimental results show that removing this visual input leads to a performance decrease. At the beginning of the paper, the authors stated that \"recent studies reveal that state-of-the-art (SOTA) VLMs continue to struggle with fine-grained or low-level visual perception tasks that are trivial for humans\" (L43). Based on this premise, the orchestrator agent is also not capable of calling proper tools as which tools with fine-grained perception functionalities are to be called is also based on the fine-grained perception of the input image. The authors are expected to present more vivid examples of how the agent's behavior differs with and without visual input, which would help clarify this point.\n\n2. Although the authors attempt to conduct fair comparisons, one key aspect - the tool sets used by the compared methods - is not fair enough. L400 states that \"Another limitation is their inability to support images with visual prompts, preventing them from locating visual prompts and proceeding with subsequent operations\". The compared methods were not given the necessary vision expert models to complete the tasks, thereby obtaining inferior results. The reviewer believes that the utilization of the vision experts, such as Visual Prompt Detector, is not part of the paper's innovations and that the compared methods should also be equipped with these vision experts in the experiments. The authors are expected to conduct additional comparisons where baseline methods are equipped with identical tools, or explain why such comparisons might not be feasible or relevant. This would provide a clearer picture of VipAct's unique contributions beyond the use of vision expert models."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose VipAct, a VLM-based agent framework that incorporate multi-agent collaboration and vision expert models to enhance fine-grained visual perception."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024vipact,\ntitle={VipAct: Visual-Perception Enhancement via Specialized {VLM} Agent Collaboration and Tool-use},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zVagbJLgkP},\nnote={under review}\n}"
},
"abstract": {
"value": "While vision-language models (VLMs) have demonstrated remarkable performance across various tasks combining textual and visual information, they continue to struggle with fine-grained visual perception tasks that require detailed pixel-level analysis. Effectively eliciting comprehensive reasoning from VLMs on such intricate visual elements remains an open challenge. In this paper, we present VipAct, an agent framework that enhances VLMs by integrating multi-agent collaboration and vision expert models, enabling more precise visual understanding and comprehensive reasoning. VipAct consists of an orchestrator agent, which manages task requirement analysis, planning, and coordination, along with specialized agents that handle specific tasks such as image captioning and vision expert models that provide high-precision perceptual information. This multi-agent approach allows VLMs to better perform fine-grained visual perception tasks by synergizing planning, reasoning, and tool use. We evaluate VipAct on benchmarks featuring a diverse set of visual perception tasks, with experimental results demonstrating significant performance improvements over state-of-the-art baselines across all tasks. Furthermore, comprehensive ablation studies reveal the critical role of multi-agent collaboration in eliciting more detailed System-2 reasoning and highlight the importance of image input for task planning. Additionally, our error analysis identifies patterns of VLMs' inherent limitations in visual perception, providing insights into potential future improvements. VipAct offers a flexible and extensible framework, paving the way for more advanced visual perception systems across various real-world applications."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Visual Language Model",
"Visual Perception",
"Language Agent"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ecb68c37250643737b3f7095426e3293ad436ed2.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "VipAct: Visual-Perception Enhancement via Specialized VLM Agent Collaboration and Tool-use"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zVp0TVDkrX | CaPulse: Detecting Anomalies by Tuning in to the Causal Rhythms of Time Series | main | Active | Time Series Anomaly Detection | learning on time series and dynamical systems | 3;5;5;6 | 4;4;4;5 | 2;2;2;2 | 2;1;2;3 | 1;3;2;3 | 4.75 | 4.25 | 2 | 2 | 2.25 | 0.662266 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Please address what described in weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper makes an effort to integrate multiple algorithmic building blocks into a learning system."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes an anomaly detection framework that combines several existing algorithmic building blocks, including normalized flows, FFTs, and patch-based masking. The overall objective is to maximize the log-likelihood, which appears to produce a latent representation attributed to causal factors."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "It seems that the learning problem could benefit from a clearer explanation. The objective function appears to be conditioned by $ C_\\text{ind} $ and $C_0$, but the criteria for selecting these values are not immediately obvious. This aspect is essential, especially when considering unsupervised learning tasks with latent variables. It is possible that a two-stage optimization strategy, where $C$ parameters and other model parameters are optimized in an alternating fashion, has been implemented, though a detailed description of this approach does not seem to be readily available. At the very least, it would be helpful if the parameters to be learned were clearly specified.\n\nIn my attempt to understand the algorithm’s operation and the rationale behind its design choices, I encountered a few challenging aspects. These include, for example, the use of FFT in Eq. (1), the orthogonality condition on $ C_\\text{ind} $ and its connection to causal learning, and the role of the \"pyramid\" structure, which may be intended for multi-scale convolution across spatial and temporal dimensions. There are various possible approaches for identifying independent causal factors or periodic patterns at different granularities. In general, a well-written paper typically provides some technical rationale when selecting a specific approach. At present, there appears to be a slight disconnect between the authors’ intended objectives and the selected algorithmic components. For instance, it remains somewhat unclear why the resulting subspaces would lend themselves to causal interpretation or how the noise injection approach contributes to distinguishing confounders.\n\nGiven these considerations, I find it somewhat challenging to fully assess the framework’s novelty at this stage. I am inclined to suggest that additional development and clarification may be beneficial for the paper to reach its full potential for publication."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How does the computational complexity scale with time series length and dimensionality?\nWhat are the computational requirements during training and inference?\nHow is the optimal hyperparameter configuration (Appendix E) for each dataset established?\nCan you provide a comparison with more parameter-efficient baselines?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper's main strength is its integration of causal inference with time series anomaly detection and proposed solution for handling multiple periodicities. This method offers some degree of interpretability through SHAP. The authors do establish strong theoretical foundation for their method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes CaPulse a method for time series anomaly detection that leverages causal inference. It introduces a structural causal model to understand anomaly generation, combines it with periodical normalizing flows for density estimation. The paper purports to address key challenges including label scarcity, data imbalance, and multiple periodicities."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In my view the main weakness is with the empirical evaluation. The proposed method is extremely complex, likely computationally demanding with a large number of hyperparameters. The authors do perform some sensitivity analysis but it is limited. The baselines used for the empirical comparison relies exclusively on similarly complex baselines. The authors do not compare to simple, algorithmic baselines or methods such as Matrix Profile which have proven to outperform state of the art at a computational cost several orders of magnitude smaller. Without such comparisons, it is impossible to assess whether the complexity of the method is justified."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Q1: Why were ablation experiments only conducted on two datasets in Table 2, and what were the effects on the other datasets?\n\nQ2: Causal inference and frequency domain-based methods have already been proposed before. Your method doesn’t seem to have anything novel compared to existing methods. It seems like you are just combining them.\n\nQ3: In Table 1, the methods you compare with are not the best current methods. There is a lack of comparison with the latest methods."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "S1. Time series anomaly detection is important to various domains.\n\nS2. There are quite a few nice illustrations.\n\nS3. This work focuses on an important problem that could have real-world applications.\n\nS4. The figures and tables used in this work are clear and easy to read."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a structural causal model (SCM) to understand the generation process of anomalies in time series data. CaPulse leverages causal tools to identify the true underlying causes of anomalies, enhancing interpretability and generalization capabilities. Additionally, it employs Periodical Normalizing Flows (PeNF) with a novel mask mechanism and attention mechanism to capture multi-period dynamics. This enables the model to effectively address the challenges of label scarcity, data imbalance, and complex multi-periodicity in TSAD."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. The paper conducts ablation experiments solely on two datasets, as shown in Table 2. This narrow focus raises concerns about the generalizability of the findings. A more comprehensive analysis involving additional datasets could provide valuable insights into the method's performance and limitations.\n\nW2. The approach presented appears to lack novelty, as it primarily builds upon established methods of causal inference and frequency domain analysis without offering significant advancements. Instead of innovating, the proposed method seems to merely combine existing techniques. \n\nW3. The comparative analysis in Table 1 is limited, as the authors do not engage with the most advanced and relevant methods currently available in the literature. The selection of baseline models is inadequate, as it overlooks several cutting-edge techniques that could offer a more rigorous benchmark. To strengthen their evaluation, the authors should include comparisons with a wider array of state-of-the-art anomaly detection algorithms, thereby providing a clearer context for assessing the performance of their proposed method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. As mentioned in the paper, I wonder if some non-causal factors(U) such as “user malfunction” or “data collection jitter” can also be considered as causal factors depending on the domain?\n2. What is the rationale for augmenting the raw input time series in the pipeline of CaPulse? Does the method of augmentation influence the performance of the entire framework? Or would it be more helpful to have multiple ways of augmenting instead of just one? Is it enough to simulate real-world disturbances?\n3. How to determine an anomaly judgment based on an anomaly score? Is there a threshold?\n4. What is the meaning of Figure 6c? What was the author trying to express?\n5. In Figure 6a, I don't understand why CaPulse is the only one that can accurately predict the anomalies because I don't know why they are anomalies through the time-series plot."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The CaPulse framework presents a novel approach to time-series anomaly detection by introducing causal inference. In this point, this work is different from traditional deep learning-based methods.\n- Another innovative aspect is the introduction of Periodic Normalizing Flows(PeNF) with a mask mechanism for periodicity awareness. This approach is particularly well-suited for time series with complex multi-periodicity, enhancing both anomaly detection performance and interpretability.\n- This paper provides empirical evidence to support the claims, with interpretability analysis."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed CaPulse, which is a new causality-based framework for time series anomaly detection. The framework includes PaCM, MpCF moduls for causal treatment and multimple periodicities. This approach is periodicity-aware and density-based anomaly detection. Unlike traditional approaches that may fail to capture the underlying mechanisms of anomaly generation, CaPulse builds a structural causal model to understand the root causes of time-series anomalies. The experiments show better accuracy and interpretability than exiting methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- From section 3, a causal view of TSAD includes hard assumptions that might not hold in various real-world settings.\n- The limited number of baselines and benchmark datasets.\n- There is no friendly explanation for the interpretability plot, especially in Figure 7."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024capulse,\ntitle={CaPulse: Detecting Anomalies by Tuning in to the Causal Rhythms of Time Series},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zVp0TVDkrX},\nnote={under review}\n}"
},
"abstract": {
"value": "Time series anomaly detection has garnered considerable attention across diverse domains. While existing methods often fail to capture the underlying mechanisms behind anomaly generation in time series data. In addition, time series anomaly detection often faces several data-related inherent challenges, i.e., label scarcity, data imbalance, and complex multi-periodicity. In this paper, we leverage causal tools and introduce a new causality-based framework, **CaPulse**, which *tunes in* to the underlying *causal pulse* of time series data to effectively detect anomalies. Concretely, we begin by building a structural causal model to decipher the generation processes behind anomalies. To tackle the challenges posed by the data, we propose Periodical Normalizing Flows with a novel mask mechanism and carefully designed periodical learners, creating a periodicity-aware, density-based anomaly detection approach. Extensive experiments on seven real-world datasets demonstrate that CaPulse consistently outperforms existing methods, achieving AUROC improvements of 3% to 17%, with enhanced interpretability."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Time Series Anomaly Detection"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/246112718d84360a14b540723c97d2c5fce7d4b7.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "CaPulse: Detecting Anomalies by Tuning in to the Causal Rhythms of Time Series"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zVtwIWyX4S | MaxSup: Fixing Label Smoothing for Improved Feature Representation | main | Active | Label Smoothing;Regularization;Representation Learning;Explainability | learning theory | 5;5;5;5 | 4;5;4;4 | 3;3;3;2 | 2;2;3;2 | 2;2;1;3 | 5 | 4.25 | 2.75 | 2.25 | 2 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the ablation (Table 1), The paper showed removing the Error-Enhancement is beneficial. Why is that not compared/proposed as inplace for replacement?\n2. Why is the error-enhancement term having a huge negative impact? The logits of the correct class are generally higher while training.\n3. How does it impact the inter-class relationship?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is easy to read and well-written.\n2. Evaluation is performed on multiple modalities to show its application.\n3. An in-depth analysis of the problem of label smoothing was done."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a label regularization method called Max Suppression. It shows that vanilla label smoothing has two components: regularization and error-enhancement term. The error enhancement term has negative effects, and the paper proposes an alternate formulation that mitigates its effect by replacing z_gt with z_max."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper fails to explain the reason behind why replacing z_gt with z_max is the optimal choice.\n2. The targets created by the approach are not a probability, and hence, using it with cross-entropy loss may not be correct.\n3. Figure 2 is very confusing and difficult to understand.\n4. The paper compares the approach across different modalities but should also include results with multiple datasets from the same modality and various architectures (Like done in OLS [2] and Zipf [3])). A lot of the tables could be easily merged to make space for more results.\n5. Label Smoothing is known to affect semantic class relationships [1]. The baseline approaches (OLS [2], Zipf [3]) also formulate their loss function to mitigate this, but it is not handled in the proposed approach.\n6. In Tables 4 and 5, comparison is only done with the vanilla label smoothing. The improvement is minimal. It would be interesting to see the results of baseline approaches on them."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper presents interesting analysis into a deeper understanding of label smoothing, which I appreciate. This understanding is then leveraged to propose a new approach.\n- The proposed approach is very simple, and seems to perform well empirically, making it a strong candidate for a drop-in replacement/improvement on label smoothing. The **simplicity is a big plus**, as it has the potential for wider adoption and impact. Moreover, the supporting analysis seems to be solid, reducing the likelihood of the performance failing to generalise beyond the experiments in the paper."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper analyses the effect of label smoothing for image classification. By analysing the loss they find that label smoothing (compared to vanilla cross entropy) encourages the model to be more confident when it is incorrect. Based on this finding they propose MaxSup, a similarly simple regularisation approach that penalises the max logit rather than the logit corresponding to the ground truth label. They then demonstrate consistent improvements over label smoothing in terms of test accuracy on a range of experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although I have tried my best to cover the content in enough detail, due to time constraints, I may have misunderstood/missed aspects of the paper. I encourage the authors to correct/clarify this if it is the case. I also welcome the authors to challenge me if they disagree with some of the more subjective points I make. I will actively engage during the rebuttal period.\n\n1. Positioning relative to existing/concurrent work. Overall, the paper lacks appropriate acknowledgement to existing work relating to label smoothing. \n 1. The empirical notion that label smoothing results in more confident errors has been around since 2022 [1], whilst the authors claim this to be a completely novel revelation. Moreover, the type of analysis performed in this paper is also present in [2]. Although [2] is not peer reviewed, and addresses a different problem, it contains substantial overlap with section 3 of the paper. In particular how the logits are suppressed differently depending on whether a prediction is likely to be correct or not. Note that I do not suspect any plagiarism, however, as existing/concurrent work, the above references should be clearly acknowledged and contextually placed in relation to this work. Although it is somewhat grey, I would deem [2] to be concurrent work as it is not published, but has been available on arxiv since March of this year. The authors should accordingly temper any claims of \"previously unknown/novel\".\n 2. The authors also do not reference a number of relevant background work investigating label smoothing in the context of knowledge distillation [3,4,5], which are particularly concerned with the \"erasure of knowledge\". [6], which empirically shows label smoothing degrades transfer learning and also empirically measures increased tightness and separation of feature clusters, is an especially important reference but is also missing. 
Similarly, works that investigate the effect of LS on neural collapse also measure these empirical properties [7,8]. The authors should add these papers to the related work. [6] also examines a regulariser that minimises the l2-norm of the logits (logit penalty), which is conceptually similar to MaxSup since the l2-norm tends to be dominated by the max logit (or MaxSup is like minimising the infinity-norm). Ideally, this would be added to Tabs. 2 and 3 although at the least it should be clearly referenced and discussed in the paper.\n2. Lacking analysis. Although it is somewhat intuitive that encouraging confidence on errors would hurt generalisation, the additional analysis that is performed is somewhat lacking.\n 1. The authors analyse the inter-class separability and intra-class variation of feature representations, however, they use only the illustrative plots found in [9], which are not very scientific. Since [9] was published in 2019 there have been a number of works that directly quantify separability/variation (e.g. [6,7]). It would greatly strengthen this part of the work if the authors were to provide such numerical measures over the training and test datasets. Moreover, there is a lack of comparison to vanilla CE, which should be the real baseline.\n 2. The discussion in Sec. 3.3 is somewhat handwavy. As the visualised data is primarily *training* data, better class separation doesn't necessarily mean better accuracy on *test* data. There is also no reference/experiment to support the claim that greater intra-class variation leads to improved representation learning. The authors should use [6] as a reference in the context of these claims, as in [6] they indeed show empirical results similar to the authors claims by comparing the test accuracy vs transfer linear probing (representation learning) performance of label smoothing. Additionally, the submission would be strengthened with a transfer learning experiment (like in [6] Tab. 1). 
It would be great if it can be shown that MaxSup is able to improve test accuracy without sacrificing transfer performance (unlike label smoothing). The semantic segmentation experiment seems to do this, however, it is unclear whether or not the downstream segmentation loss also includes MaxSup/LS and the experiment lacks a vanilla CE baseline (which the authors should include).\n 2. It is not really clear how the behaviour in Sec 3.3 follows from the analysis in 3.1/3.2. The authors ideally should be able to link the theory with regards to the different training losses to the difference in observed separation/variation. This, again, would greatly improve Sec 3.3. \n 2. The paper would benefit from some additional explicit discussion on how increasing confidence on training errors would lead to worse generalisation in terms of top 1 test accuracy. Although it seems to make sense superficially, in my mind there is still a gap in reasoning. For example adding a sentence like: LS weakens the strength of supervision to the GT class on errors, when a model really should be learning to correct itself rather than being regularised to prevent overfitting. \n3. Presentation, missing information and clarity.\n 1. The alpha schedule in Tab. 6 comes out of nowhere. It seems to be referenced after Eq. 7 even though it does not appear there. This seems to just be sloppy writing and needs to be clarified properly. Although Tab. 6 suggests that an alpha schedule is generally beneficial, it is orthogonal to the main body of the paper. I think the paper would benefit either from an expanded (and separate) discussion and analysis on the alpha schedule, or just simply from its removal from the paper.\n 2. The wording and notation in Sec. 3.1 is confusing. From line 124 onwards the use of \"cross entropy\", \"label smoothing\" and \"loss\" are combined in different ways to refer to distinct things that are easily confused. 
For example, the \"cross entropy loss with label smoothing\" is not the same thing as the \"label smoothing loss\" or the \"cross entropy between two distributions\" but all are related and interlinked. I would like to see the authors improve the clarity of Sec 3.1 with updated wording and maybe some additional underbraces to the equations (like in Eq. 6). Sec 3. could also benefit from an illustrative figure for LS vs MaxSup (e.g. using a bar chart to represent logits), although this is not essential.\n 3. Fig. 2. is difficult to interpret. It is hard to compare when it is not clearly labelled on the figure which row is LS and which is MaxSup. The decision boundaries arguably don't contribute to the takeaways so the diagrams may be easier to interpret if they are omitted. The choice of showing test and training data together on the same plot without easily being able to tell the difference without pixel peeping is also poor (and this also doesn't contribute to the takeaways about variation/separation). Finally, I am generally lukewarm on these plots (as mentioned in 2.) as their axes can easily be linearly scaled to visually tell different stories and it is hard to normalise for this scaling (for example Muller [9] scale their axes but the authors in this paper do not). As mentioned before, numerical measures [6,7] may convey the point more clearly.\n 3. Although I like the CAM figures, the authors should add a comparison to the baseline cross entropy model as well. The figures would also benefit from subcaptions describing what to look out for, e.g. \"label smoothing fails to consider the tail of the monkey\".\n 4. Overconfidence in the context of model calibration and overconfidence in the sense of error enhancement are not clearly delineated. LS often reduces the former [9,10], but in this paper the authors show that LS increases the latter. The authors should make clear the difference between the two definitions of overconfidence to avoid confusion. 
This is important since LS is widely known to reduce calibration overconfidence [9,10].\n 5. Generally speaking, there are a number of grammar and spelling errors. The authors should use an automated grammar checking tool to correct these.\n4. Choice of Deit as a model. This choice of model as a baseline to perform ablations and analysis is odd. Deit uses hard label distillation on an additional token during training. Although this doesn't necessarily conflict with the theory presented in the paper, it does add unnecessary complexity to the experiments in my opinion. Additionally, it is unclear when and where Mixup/Cutmix are used. To me it makes a lot more sense to perform a simple analysis of vanilla CE vs label smoothing, rather than also throwing in all the additional label augmentations that come with Deit by default that may muddy the results (cutmix, mixup, hard-label distillation). Although this is a comparatively minor complaint, I would prefer it if the authors simply used ResNet-50 for Tab. 1 and Fig. 2.. Alternatively, the authors can add the Deit distillation into the theory/discussion of Sec 3.\n\nI'd like to note that I think the **core of the paper is strong**, especially the proposed MaxSup, and am **keen to increase my score**. However, I believe that there are many issues with the manuscript in its current state that need to be addressed before it is ready for publication. I look forward to updates from the authors. Hopefully, the paper will improve via the review process.\n\n[1] Zhu et al. Rethinking Confidence Calibration for Failure Prediction, ECCV 2022\n\n[2] Xia et al. Understanding Why Label Smoothing Degrades Selective Classification and How to Fix It, 2024\n\n[3] Yuan et al. Revisiting Knowledge Distillation via Label Smoothing Regularization, CVPR 2020\n\n[4] Shen et al. Is Label Smoothing Truly Incompatible with Knowledge Distillation: An Empirical Study, ICLR 2021\n\n[5] Chandrasegaran et al. 
Revisiting Label Smoothing and Knowledge Distillation Compatibility: What was Missing? ICML 2022\n\n[6] Kornblith et al. Why Do Better Loss Functions Lead to Less Transferable Features? NeurIPS 2021\n\n[7] Zhou et al. Are All Losses Created Equal: A Neural Collapse Perspective, NeurIPS 2022\n\n[8] Guo et al. Cross Entropy versus Label Smoothing: A Neural Collapse Perspective, 2024\n\n[9] Muller et al. When Does Label Smoothing Help?\n\n[10] Liu et al. The Devil is in the Margin: Margin-based Label Smoothing for Network Calibration, CVPR 2022"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weakness for details."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tThis work reports both CV and NLP results, which is a positive aspect.\n2.\tThis method sounds sensible.\n3.\tThis work presents several theorems to introduce the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper advances the Label Smoothing technique and presents a Max Suppression method. Extensive experiments on image classification and neural machine translation tasks verify the effectiveness of the proposed method. Also, this work provides several theoretical analyses to introduce the proposed method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tWhile the method proposed in this work offer some novel ideas, the overall contribution seems to be more of an engineering refinement. \n\n2.\tThe experimental results (e.g., Tables 3 and 4) do not show a substantial improvement over existing approaches. \n\n3.\tIn my opinion, the primary reason for these experimental results is that only a minor enhancement was made to the Label Smoothing technique, without fundamentally overcoming its inherent limitations or proposing a more robust method. To advance in this direction, the focus should be on exploring the relationships between categories to derive more accurate soft-label encodings for each category, thereby addressing the inherent limitations of Label Smoothing.\n\n4.\tProviding two specific examples of Eq. (1) and Eq. (8) would help in understanding the difference between Label Smoothing and Max Suppression.\n\n5.\tLines 299-302: Providing the quantitative results regarding inter-class separability and intra-class variation would be beneficial in better elucidating Figure 2.\n\nSmall issues:\n\n1.\tLine 87: calibrated classifiers M¨uller et al. (2019). -> calibrated classifiers (M¨uller et al. 2019).\n\n2.\tWhich dataset was used in Figure 2?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "n/a"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Decomposing the Label Smoothing Loss to two terms is interesting and removing the error-enhancement term sounds reasonable. \n\n2. The experiments show that MaxSup is better than the LS, validating the effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper reveals that Label Smoothing Loss can be decomposed into two opposing components: a regularization term and an error-enhancement term. Preliminary studies confirm that the performance improvements from Label Smoothing Loss are solely due to the regularization term. Based on this insight, MaxSup is proposed to eliminate the error-enhancement term for incorrect predictions. Experiments are conducted on Image Classification and Machine Translation."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation of MaxSup is unclear. In Section 3.1, Label Smoothing Loss is decomposed into two components: the regularization term and the error-enhancement term. Preliminary studies validate that using only the regularization term is effective. If so, why not directly minimize the regularization term as the learning objective?\n\n2. The incorrect prediction samples may bring noise to MaxSup during optimization. \n\n3. Can you provide the result of using only the regularization term in Table 2 and Table 3?\n\n4. Figure 2 shows a few samples that are correctly classified by the proposed method but incorrectly by the baseline. Are there any samples that are correctly classified by the baseline but incorrectly by the proposed method?\n\n5. Some notations are confusing: \n\n a. What is $q$ in Lemma 3.2? The definition of $q$ is found in the Appendix.\n\n b. Where is the $\\lambda$ in Eq. (7)? \n\n6. Some typos, e.g., Line 266, \"Similarity:\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024maxsup,\ntitle={MaxSup: Fixing Label Smoothing for Improved Feature Representation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zVtwIWyX4S},\nnote={under review}\n}"
},
"abstract": {
"value": "Label Smoothing aims to prevent Neural Networks from making over-confident predictions and improve generalization.\nDue to its effectiveness, it has become an indispensable ingredient of the training recipe for tasks such as Image Recognition and Neural Machine Translation. Despite that, previous work shows it encourages an overly tight cluster in the feature space, which `erases' the similarity information of individual examples, resulting in impaired representation learning. By isolating the loss induced by Label Smoothing into a combination of a regularization term and an error-enhancement term, we reveal a previously unknown defect, i.e., it indeed encourages classifiers to be over-confident, when they make incorrect predictions. To remedy this, we present a solution called Max Suppression (MaxSup), which consistently applies the intended regularization effect during training, independent of the correctness of prediction. By visualizing the learned features, we show that MaxSup successfully enlarges intra-class variations, while improving the inter-class separability. We further conduct experiments on Image Classification and Machine Translation tasks, validating the superiority of Max Suppression. The code implementation is available at [anonymous repository](https://anonymous.4open.science/r/Maximum-Suppression-Regularization-DB0C)."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Label Smoothing",
"Regularization",
"Representation Learning",
"Explainability"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7308e4a099d5af63aededf1cd6ad6c6c0645878d.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MaxSup: Fixing Label Smoothing for Improved Feature Representation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zWASuY0t6o | Focus On This, Not That! Steering LLMs With Adaptive Feature Specification | main | Active | instruction tuning;LLMs;spurious correlations;robustness;distribution shift;bias | foundation or frontier models, including LLMs | 3;3;5;8 | 3;3;3;4 | 2;2;3;3 | 1;2;2;3 | 3;3;3;3 | 4.75 | 3.25 | 2.5 | 2 | 3 | 0.916949 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Can the authors clarify on the difference between FIT and IT? More specifically how is the $I_{focus}$ included in the data? Is this included for the IT comparison, or is the data left out? I believe including this data as part of $x$ would be a necessary comparison as well."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The proposed work targets improving robustness to spurious correlations. Improving robustness to spurious correlations is an important problem that has been well studied in classification problems and is also important to study for more recent LLMs. \n- The paper is overall well-written. I appreciate the authors have highlighted the key takeaways one act experiment."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Focus Instruction Tuning, a finetuning strategy for focusing LLMs on specific features. The method is applied to classification settings with spurious correlations. Results shown on three tasks indicate for potential to mitigate bias over traditional finetuning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The experiments for the paper consider older tasks including NLI and sentiment. These datasets are old, and there are already many existing approaches for reducing bias in these datasets which the authors have not considered in this work. I recommend the authors to compare against existing approaches for example: https://aclanthology.org/2020.acl-main.769/ and https://aclanthology.org/2022.acl-long.190.pdf. Further the author could consider more recent benchmark tasks that the models are typically evaluated on as even the included QA dataset is a few years old (2022).\n- The proposed method is limited to classification in the experiments, however there are also many tasks these LLMs are capable of including general QA, MCQ, generation, etc. which may have bias and also important to study.\n- The evaluations that are done do not seems to be consistent with prior works as they separate based on the label for computing accuracy. Could the authors also include the full accuracy on the datasets for comparison with prior work?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See my comments on the weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper is well written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose an instruction fine-tuning method called Focus Instruction Tuning (FIT), designed to guide large language models (LLMs) to prioritize causal features while disregarding spurious ones. Specifically, the authors construct an instruction fine-tuning dataset based on Equations (1) and (2), categorizing the desired output labels into four groups. The concurrence rate between spurious features and labels is adjusted at various levels to control the difficulty of the test set. In the experiments, the authors evaluate the model’s focus instruction capability, denoted as $\\hat{y}\\sim p_{\\theta}(y|I,I_{text},x)$, representing the probability of predicting the label given the focus instruction. The metric $A_{focus}$ is used to compare different methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have several concerns about this paper:\n\n1. It appears that both training and evaluation require prior knowledge of spurious features (relative to a given model), which may be challenging in practical applications.\n\n2. This approach primarily enhances instruction-following in a narrow domain — specifically, where the model learns to \"focus on X while ignoring Y\" — rather than truly mitigating spurious correlations. After training, the model still relies on \"focus and ignore\" instructions to control its output. \n\n According to my experiment, part of the spurious correlation problem originates from the supervised fine-tuning (SFT) data itself, highlighting the importance of data quality and diversity, as emphasized in technical reports for models like LLaMA and Nemotron. For instance, Nemotron achieves this by applying a Cartesian product over task diversity (e.g., open Q&A, writing), topic diversity (e.g., STEM, humanities), and instruction diversity (e.g., JSON output, yes-or-no answers). FIT seems to add only a single dimension of variation (\"focus and ignore\") to the SFT data.\n\n3. While FIT demonstrates an improvement in the model's ability to follow \"focus and ignore\" instructions, this is expected given that the model is specifically trained on such patterns. However, it is unclear how this affects other instruction-following abilities — do they remain stable, or do they degrade? Can author demonstrate that?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide more insight into automating feature selection for focus tuning? How does FIT handle feature ambiguity in complex datasets?\n2. How does FIT compare empirically with existing context distillation methods, and what unique improvements does it provide over these methods?\n3. Are there additional strategies the authors could explore to enhance FIT’s generalization on overlapping features, especially in high-dimensional feature spaces?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. **Originality**: FIT's feature-based focus approach offers flexibility in controlling LLM responses based on specified attributes, advancing traditional instruction tuning techniques. However, this technique in my opinion can be viewed as a variant of context distillation, which limits the originality of this paper. \n2. **Quality**: The experiments are comprehensive, encompassing various LLM models, datasets, and evaluation conditions. The robustness of FIT against spurious correlations and distribution shifts is well-documented, establishing the method's adaptability and efficacy.\n3. **Clarity**: The methodology is detailed and clear, with illustrative examples and visualizations that support understanding of FIT’s impact. However, further contextualization of feature selection strategies could enhance comprehension.\n4. **Significance**: FIT’s potential to mitigate biases and enable feature-based focus presents meaningful advancements in applying LLMs in ethically sensitive areas, such as fair NLP applications and explainable AI."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Focus Instruction Tuning (FIT), a method designed to guide Large Language Models (LLMs) in emphasizing specific features or disregarding others during task execution. FIT addresses limitations in traditional Instruction Tuning (IT) by training LLMs to avoid spurious correlations and biases, thereby enhancing model robustness and fairness. This approach enables adaptive focus on task-relevant features, leading to improved performance in scenarios involving distributional shifts and unfamiliar contexts. Empirical evaluations across multiple NLP tasks demonstrate FIT's effectiveness in managing various spurious and causal features and in mitigating biases."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Overlap with Context Distillation**: FIT shares similarities with context distillation methods, potentially limiting its distinctiveness and raises the question of where FIT truly diverges from established context distillation approaches. For instance, studies such as Snell et al. (2022) on learning by distilling context (https://arxiv.org/abs/2209.15189) explore related concepts. I first saw context distillation in Anthropic’s first paper: https://arxiv.org/abs/2112.00861 So I think it’s probably fairly well-known at this point. Additional comparisons with established context distillation methods could clarify FIT’s unique contributions. The authors may benefit from clarifying these distinctions to strengthen the paper's contributions and make its novel aspects more apparent.\n2. **Dependence on Human Guidance**: FIT relies on human intervention to determine which features are task-relevant or spurious, which may not be feasible in highly dynamic or uncertain domains. Automation in identifying these attributes would improve applicability.\n3. **Generalization Challenges**: The model's ability to generalize across unseen, highly overlapping features is noted as a limitation. More discussion on addressing this would help clarify FIT’s potential for broader generalization."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. I find it surprising that certain models able to follow Focus(C) + Ignore(S) instructions even under vanilla SFT, across all test conditions on the SS dataset (e.g. fig. 2, Mistral, SFT)? Could the authors elaborate on this, and possibly temper the claims in lines 360--362 as a result?\n2. Similarly, could the authors elaborate how FIT can improve accuracy on the null focus instruction set (e.g. fig 4., Mistral, SFT vs. FIT)? I would have imagined there should be no change here?\n3. Could the authors clarify at which stage FIT is applied? Is it used in place of existing instruction tuning, or a second stage after standard instruction tuning? As a broader question, do the authors have any intuition about a) whether FIT could be applied to downstream chat tuning, or b) whether chat tuning might undo some of the success of FIT?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- FIT is a simple, intuitive, and effective idea.\n- The paper is well-written and clear. The diagram in fig 1 and the remaining figures are clearly presented.\n- Over the three models and three evaluations presented, FIT appears to be effective (though see my questions below), showing an improvement over the standard IT baseline.\n- The paper presents reasonably thorough evaluations. Manipulating the extent of the spurious correlation, and which label it is correlated with, is a particularly nice addition.\n- Fine-tuning to actively focus on the spurious feature and ignore the causal feature provides an interesting control and highlights the flexibility of the method.\n- The ablations in section 5 demonstrate that the model does not overfit to the specific focus prompts used during training."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Motivated by the observation that instruction-tuned LLMs tend to overly rely on spurious features, this paper proposes a new method, FIT, for instruction tuning LLMs while focusing on or ignoring certain features in the input. The method is a simple, intuitive modification to standard instruction tuning to include an additional focus instruction, such as \"Direct your attention solely to X\" or \"Exclude Y entirely from your evaluation.\" Models trained with FIT can then be flexibly guided to focus on or ignore certain features at inference time, by modifying the prompt in a similar way. The authors evaluate FIT on three models across three datasets, presenting promising results for reducing over-reliance on spurious correlations and mitigating bias involving demographic attributes. The authors also show that models trained with FIT can also generalize to focus prompts other than those in the training data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "There are a number of weaknesses that at the very least warrant some discussion.\n1. Training using FIT requires access to ground-truth information about which features are spurious and which are causal, or group attributes in a fairness setting. These are often unavailable, or undesirable to collect. How do the authors expect FIT to perform if, for example, trained on a dataset where y_spurious is available, but tested in a different setting where unavailable? It would be helpful if the authors could discuss this in their limitations section.\n2. The work is restricted to classification settings, rather than open-ended generation. This is a reasonable assumption but also significantly limits the applicability of the approach. This should be discussed in the limitations section, if not earlier.\n3. While the evaluations are reasonably comprehensive, the paper would benefit greatly from an explicit test of out-of-distribution generalization w.r.t. training domain. For example, how well would a model trained to follow instructions on SS be able to follow instructions on SMNLI without training?\n\nTo my eyes, this paper presents a flexible method for specifying which features should be used and which ignored at inference time. Yet, the authors consistently frame the paper in terms of spurious or causal features. In practice, it is often unclear whether a feature is actually casual. I wonder if the paper might benefit from framing similar to \"core vs. spurious\", or \"intended vs. unintended\", rather than \"spurious vs. causal\"?\n\n**Minor**: The final section of the related work, on refocusing LLMs, is a little confusing in light of the broader arc of the paper. After reading, I half expected the authors to introduce something akin to LlamaGuard, rather than a new instruction fine-tuning method. I wonder if this could be relegated to a broader related work section later in the paper, for the sake of narrative clarity."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We introduce a method to trains LLMs to adaptively condition their task behaviours based on specified features."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024focus,\ntitle={Focus On This, Not That! Steering {LLM}s With Adaptive Feature Specification},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zWASuY0t6o},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite the success of Instruction Tuning (IT) in training large language models (LLMs) to perform arbitrary user-specified tasks, these models often still leverage spurious or biased features learned from their training data, leading to undesired behaviours when deploying them in new contexts. In this work, we introduce *Focus Instruction Tuning* (FIT), which trains LLMs to condition their responses by ''focusing on'' specific features whilst ignoring others, leading to different behaviours based on which features are specified. Across several experimental settings, we show that focus-tuned models can be adaptively steered by focusing on different features at inference-time, such as (a) improving robustness by focusing on task-causal features and ignoring spurious features, and (b) mitigating bias by ignoring demographic categories. Furthermore, FIT can steer behaviour in new contexts, generalising under distribution shift and to new unseen features at inference time, thereby facilitating more robust, fair, and explainable LLM applications in real-world environments."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"instruction tuning",
"LLMs",
"spurious correlations",
"robustness",
"distribution shift",
"bias"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/41543deac08c63eaaab2da5c69290b61ed86461b.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Focus On This, Not That! Steering LLMs With Adaptive Feature Specification"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zWYHsbuedA | Combating Hidden Vulnerabilities in Computer Vision Tasks | main | Active | Computer Vision;Hidden Vulnerabilities | applications to computer vision, audio, language, and other modalities | 5;5;5;5 | 4;4;5;3 | 3;2;3;2 | 3;2;3;2 | 4;2;3;3 | 5 | 4 | 2.5 | 2.5 | 3 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "While MARTINI shows promising results on small and medium-sized models, is it scalable to large-scale models, such as LLaMA-7B or GPT-3? Understanding potential challenges or limitations in applying MARTINI to these larger models would clarify its practical applicability in real-world, large-scale settings."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper introduces a trigger reverse-engineering mechanism and a novel transformation layer that enables it to generalize across various backdoor attacks, including those that modify abstract features. \n\nThis paper provides extensive evaluation on 14 types of backdoor attacks across several datasets and model architectures and can outperform 12 state-of-the-art techniques by significantly reducing the ASR within an acceptable process time."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents MARTINI, an innovative backdoor defense mechanism designed to effectively mitigate a diverse range of backdoor attacks in deep learning models. MARTINI operates by reverse-engineering trigger methods to generate transformed samples that resemble backdoor-affected inputs. These samples are subsequently used to retrain the model, aiming to weaken or eliminate backdoor vulnerabilities. The method is evaluated across multiple tasks, including self-supervised learning and object detection, demonstrating its capability to significantly reduce attack success rates with minimal impact on model accuracy. MARTINI proves to be versatile, adaptable to various types of backdoors, and resilient across different attack scenarios, providing a robust and comprehensive defense against backdoor threats in deep learning models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "MARTINI's search process primarily optimizes a single type of backdoor attack and generates diverse trigger patterns within that type by adjusting the transformation layer parameters. However, when faced with multi-trigger or complex backdoor attacks, this simple iterative search may struggle to effectively identify the optimal parameter combinations, potentially leading to limitations in MARTINI's effectiveness in detecting and defending against such advanced backdoor attacks.\n\nSome critical information is missing in the evaluation section. For example, what are the hardware settings used for evaluation? What are the training parameters used for the decoder and victim model during model training? Without such information, MARTINI’s effectiveness may be limited, especially in resource-constrained or high real-time demand scenarios.\n\n\nThe paper primarily evaluates MARTINI on small and medium-sized deep learning models and datasets; however, its performance and efficiency on large-scale pretrained models (such as GPT-3, LLaMA-7B, etc.) remain unclear."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What is the performance of the proposed scheme on larger datasets?\n\nCould you discuss the computational overhead and the potential impact on the classifier? Please refer to the weaknesses for a detailed explanation."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "It shows good performance in defending against backdoor attacks with complex and semantic triggers.\n\nThe design is trivial, easy to understand, and can be applied to classification, self-supervised learning, and object detection."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents MARTINI, a mitigation technique that removes backdoor from models. It takes a general approach by modeling and reverse-engineering backdoor triggers through feature transformation. The method uses a specially designed transformation layer that can approximate various types of backdoor attacks by refining feature vector. It then generates synthetic backdoor samples using this transformation layer and uses them along with correct labels to train out the planted backdoor. They also show MARTINI's generalizability to self-supervised learning and object detection tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The autoencoder-based trigger reverse engineering has been around since 2020 (e.g., Gangsweep, De-trigger autoencoder, etc.), and it makes good sense to model and reconstruct complex triggers in the feature space rather than the pixel domain. However, the performance of this scheme is constrained by the quality and effectiveness of the autoencoder, as it must be jointly trained (either fully or partially) using feedback from the classifier. This increases the difficulty of training by effectively raising the complexity of the model and objective function, especially for complex datasets with high-resolution inputs. The experimental results only show images with a maximum resolution of 178*218, which raises concerns about its performance on larger datasets.\n\nAnother concern is that it appears a separate transformation layer must be trained for each possible target class to reconstruct the feature space trigger. This would significantly increase the computational overhead for larger datasets with more classes, such as ImageNet, which has 1,000 classes.\n\nSince the proposed scheme lacks a detection mechanism, it will always require extensive autoencoder training, trigger reconstruction, and classifier fine-tuning to remove a hypothetical existing backdoor. This introduces unnecessary training overhead and could lead to performance degradation or distribution drift, as the new dataset with the recovered trigger might have a slightly different distribution. This makes the approach impractical for many real-world applications."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "How do you pair the decoded image with the target label? Like NC?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1) A new backdoor defense method has been proposed for mitigating the backdoor threat.\n\n2) Extensive experiments are conducted on five datasets to demonstrate the effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a trigger reverse-engineering method (MARTINI) for backdoor defense, which optimizes a transformation layer to induce the backdoor behavior on clean samples for trigger reconstruction. Extensive experiments are conducted with various backdoor attack & defense methods on five datasets to evaluate the effectiveness of MARTINI."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) . The authors' intuition behind the MARTINI is that the perturbation of backdoor triggers is dependent on the original image pixel values in its neighboring area. This assumption is inconsistent with the facts that many attackers inject the invisible sample-agonistic trigger to induce backdoor attacks [1,2] or more complex triggers [3]. Although the experiment seems demonstrate the effectiveness of the MARTINI on those independent cases ( BadNets, SIG), the theoretical analysis is needed. \n\n2) In fact, trigger reverse-engineering methods hardly reconstruct the original injected triggers instead of the same effect adversarial noise (like NC on most cases). How to only employ a simple transformation layer to ensure the high-quality reconstruction in Figure 1? And according to the pipeline shown in Figure 3 and the following loss function, it's confused to generate such vivid triggers from the clean samples.\n\n3) Is $a$ different from $a'$? Figure 1 is discussed in lines of 143 and 164 with different backdoor defense methods, where the figure needs to be refined for better readability. \n\n4) Writing typos should be revised, such line 37.\n\n5) Motivation section is not clear to support employing the transformation layer to achieve backdoor defense. \n\n6) Implementation details about MARTINI are not reported.\n\n\n[1] Liu Y, Ma X, Bailey J, et al. Reflection backdoor: A natural backdoor attack on deep neural networks[C]//Computer Vision–ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part X 16. Springer International Publishing, 2020: 182-199.\n[2] Wang R, Wan R, Guo Z, et al. Spy-Watermark: Robust Invisible Watermarking for Backdoor Attack[J]. arXiv preprint arXiv:2401.02031, 2024.\n\n[3]Zhang J, Dongdong C, Huang Q, et al. Poison ink: Robust and invisible backdoor attack[J]. IEEE Transactions on Image Processing, 2022, 31: 5691-5705."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "I would like the authors to provide explanations for the concerns raised in the first and second points. Additionally, for the third point, I would like to see visual examples of the reverse-engineered triggers in object detection tasks, particularly for classic trigger patterns such as \"black-and-white checkerboard,\" \"random,\" or \"watermelon\". If I had a clearer understanding of the threat model, I believe I would have given a higher score."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The experiments are highly detailed, covering a wide range of tasks from image classification to object detection.\n\n2. The paper provides a thorough and detailed explanation of the methodology, making it easy for me to fully grasp the nuances and specifics of the approach.\n\n3. The method appears to be effective from an intuitive standpoint."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes MARTINI, a new backdoor mitigation technique that effectively counters various backdoor attacks in deep learning models, including semantic-based triggers. MARTINI uses a trigger reverse-engineering method to generate backdoor samples with a similar attack effect, which, when paired with correct labels in training, removes backdoor effects."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The primary issue is an unclear threat model. In the Trigger Reverse-engineering section, how does the defender know the attacker's target class? Additionally, how does the proposed method handle an ALL-to-ALL attack, and a source specific attack?\n\n2. The proposed approach involves numerous modules and loss functions, which may make it difficult for the community to reproduce the results and fully utilize the method. While the authors provide justification for the necessity of the four loss functions in the ablation study in the appendix, I still have concerns. Why does it seem that each component contributes equally to the overall result? For instance, the performance degrades significantly when $L_{smooth}$ is removed. Although I understand its value for handling more stealthy backdoor attacks, why does it also perform well with an obvious trigger, especially in tasks like object detection with conspicuous triggers? My perspective is that for a method to be truly effective and broadly applicable, there must be a key component that plays a primary role, rather than having every part constantly contribute equally to the outcome. \n\n3. The authors claim that the proposed approach can mitigate backdoors in self-supervised learning and object detection. I am concerned this might be an overclaim. Firstly, the authors do not compare their method with existing backdoor defense approaches for object detection tasks (such as Django[1] and ODSCAN[2]). My main concern is that triggers in object detection tasks are often more obvious patches, so how does the proposed method effectively reverse-engineer these trigger patterns? As I mentioned in my second point, this may conflict with components such as $L_{smooth}$ and other modules in the approach. 
I would like to see examples of the reverse-engineered triggers in object detection tasks, especially for classic trigger patterns like \"black-and-white checkerboard,\" \"random,\" or \"watermelon.\"\n\n\n\n[1] Django: Detecting Trojans in Object Detection Models via Gaussian Focus Calibration, NeurIPS 2023.\n[2] ODSCAN: Backdoor Scanning for Object Detection Models, S&P 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024combating,\ntitle={Combating Hidden Vulnerabilities in Computer Vision Tasks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zWYHsbuedA},\nnote={under review}\n}"
},
"abstract": {
"value": "Backdoor attacks are among the most prominent security threats to deep learning models. Traditional backdoors leverage static trigger patterns, such as a red square patch. They can be removed by existing defense techniques.\nHowever, recent backdoor attacks use semantic features as the trigger. Existing techniques largely fall short when facing such backdoors. In this paper, we propose a novel backdoor mitigation technique, MARTINI, that effectively mitigates various backdoors. It features a specially designed trigger reverse-engineering method for constructing backdoor samples that have a similar attack effect as the injected backdoor across a spectrum of attacks. Using the samples derived from MARTINI, paired with the correct labels, in training can remove injected backdoor effects in deep learning models. Our evaluation on 14 types of backdoor attacks in image classification shows that MARTINI can reduce the attack success rate (ASR) from 96.56% to 5.17% on average, outperforming 12 state-of-the-art backdoor removal approaches, which at best reduce the ASR to 26.56%. It can also mitigate backdoors in self-supervised learning and object detection."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Computer Vision",
"Hidden Vulnerabilities"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b61cd33a28366f3ae94319f5b133abaf93f371b9.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Combating Hidden Vulnerabilities in Computer Vision Tasks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zXCnIyX9MG | Shared-AE: Unsupervised Identification of Shared Subspaces in High-dimensional Neural and Behavioral Activity | main | Active | Computational neuroscience;Multimodal;Social behavior | applications to neuroscience & cognitive science | 3;3;5;8 | 4;4;3;4 | 3;2;3;3 | 2;2;2;3 | 2;3;2;3 | 4.75 | 3.75 | 2.75 | 2.25 | 2.5 | -0.070535 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Questions\n\nHow does this work compare to the ICLR 2024 paper of Gondur, Sikandar,\nSchaffer, Aoi, and Keeley (Multi-modal Gaussian Process Variational\nAutoencoders for neural and behavioral data)? That paper also has a\nshared multi-modal embedding and separate (within modality) embeddings\nand has also been used for complex behavioral tasks ( hawkmoth\ntracking a moving flower and limb movement of drosophila with\nsimultaneous neural recordings).\n\nThe paper says that you\texamined the influence of latent dimensions on reconstruction\naccuracy. Was that using the test data\tor some\tseparate data?\t(If the\ttest data, how do\nyou justify that?) How are the other parameters set? -\tthe paper is vague on this.\n\nPlease elaborate on why equal latent subspace dimensions are required."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Strengths\n- The paper is pretty clearly written\n\n- The artificial data problem helps to clarify how the algorithm works\n\n- The real world experiments are\timportant to demonstrate practicality in this important area"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper develops a method for learning shared and private\nembeddings between two or more sources of information (modalities).\nThey apply the algorithm to two problems with behavioral and neural\ndata as well as an illustrative artificial data problem."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses\n\n- Missing comparison to an important related ICLR24 paper (see questions below). If the authors can compare to results from that algorithm, or otherwise justify the superiority of this method (or superiority in some settings/situations), I would likely improve my rating. \n\n- Unclear how parameters were set (see questions below)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "My questions are the ones raised in weaknesses and Minor weaknesses/questions."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tThe paper is well-presented. The goal is clear and many analyses including both simulated datasets and two experimental datasets with different behavioral complexities are analyzed. \n2.\tVarious analyses such as connecting behavioral variates to corresponding brain area through shared latents, and investigating the difference between modeling behavior as markers or raw videos in the learned shared features."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes an autoencoder-based framework for finding shared and private subspaces of multimodal neural and behavioral data. Their goal is to separate the shared subspace from the modality-specific subspaces. To do so, constraints (Cauchy-Schwarz divergence) are added on the distribution of subspaces to encourage/discourage their alignment for this particular goal. This method is evaluated on one simulated dataset as well as two distinct experimental datasets from mice."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tMethodological novelty seems minimal. The authors note “a novel regularization term designed to identify features common to both behavior and neural activity” as their main methodological novelty. However, a very similar regularization scheme has been previously proposed by Yi et al. (2022). The difference between Shared-AE and this work is not adequately discussed making the methodological novelty unclear. Also, the idea of using CS-divergence instead of standard VAE with KL-divergence is not novel either as previously proposed by Tran et al. (2021).\n\n2.\tThere are numerous methods on neural-behavioral modeling and finding shared vs. private subspaces, none of which are compared to and many which are not discussed. In general, the manuscript seems to mix up unsupervised latent variable models of neural data with latent variable models of neural-behavioral data in its discussions and writeup. The only neural-behavioral data discussed in Related work (but not compared to) is Schneider et al 2023. Another neural-behavioral model that is cited is Sani et al 2021, but it is not discussed or compared to, and is instead grouped with an unsupervised model of neural data. Another neural-behavioral model in Zhou and Wei 2020 is also simply cited but not compared to. The authors need to separate the neural vs. neural-behavioral models in their manuscript and provide sufficient discussion of differences between other neural-behavioral models with theirs. Comparison to these neural-behavioral models is also needed to show the advantage of this method. In addition to the above cited works, there are also some other very relevant neural-behavioral models that are not cited, for example:\n\nGondur et al. 2024: This work appeared in the previous ICLR and has a very similar architecture designed for the same purpose using GP-VAE. However, it is not cited, and the key differences are not discussed. 
Given how closely related this method is to the authors’ work, it can serve as a baseline. \n\nHurwitz et al. 2021: This work proposes a sequential VAE for modeling neural-behavioral data. This needs to be cited and discussed.\n\nSani et al. 2024: This work proposes an RNN-based architecture that separates shared/private subspaces in neural-behavioral data and needs to be cited and discussed. \n\n3.\tEffect of novel terms in the loss i.e., the CS-divergence and their inverses are not assessed. As this is the main addition to a standard multimodal AE architecture in this work, it is crucial to evaluate whether presence of each term contributes to current results or not. Even without these constraints, the reconstruction loss itself can enforce shared vs private subspaces (at least to some extent) as the shared ones reconstruct both modalities whereas private ones reconstruct the specific modality alone.\n\n\n4.\tLack of baseline comparison in real data analysis. The same baselines used in simulated dataset (Shi et al (2019), Singh Alvarado et al. (2021)) are not shown in real data. Additionally, there are several relevant works on neural-behavioral modeling some of which could be used as baseline to better assess what benefits Shared-AE adds as mentioned in item 2 above. \n\n5.\tThe authors claim that their framework is better for more complex/social behavior types than Schneider et al. (2023). But what about all the other neural-behavioral models? Is shared-AE more suitable for complex behavior than others and if so why? This claim does not seem convincing without further comparisons. \n\n6.\tI find calling this method unsupervised very misleading. In the context of neural-behavioral modeling, supervision typically means use of behavior for guiding behavior-related features of neural activity. In this sense, the proposed approach is fully supervised not only during learning but also during inference, putting it in the multimodal family. 
The manuscript refers to this method as unsupervised throughout the paper including in the title. This needs to be corrected. \n\n7.\tThe model uses hyperparameters $(\\alpha, \\beta, \\gamma, \\delta)$ to control the contribution of regularizations to the overall loss. However, the effect of these hyperparameters on the results are not investigated. Authors note that the results are robust to changes in the hyperparameters, but I did not find the results that show this robustness. Please provide these.\n\n\nMinor weaknesses/questions \n\n1.\tWhy does the method need to learn two separate shared latents? It seems these two should ideally correspond to the same thing. Why not have a single shared latent which is used in both decoders? \n\n2.\tIn the unpaired analysis, is shuffling happening across time? Why does maintaining performance in this scenario indicate avoiding modality leakage?\n\n3.\tWhat is the basis for choosing the state dimension based on Fig. 8? Why are the reconstruction performance vs dimension so noisy in Fig. 8?\n\n4.\tWhat does min/max R2 refer to in Fig. 8?\n\ntypographical errors:\n\n-\tLine 302: Fig. 4.1 => Fig. 4? \n-\tLine 302: missing space between “data” and “(Fig”\n-\tFig. 7 caption includes panels E-F while the results are missing. “E-F: Prediction accuracy for neural activity and behavior under different distance groups.” It seems these panels are not included.\n-\tCaptions for panels B and C of Fig. 10 do not match. It seems the order is wrong.\n-\tFig 11 has very tiny titles\n-\tLine 1002: reference to Fig. A.9.3 is incorrect.\n\nReferences:\n\nDaiyao Yi, Simon Musall, Anne Churchland, Nancy Padilla-Coreano, and Shreya Saxena. Disentangled multi-subject and social behavioral representations through a constrained subspace variational autoencoder (cs-vae). bioRxiv, 2022. doi: 10.1101/2022.09.01.506091. URL https: //www.biorxiv.org/content/early/2022/09/05/2022.09.01.506091.\n\nLinh Tran, Maja Pantic, and Marc Peter Deisenroth. 
Cauchy-schwarz regularized autoencoder, 2021. URL https://arxiv.org/abs/2101.02149.\n\nYuge Shi, N. Siddharth, Brooks Paige, and Philip H. S. Torr. Variational mixture-of-experts autoencoder for multi-modal deep generative models, 2019. URL https://arxiv.org/abs/1911.03393.\n\nJonnathan Singh Alvarado, Jack Goffinet, Valerie Michael, William Liberti, Jordan Hatfield, Timothy Gardner, John Pearson, and Richard Mooney. Neural dynamics underlying birdsong practice and performance. Nature, 599(7886):635—639, November 2021. ISSN 0028-0836.\n\nSteffen Schneider, Jin Hwa Lee, and Mackenzie Weygandt Mathis. Learnable latent embeddings for joint behavioural and neural analysis. Nature, 617: 360–368 May 2023. ISSN 1476-4687.\n\nRabia Gondur, Usama Bin Sikandar, Evan Schaffer, Mikio Christian Aoi, and Stephen L Keeley. Multi-modal gaussian process variational autoencoders for neural and behavioral data. In International Conference on Learning Representations, 2024.\n\nCole Hurwitz, Akash Srivastava, Kai Xu, Justin Jude, Matthew Perich, Lee Miller, and Matthias Hennig. Targeted neural dynamical modeling. Advances in Neural Information Processing Systems, 34:29379–29392, 2021.\n\nOmid G Sani, Hamidreza Abbaspourazad, Yan T Wong, Bijan Pesaran, and Maryam M Shanechi. Modeling behaviorally relevant neural dynamics enabled by preferential subspace identification. Nature Neuroscience, 24(1):140–149, 2021.\n\nDing Zhou and Xue-Xin Wei. Learning identifiable and interpretable latent models of high-dimensional neural activity using pi-vae. Advances in Neural Information Processing Systems, volume 33, pp. 7234–7247, 2020. \n\nOmid G Sani, Bijan Pesaran, and Maryam M Shanechi. Dissociative and prioritized modeling of behaviorally relevant neural dynamics using recurrent neural networks. Nature Neuroscience, 27: 2033–2045, 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "## My key question:\n- What is the technical innovation between this and the two other Yi papers? Is it just a different application?\n\n## Small points of clarification:\n- ll. 108-120: the ending here is a bit vague; would help to clarify what these sorts of models would miss in more complex tasks (and hopefully show in experiments)\n- ll. 141-152: Model scales as number of pairs of modalities; probably not a limitation in practice, but a few words about scaling might help.\n_ l. 165: what is $s'_t$ here? Is it the same as $s_t^{pre}$?\n- l. 171: what is $y'$? Is the prime a typo?\n- ll. 162-168: It would be nice to have a diagram of this, since one could easily lose track of the different linear models: If I understand correctly: $s^{pre}$ is a linear function of each modality's latents, and $s^i$ is a linear function of both modalities' _shared_ latents.\n- l. 196 In Eq 2, how well does this estimator do in moderate-sized latent spaces? Is it a reasonable estimator? One is effectively saying that the density belongs to a reproducing kernel Hilbert space, right?\n- ll. 259-264: This description is a bit terse and may be hard to follow for readers (like me) who were not familiar with this dataset. I realize details are in the supplement, but the main text should be a bit more self-contained.\n- ll. 318-321: Why, exactly, do we need a strong separation between modalities? What is the use case? I realize it affects decoding performance (e.g., Figure 3) but what might we use this analysis to conclude in an experiment?\n- Figure 4A: Sorting the rows and columns by some sort of biclustering algorithm might make the correlation structure more apparent. This matrix plot is not very compelling as presented."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Principled approach to structuring latent spaces based on a desired semantics: some information in each latent space is common to all modes, some is private.\n- The need for interpretable joint encodings is of high interest in neuroscience.\n- CS-Divergence is a reasonable means of effecting the separation of subspaces, and the authors have chosen a pretty reasonable-seeming method of approximating this quantity.\n- The experiments on real data use challenging datasets that encapsulate many challenges faced by the community."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper contributes to a growing literature on learning shared representations for multimodal data in neuroscience, where many researchers are interested in learning joint representations of brain data and behavior. Whereas many previous methods have focused on learning shared latent representations by combining latents learned from individual modalities, this work further partitions each modality's latent space into a private latent spaces and a shared latent space, which is linearly mixed with other modalities' shared latents. This separation is engendered by the use of a Cauchy-Schwarz Divergence for aligning shared latents and separating shared from private latents. Experiments on one synthetic and two real data sets show suggestive links between brain data and behavior, though what one is to make of these is a bit unclear. Moreover, the technical contribution of the paper is perhaps small when considered in light of other work cited.\n\nIn all, this is a solid paper that is, in my view, below the bar for acceptance without a more substantial technical contribution."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- It is somewhat unclear what the technical innovation in this paper is beyond the Yi et al. preprint cited, as well as a similar paper by Yi and Saxena at EMBC in 2022 [1]. Both of those works use the same CS divergence setup as here, and I am struggling to see where the technical innovation is (though the application is somewhat different). I don't see the strength of the experimental results here being novel or interesting enough on their own to justify acceptance without an additional technical advance.\n- The authors use a latent space partition that is distinct from the Whiteway et al. paper but somewhat related to the Sani et al. work they cite. I realize that the PSID paper is linear, but the Shanechi group also has work on nonlinear methods that preserve this kind of partition (the most recent of which was likely unpublished when this work was submitted) that should probably come in for a fuller discussion.\n- In the framing of this work, I don't believe I fully understood the authors' rationale for needing shared vs. private subspaces. It's conceptually interesting, but the experiments simply focus on decoding. In what circumstances do we need such a partition, and what is the sign that not having it is hurting us scientifically? If this were clearer, I think it would be easier for readers to judge the success of the experiments. \n- The figures are all quite small and cramped, making them somewhat hard to parse. It's not always clear what the \"win\" is with the experiments.\n\n[1]: D. Yi and S. Saxena, \"Modeling the behavior of multiple subjects using a Cauchy-Schwarz regularized Partitioned Subspace Variational AutoEncoder (CS-PS-VAE),\" 2022 44th Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC), Glasgow, Scotland, United Kingdom, 2022, pp. 497-503, doi: 10.1109/EMBC48229.2022.9871466."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- The overall idea of disentangling the latent representation space using inter- and intra-modality loss regularizers has been previously explored in several works. There are also actually works proposing a similar autoencoder regularization framework in other settings [Tran et al. \"Cauchy–Schwarz Regularized Autoencoder\", JMLR 2022]. Perhaps one question that the authors should clarify with a clear statement is their methodological ML novelty (i.e., if the proposed regularized training scheme is completely novel, or if the paper only contains a strong empirical novelty).\n\n- Majority of the results show strong consistency between the disentangled latent features extracted from behavioral and neural data. Regarding the latent space visualizations (UMAP etc), how did the authors determine the latent dimensionality in each experiment? How consistent are these results with respect to changing this dimensionality?\n\n- The dataset retrieved for the 2AFC experiments seems rather small in terms of the number of trials. Also it seems to be divided only once into a train/test split. Therefore, I would ask if the authors performed any CV of the model training process, and evaluate the significance of their results in that sense?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Consistent experimental findings based on a simple disentangled representation learning model with a tailored training objective.\n- It has a unique empirical strength with a focus on neural data analysis from complex multi-modal social behavior experiments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a latent-space disentangling autoencoder to identify shared and private latent features from multi-modal neural and behavioral data. Proposed disentanglement is based on a Cauchy-Schwarz divergence based regularizer applied between different components (private and shared features) of the latent representations obtained via behavioral and neural encoders. Both inter and intra modality regularization losses are combined in addition to the standard autoencoder loss. Experimental analyses are performed first on a simulated dataset, and then on different complex behavioral datasets with neural recordings from mice."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Some of the empirical results need further validation, considering the details present in the Appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024sharedae,\ntitle={Shared-{AE}: Unsupervised Identification of Shared Subspaces in High-dimensional Neural and Behavioral Activity},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zXCnIyX9MG},\nnote={under review}\n}"
},
"abstract": {
"value": "Understanding the relationship between behavior and neural activity is crucial for understanding brain function. One effective method is to learn embeddings for interconnected modalities. For simple behavioral tasks, neural features can be learned based on labels. However, complex behavioral tasks and social behaviors require joint extraction of both behavioral and neural features. In this paper, we present an unsupervised autoencoder (AE) framework, called Shared-AE, which includes a novel regularization term that automatically identifies features shared between neural activity and behavior, while simultaneously capturing the unique private features specific to each modality. We apply Shared-AE, to large-scale neural activity recorded across the entire dorsal cortex of the mouse, during two very different behaviors: (i) head-fixed mice performing a self-initiated decision-making task, and (ii) freely-moving social behavior amongst two mice. Our model successfully captures both 'shared features', shared across the neural and behavioral activity, and 'private features', unique to each modality, significantly enhancing our understanding of the alignment between neural activity and complex behaviors."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Computational neuroscience",
"Multimodal",
"Social behavior"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/38d70bc732789c8dc1c12f68cbdcc0d7321fd345.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to neuroscience & cognitive science"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Shared-AE: Unsupervised Identification of Shared Subspaces in High-dimensional Neural and Behavioral Activity"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zY37C8d6bS | Semantic Skill Extraction via Vision-Language Model Guidance for Efficient Reinforcement Learning | main | Active | Reinforcement Learning; Vision-Language Models; Temporal Abstraction | reinforcement learning | 3;5;5;6 | 4;4;3;5 | 2;2;3;3 | 1;2;3;3 | 2;2;2;3 | 4.75 | 4 | 2.5 | 2.25 | 2.25 | 0.324443 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Is it possible to finetune the VLMs to achieve even higher performance since the tasks are quite different from what VLMs are typically trained on (Internet data)? And is it cost-effective to fine-tune VLMs just to label the skills?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "+ The proposed method utilizes the knowledge of VLMs as accurate skill annotations to iteratively identify helpful low-level skills for various tasks;\n\n+ The annotation from VLMs is done in discrete latent space from the codebook which improves both the learning of the codebook, and the representation of each latent;\n\n+ The paper provides theoretical analysis to guide the algorithm designs;\n\n+ The empirical experiments show decent and robust improvements over existing methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes using vision-language models (VLMs) to annotate temporally extended skills from offline datasets. Specifically, the annotation is on the latent space after vector quantization and is improved iteratively. It saves the extensive manual labeling process. The proposed method shows robust improvements over the existing state-of-the-art techniques and the authors conduct ablations on the effectiveness of VLM guidance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Is it possible to finetune the VLMs to achieve even higher performance since the tasks are quite different from what VLMs are typically trained on (Internet data)? And is it cost-effective to fine-tune VLMs just to label the skills?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Out of interest: How many skills were identified with VLM guidance compared to those without VLM guidance?\n- From my understanding, the codebook is learned self-supervised with labels provided by the VLM. Do you have any experiments in a purely IL setting? Or could you provide further motivation for the policy extraction via Q-Learning dependent on a reward function?\n- For 6.4: could you provide some graphical visualization of the table? I believe this would make the claimed slower degradation more apparent.\n- Does the VQ get a single frame or multiple frames to segment? How does the segmentation work? How are the initial and terminal state of a primitive skill $\\bar{s}$ determined?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The proposed hierarchical policy is an interesting method, with promising results.\n- The discretization offered by VQ-VAE enables the use of Q-Learning for the high-level skill policy while retaining the continuous output space of a low-level action policy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The method proposes a hierarchical policy, with a high-level skill selection policy trained via offline RL. A second low-level policy trained by behavior cloning outputs the actions given the current state and selected skill. For the skill extraction, a vector quantizied VAE is trained with labels provided by a VLM."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The clarity of the text should be improved. It often is hard to parse, and the text leaves it unclear, what the intentions are. For Example:\n - The introduction leaves it unclear what the authors define as \"skill\"\n - The Preliminary section's formatting makes it hard to parse. The Markov Decision Process (MDP) abbreviation is never actually defined. For VQ-VAE sg() is not defined. Same for Eq. 3\n - Section 5 suddenly appears without any stated goal or context. What is it supposed to show?\n The experiment section introduction is hard to read. In the last sentence of Section Five, the text describes the following order:\n 6.5 -> 6.2 -> 6.1 -> 6.4 -> 6.3. Then, it references Fig. 5, which is late in the Appendix.\n - Line 210 states Eq. 3 but actually references Eq. 1\n- Figure 1 suggests the policy extraction process requires the codebook model. But the High-level policy, in my understanding, outputs a skill selection, i.e., a codebook entry. Why does it require the codebook?\n- Please clarify: Is the VLM only used during training or during rollout? Follow-up Question for the Ablation 6.3: For the method without VLM, in which stages has it been left out?\n- The mentioned main limitation is \"proper initialization\". Please explain what initialization means, as this is barely mentioned in the rest of the main text.\n- Ablations regarding the VAE policy decoder are missing. In general, the architecture of the policy decoder is not clear from the paper.\n\nThe proposed method and use of VQ-VAE + VLM is interesting. However, in its current state, the paper is hard to parse. Without improvements to the text, I tend towards reject."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* The experiment section appears to lack baselines related to skill learning among the comparisons. It seems necessary to include comparison groups associated with skill learning in the baselines."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper proposes a novel method called VanTA (Vision-language model guided Temporal Abstraction) that utilizes Vision-Language Models (VLMs) to extract meaningful skills from offline reinforcement learning. It claims to overcome the limitations of existing unsupervised learning approaches or methods that require human intervention."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel method called VanTA (Vision-language model guided Temporal Abstraction) that utilizes Vision-Language Models (VLMs) to extract meaningful skills from offline reinforcement learning. VanTA extracts skills through an iterative process that involves initial segmentation based on VQ-VAE, followed by using VLM to assign meaning to the skills and update the codebook."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The experiment section lacks baselines related to skill learning among the comparisons. It is difficult to determine whether VanTA's performance is due to the proposed algorithm or simply because it uses a skill learning framework. Given that skills are learned in the form of a codebook, a comparison with [1] seems necessary, and a comparison with [2], which utilizes LLM, also appears to be needed.\n \n[1] Mazzaglia, Pietro, et al. \"Choreographer: Learning and adapting skills in imagination.\" *arXiv preprint arXiv:2211.13350* (2022). \n[2] Fu, Haotian, et al. \"Language-guided Skill Learning with Temporal Variational Inference.\" *arXiv preprint arXiv:2402.16354* (2024)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The implementation of behavior cloning (BC) for low-level policy learning (Line 229) appears to require expert demonstrations in the offline dataset. Then, how does BC-based learning achieve good performance with mixed-quality datasets in the Minigrid experiments?\n\n2. When reading through the middle sections of the paper, I expect the VQVAE's encoder and decoder to serve as the high-level and low-level policies, respectively. Given that the training already utilizes expert datasets and Line 159 indicates that the VQVAE's decoder is replaced with a policy decoder, the authors should justify their architectural decision to train separate high-level and low-level policies instead of leveraging the VQVAE's existing structure.\n\n3. Given the large model size of the VLM used in this paper, I am not surprised that it outperforms other baselines. Did other baselines employ models of comparable size to the VLM?\n\n4. Was there any specific fine-tuning or prompt engineering applied to the VLM? While Line 104 points out the limited reasoning abilities of VLMs, it appears that this paper uses them as-is without any specialized adaptation techniques."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The research direction of grounding VLMs to manipulation and beyond represents a significant and promising area of investigation in the field.\n2. The paper is well-organized and demonstrates consistent terminology usage, making it clear to readers.\n3. The experimental validation is comprehensive, spanning multiple environments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces VanTA, an approach that integrates vision-language models (VLMs) into offline reinforcement learning (RL) for sparse reward settings. The author presents a hierarchical offline RL framework, where high-level skill policy training is augmented by the use of VLMs. Specifically, VLM is queried with image pairs to identify performed skills (e.g., pulling, pushing, picking, …), and this result is incorporated into the learning process of vector-quantized variational autoencoder (VQVAE), aiming to guide the discrete latent skill space to be more semantically meaningful. Conditioned on the skill, low-level control policy is separately learned via behavior cloning. Experiments on multiple environments show the superiority of VanTA."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The primary concern is the novelty of the contribution. The concept of extracting semantic skills via VLMs and grounding them to action policies has been previously explored, as demonstrated in [1]. The authors should clearly differentiate their approach from existing work, particularly regarding their VQVAE implementation compared to [1].\n\n2. A fundamental aspect of skill-based RL approaches is their ability to reuse extracted skills while reducing action space exploration, enabling rapid [2, 3] or even zero-shot [1] adaptation to new tasks. The paper should elaborate on the method's generalization capabilities beyond the tasks represented in the offline dataset.\n\n3. The application of VLMs in this work may not fully leverage their potential. Foundation models' primary advantage lies in their domain-agnostic knowledge, which should facilitate rapid policy adaptation across diverse domains. However, the proposed method's reliance on domain-specific reward structures potentially limits its generalization capabilities. The authors should address whether policies trained on specific tasks can generalize to different tasks.\n\n[1] One-shot Imitation in a Non-Stationary Environment via Multi-Modal Skill, ICML 2023\n\n[2] Skill-Based Meta-Reinforcement Learning, ICLR 2022\n\n[3] Accelerating reinforcement learning with learned skill priors, CoRL 2020"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024semantic,\ntitle={Semantic Skill Extraction via Vision-Language Model Guidance for Efficient Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zY37C8d6bS},\nnote={under review}\n}"
},
"abstract": {
"value": "Extracting temporally extended skills can significantly improve the efficiency of reinforcement learning (RL) by breaking down complex decision-making problems with sparse rewards into simpler subtasks and enabling more effective credit assignment. However, existing abstraction methods either discover skills in an unsupervised manner, which often lacks semantic information and leads to erroneous or scattered skill extraction results, or require substantial human intervention. In this work, we propose to leverage the extensive knowledge in pretrained Vision-Language Models (VLMs) to progressively guide the latent space after vector quantization to be more semantically meaningful through relabeling each skill. This approach, termed **V**ision-l**an**guage model guided **T**emporal **A**bstraction (**VanTA**), facilitates the discovery of more interpretable and task-relevant temporal segmentations from offline data without the need for extensive manual intervention or heuristics. By leveraging the rich information in VLMs, our method can significantly outperform existing offline RL approaches that depend only on limited training data. From a theory perspective, we demonstrate that stronger internal sequential correlations within each sub-task, induced by VanTA, effectively reduces suboptimality in policy learning. We validate the effectiveness of our approach through extensive experiments on diverse environments, including Franka Kitchen, Minigrid, and Crafter. These experiments show that our method surpasses existing approaches in long-horizon offline reinforcement learning scenarios with both proprioceptive and visual observations."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement Learning; Vision-Language Models; Temporal Abstraction"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/10b5710384f4c1f01787eabd0847a2eef7cb5c8e.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/c848e434d23b9258cbbaba6c5a9ee3a40630ca8c.zip"
},
"title": {
"value": "Semantic Skill Extraction via Vision-Language Model Guidance for Efficient Reinforcement Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zZ3eYI0QXN | Simple, Accurate, and Efficient Axis-Aligned Decision Tree Learning | main | Active | decision tree;gradient descent;tabular data | other topics in machine learning (i.e., none of the above) | 3;3;3;3 | 4;3;5;5 | 2;2;2;2 | 2;2;3;2 | 3;2;3;3 | 3 | 4.25 | 2 | 2.25 | 2.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Dear authors & reviewers,\n\nThe reviews for the paper should be now visible to both authors and reviewers. The discussion is open until November 26 at 11:59pm AoE.\n\nYour AC"
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": {
"value": "authors - reviewers discussion open until November 26 at 11:59pm AoE"
},
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses above and also:\n- Datasets sizes and feature dimensions used in the experiments suggest that that shallow trees with balanced structure are sufficient in most cases. A lot of practical problems in industry require high imbalanced and quite complicated structure to get the best performance. Do you expect that this might be the problem with your proposed approach?\n- Did you experiment with warm-starting your aglrotihm from CART or GradTree? I'd expect faster convergence but it would be interesting to see how it performs and whether it will get stuck in local optima..."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- it appears to be the first paper to suggest directly utilizing univariate splitting for probabilistic (soft) trees.\n- simple and clear algorithmic framework.\n- the resulting trees are interpretable due to specific choice of feature selection mechanism and balanced tree structure"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The adaptation of soft-decision tree for univariate trees. \"This is achieved by first setting up the tree structure and assigning features to each node based on their mutual information. Then, we optimize the thresholds (bias) and the values at the leaf nodes. The remaining part is the same as in the standard soft trees. Experiments on small scale datasets show some improvement over CART."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Limited novelty: I believe the main contribution here is the construction of the initial tree (cyclic assignment based on mutual information). There are potentially several other approaches which also should work decently (e.g. random feature assignment, warm-start from CART or other baseline). And the remaining part is standard training of soft trees. \n- Experimental evidences are limited. Small to medium scale datasets are chosen for benchmarking where it is hard to judge who is a real winner. Indeed, performance gain over CART in most cases are statistically insignificant. \n- Do we even need probabilistic (soft) tree formulation in this setup (and consequently SGD)? Note that the tree structure is fixed, feature allocation is fixed. Thus, I strongly believe that the MILP formulation will drastically simplify: we're gonna need binary variables only to keep track of instance assignment (which datapoint goes to which node). Consequently, MILP formulation here could be a strong (in terms of efficiency and performance) baseline to have. Especially for the given scale of datasets. In addition to this, some other non-greedy baseline is needed to fairly assess the performance. GradTree is clearly underforming w.r.t. CART and I wouldn't call it SOTA. Some other candidates seem to show stronger performance (TAO from Carreira-Perpinan & Tavallali, 2018)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Can you elaborate on how sensitive ProuDT is to choices of hyperparameters like tree depth and the method of feature ranking, providing experimental results of your preliminary analysis?\n- How do you measure the interpretability of ProuDT quantitatively? Including metrics could help substantiate claims regarding enhanced interpretability over existing methods.\n- There is 39 datasets on your repository. Can you provide a rationale for presenting results for only 12 of them?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The study uses probabilistic splits that align with gradient-based optimization, contributing to improved classification accuracy without complex computation.\n- The proposed method effectively addresses the limitations of traditional greedy decision trees and existing probabilistic trees by ensuring univariate splits and reducing computational complexity.\n- By reducing the number of learnable parameters, ProuDT simplifies the optimization process, potentially improving scalability and ease of deployment."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes ProuDT, a Probabilistic Univariate Decision Tree designed to enhance the accuracy, interpretability, and computational efficiency of traditional decision trees. Instead of fixed splits, the model assigns a single deterministic feature to each decision node, to ensure univariate splits while maintaining differentiability for gradient-based optimization. This approach reduces computational complexity by limiting the number of learnable parameters and enhances interpretability through transparent feature utilization. The authors conduct experiments on different datasets, showing that ProuDT outperforms other univariate and probabilistic decision trees."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The authors rely on the preliminary study to justify key decisions, such as tree depth and feature ranking strategies. However, without presenting these results, it is not possible to verify the robustness of these choices or understand the specific conditions under which ProuDT performs optimally.\n- The study uses a limited amount of datasets, which may not fully represent the diversity and complexity of real-world applications.\n- Claims about enhanced interpretability are based on the univariate nature of the splits, as each node’s decision relies on one specific feature rather than a combination, which does suggest an enhancement on interpretability. However, the paper would benefit from quantitative metrics or experiments to substantiate these claims.\n- There are instances, particularly in multi-class datasets, where performance falls short. Explicitly addressing these limitations would clarify ProuDT’s most suitable applications."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "No"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1)\tThe experiments in Figure 2 and Figure 3 select CART as a baseline. Why not chooses the state-of-the-art method GradTree as a baseline?\n2)\tFrom Table 5, the F1 scores of GradTree appear to be lower than the results reported in its original paper on IRIS and SPLICE [Marton et al., 2024]."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper proposes a simple axis-aligned soft tree approach, which may effectively help to avoid the issue of overfitting. As shown in the experimental results, the proposed approach shows better classification accuracy compared with previous methods. Besides, this paper also presents a detailed review of tree methods, and this could help readers better undersntand the background."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a new probabilistic univariate decision tree approach. The core idea is to determine the splitting feature of each node in advance, and simplify the decision function at each node in order to achieve a reduced number of parameters. Additional experimental comparisons have been conducted on 12 benchmark datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1) From my perspective, this work lacks sufficient innovation. The proposed method can be regarded as a special case of the typical soft decision tree, where the weight of decision function at each node is a pre-given one-hot vector. Specically, the proposed method determines the splitting feature of each node based on mutual information in advance. However, such approach may have following issues:\na)\tAs mentioned in the introduction, one important reason for introducing soft decision tree is to efficiently solve the optimal decision tree. However, the proposed method uses a highly heuristic approach to determine the splitting feature for each node, and such approach will undoubtedly converge to a suboptimal axis-algined soft tree. This contradicts the original motivation of the soft decision tree. \nb)\tThe arrangement of features is also somewhat unreasonable. We take the decision tree in Figure 1 as an example. In the third layer, the relatively important feature x_3 is placed in the left subtree, while the less important features x_2 and x_1 are placed in the right subtree. It is hard to discern the intuition behind this.\n\n2)\tThis work points out that the previous method, GradTree [Marton et al., 2024], requires a large amount of time and space. \na)\tHowever, the article does not provide a comparison of the storage overhead between the proposed method and GradTree. \nb)\tMoreover, as shown in Table 6, the proposed method takes much more training time on several multi-class datasets to achieve comparable results. Besides, since reducing computational complexity is an important goal, Table 6 should be included in the main text rather than in the appendix. 
\nFrom the perspective of experimental results, I think that the proposed method does not effectively address the shortcomings of previous methods.\n\n3)\tThere are some minor issues of this paper:\na)\tLines 58-71 mention three methods, and the corresponding references should be provided.\nb)\tThere is an extra bracket in the denominator of equation (1).\nc)\tThe layout on page five is loose and should be written more concisely."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Is the proposed feature selection method superior to other methods? (See Weaknesses part for details)\n\n- In practice, non-perfect binary tree structures such as decision lists or oblivious trees (where parameters are shared across nodes at the same depth) are often considered. How would the feature assignment algorithm in the proposed method adapt to such structures? \n\n- The experiments do not appear to use standard benchmark datasets. How were the datasets in your paper chosen from the many available options? In particular, looking at the GitHub implementations, it seems that many more datasets are implemented than those presented in the paper. How were the 12 datasets mentioned in the paper selected from among the many available datasets?\n\n- Figure 2 seems to show that increasing tree depth does not reduce training error. While this could be desirable from a generalization perspective, it suggests that the proposed method might not address the suboptimality associated with greedy algorithms. How do the authors interpret this result? \n\n- The paper suggests increasing tree depth as the number of input features grows. However, from a user’s perspective, this may not be ideal. Even with a large number of features, a greedy algorithm can automatically select the few most important ones. How does the proposed method address this issue?\n\n- While this paper focuses on building a single tree, it is also possible to extend the method to ensembles. Other methods, such as GradTree, have been extended to ensembles like GRANDE[2]. Would the proposed feature selection method work effectively in an ensemble setting as well?\n\n----\n\n[2] Marton et al., GRANDE: Gradient-Based Decision Tree Ensembles for Tabular Data, ICLR2024"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Training axis-aligned decision trees using gradient-based methods looks simple but difficult and important, as it offers potential benefits in terms of accuracy, computational cost, and interpretability. I believe that this simple yet effective approach could provide value to the research community. However, I feel that the evaluation supporting the effectiveness of the proposed method is currently insufficient. Strengthening this aspect would enhance the quality of the paper."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a simple method to address challenges associated with feature selection for each splitting node and tree depth settings when learning axis-aligned decision trees using gradient-based methods. The paper demonstrates the effectiveness of the approach through experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In Section 4.1, hyperparameters are set using the dataset employed for evaluation. This raises concerns about whether the settings are robust. If the intent is to highlight the general applicability of default values across different datasets, the default values should be determined using datasets with diverse characteristics. Then, the method should be evaluated on datasets not used for determining these default values. Furthermore, in addition to reporting the performance with default values, the paper should include results after parameter-tuning all methods using cross-validation for each dataset. While the focus on simplicity is understandable, it does not justify limiting the scope of evaluation. \n\nIt appears that the paper suggests increasing tree depth when the number of input features is large. However, from the user’s perspective, this may not be desirable. For example, greedy algorithms can select important features during training, meaning that even with many features, the algorithm can automatically focus on a few important ones. The inability to leverage this advantage seems like a drawback. If the feature assignment algorithm in the proposed method addresses this concern, it should be explicitly demonstrated. \n\nThe experiments are conducted on very limited datasets. Although the datasets are drawn from various papers, the selection criteria are unclear. Given the availability of curated Tabular-Benchmark[1], it would be more convincing to use such benchmarks to avoid concerns that the selected datasets were chosen arbitrarily to produce favorable results. \n\nAlthough comparisons are made between CART and GradTree, as noted in the Related Works section, there are many other methods that warrant comparison. Moreover, the core idea of the proposed method appears to be the feature ranking algorithm, which assigns features to nodes. It would be beneficial to evaluate this algorithm. 
For instance, comparisons with simple methods that assign features randomly to nodes or the application of a temperature-controlled softmax function to $\\boldsymbol{w}_i$ in Equation (1), gradually lowering the temperature during training, could provide insights into the method’s effectiveness.\n\n----\n\n[1] Grinsztajn et al., Why do tree based models still outperform deep learning on typical tabular data? NeurIPS 2022 Datasets and Benchmarks Track"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024simple,\ntitle={Simple, Accurate, and Efficient Axis-Aligned Decision Tree Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zZ3eYI0QXN},\nnote={under review}\n}"
},
"abstract": {
"value": "Decision Trees (DTs) are widely used in various domains for their simplicity and interpretability. However, traditional DTs often suffer from low accuracy and reduced robustness because they rely on fixed splits and a greedy approach to decision-making. While recent approaches combining decision trees with optimization seek to balance accuracy, computational efficiency, and interpretability, they still fall short. In this paper, we introduce a novel Probabilistic univariate Decision Tree (ProuDT), a non-greedy, axis-aligned tree that aims to address these challenges and achieve significant improvements. By assigning a single deterministic feature to each decision node, ProuDT ensures univariate splits while preserving the differentiability of soft decision trees for gradient-based optimization. This tree enhances interpretability through transparent feature utilization in decision-making. Additionally, ProuDT simplifies the optimization process and reduces computational cost by avoiding complex parameters. Extensive experiments on tabular datasets demonstrate ProuDT’s superior performance and scalability in binary and multi-class classification tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"decision tree",
"gradient descent",
"tabular data"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3675ce4db1f9f28211b5ce7a5869e317c098aff5.pdf"
},
"presentation": null,
"primary_area": {
"value": "other topics in machine learning (i.e., none of the above)"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Simple, Accurate, and Efficient Axis-Aligned Decision Tree Learning"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zZ6TT254Np | Synthesizing Realistic fMRI: A Physiological Dynamics-Driven Hierarchical Diffusion Model for Efficient fMRI Acquisition | main | Active | Time Series;Diffusion | learning on time series and dynamical systems | 3;5;5;6 | 4;4;4;3 | 1;3;3;3 | 1;4;2;3 | 2;4;3;3 | 4.75 | 3.75 | 2.5 | 2.5 | 3 | -0.662266 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. What is the sampling frequency for the fMRI?\n2. It would be beneficial to analyze the reconstructed signal to determine if the observed patterns align with expectations. Calculating the spectrum and fractal characteristics would provide an important validation of the model’s effectiveness. Additionally, some neurological features can also be checked. For instance, if the data is from resting-state fMRI, does it reveal the default mode network? \n3. Line 465 typo 69->96"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The proposed method, which captures connectivity and spectral features, is a novel approach. \n2. The method is rigorously validated using multiple benchmarks and ablation studies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a novel approach for generating realistic fMRI data using diffusion models, specifically designed to account for regional interactions and spectral features of the brain. The model captures regional connectivity in a hierarchical structure, where fine-scale signals are conditioned on larger-scale signals. To learn spectral features, it incorporates loss functions that capture fractal characteristics. Results demonstrate improved performance over existing time-series forecasting and diffusion-based models. Additionally, ablation studies validate the effectiveness of each model component."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper has limited reproducibility due to missing details about data preparation and experimental setup. Additional information is needed on the dataset used, including whether it involved resting-state or task-based fMRI, whether subjects were healthy or under specific conditions, and the rationale for selecting regions of interest (ROI), which were reduced from 268 to 82. Clarification on data split (e.g., train/test division, sample counts) is also required. If the codebase will not be provided, the paper should include a detailed description of the network architecture (such as layer specifications and activation functions) and the training setup for benchmark methods in an appendix.\n2. The practical implications, particularly the clinical applications of the proposed method, are somewhat unclear and could benefit from further exploration and discussion."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The reviewers have raised several questions regarding the weaknesses of this work:\n\n1). Qualitative Comparisons: The authors are strongly encouraged to provide qualitative comparisons between the synthesized and real fMRI signals to allow for a qualitative assessment of the model’s performance.\n\n2). Optimal Range of Brain Regions: The results suggest an “optimal” range of brain regions that enhances performance. How is this range determined, and is it manually set? Reviewers are concerned about the reliance on manual design for determining this optimal range.\n\n3). Validation of Multifractal Properties: How do the authors validate that the generated signals preserve multifractal properties?\n\n4). Risk of Overfitting: Do the variance schedule parameters ($\\alpha_n$ and $\\beta_n$) and the historical data used in training the PDH-Diffusion model lead to overfitting?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "Overall, the major strength of this work lies in its novelty. The authors have developed an innovative framework that captures complex interdependencies and multifractal dynamics within synthetic fMRI signals.\n\nSpecifically, their contribution includes integrating three key components into the diffusion process: a hypergraph-based signal generator, a dynamics properties guiding module, and a cross-brain region progressive diffusion model. This integration enhances the realism of the generated signals. The authors provide a robust theoretical foundation for their methods and perform extensive quantitative analysis, demonstrating the framework’s accuracy and effectiveness in time series forecasting. The paper is well-organized and includes relevant background information. Results from the proposed method outperform multiple peer models in time-series forecasting and diffusion models, as evidenced by superior MAE, MAPE, and RMSE scores, highlighting the model’s effectiveness."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors proposed a novel framework named the Physiological Dynamics-Driven Hierarchical Diffusion Model (PDH-Diffusion) for fMRI analytics. The PDH-Diffusion framework integrates two essential brain physiological properties, hierarchical regional interactions and multifractal dynamics, into the diffusion process. The primary goal is to improve diffusion models’ capability to generate realistic fMRI time series signals by accurately capturing these physiological characteristics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The reviewers have multiple concerns about the framework and potential impact in this work. \n\n1). The confusion about physiological fMRI. Usually, fMRI are categorized into resting-state and task-based fMRI. The resting-state fMRI is commonly scanned without specific stimulus, whereas task-based fMRI is acquired based on external stimulus, such as 7 tasks in HCP. Is physiological fMRI is either resting-state or task-based signal? The authors do not clarify the concept even in Introdcution section.\n\n2). Limited motivation and impact. In Abstract, the authors mentioned \"Functional magnetic resonance imaging (fMRI) is essential for mapping brain activity but faces challenges like lengthy acquisition time and sensitivity to patient movement, limiting its clinical and machine learning applications.\" It seems that the authros' work can advance the fMRI for clincial application, but the authors do not generate some neurological or psychiatric fMRI to validate. From reviewers' perspective, using the innovative PDH-Diffusion model, it can assist physician to provide lengthy fMRI signal which will denfinitely reduce the inconvenience of patients. Only generating healthy fMRI can impair the motivation and impact of this work.\n\n3). Lacking of qualitative comparison. The authors have provided an extensive quantitative validation of PDH-Diffusion model with other peer methods using MAE, MAPE, and RMSE. Unfortunately, the autors do not provide any qualitative results, such as Functional Connectivity Map or Brain Connectivity Maps, of PDH-Diffusion. That is, although averaging metrics such as MAE, RMSE, MAPE across 10 runs may demonstrate robustness, these metrics cannot fully capture the quality or realism of the synthesized fMRI signals. Notably, the qualitative results is also vital in clinics, since these results showcase which brain regions are severly impaired by neurological disorders. 
Importantly, there is no visual representation given of the generated fMRI signal, which would be valuable for assessing their plausibility. \n\n4). Several technical issues. The variance schedule (parameters $\\alpha_n$ and $\\beta_n$) in the diffusion process may not be fully optimized for different regions or scales, potentially leading to inappropriate noise levels in certain hierarchical levels. This could result in over-smoothing or overfitting at certain levels. Additionally, conditioning on historical data could lead to overfitting if the model becomes too dependent on past values, especially if the training data does not represent the full spectrum of brain dynamics. Without explicit mitigation measures, such as adaptive variance schedules or regularization techniques, these issues may limit the model’s ability to generalize to new or varied patterns, impacting its robustness and effectiveness.\n\n5). Multiple typographical mistakes. In Section 4.3 (ABLATION ANALYSIS) there are many typographical errors. The term “share radio” is used instead of ‘share ratio’ in “The influence of share radio” this section. Similar typographical errors appear in the caption and text of Figure 3(b), as well as in Section 3.4. These typographical mistakes impair the redability of this paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. As noted in the weaknesses, synthetic neural signals are particularly intriguing in the neuroscience field. Would this framework be adaptable for application or testing on other fMRI datasets, especially those with different clinical populations, to assess its generalizability? \n\n2. Typically, fMRI signals are recorded in a low temporal resolution. Could the model be extended to work with high temporal resolution modalities, such as magnetoencephalography (MEG) or electroencephalography (EEG), where realistic signal synthesis could enhance downstream analyses?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper established a novel framework for fMRI synthesis taking the dynamic characteristics and structure of the brain into consideration which has the solid and strong theoretical support.\n2. The model incorporates hierarchical brain regional interactions and multifractal dynamics, enabling it to generate fMRI signals that better reflect physiological properties."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes 'PDH-Diffusion', a framework that synthesizes fMRI data through a hypergraph-based hierarchical signals generator, the properties of a dynamic guiding module, and a cross-brain region guiding progressive diffusion model. The authors first introduced scientific priors, motivation, and concrete realization for building these modules in detail. They then provided the quantified results compared with previous approaches and the ablation experiment, showing their architecture has better performance in generating fMRI than other baseline models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In the ''Experimental Setups'' section, the experiment did not test on multiple fMRI datasets or finer parcellation atlas, but only used the HCP dataset on the AAL atlas, which makes the results of the experiment not that solid. Meanwhile, I would suggest the framework could be extended to and tested in other modalities, for instance, EEG signals.\n2. Although the paper introduces a novel framework for generating fMRI data, it lacks validation on the generated data’s impact on downstream tasks, such as disease diagnosis, or brain network analysis."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In table 1, does “T_Pred=32 or 64” mean predicting 32 or 64 time points of fMRI data?\n2. If my speculation in the former question is right, it is counter-intuitive that forecasting more time points (96 vs. 32) did not lead to an increase in errors. Please clarify this. \n3. According to Fig 1, it looks like the resolution of synthesized data is bounded to the level of ROI. It is more desirable to synthesize fMRI dynamics at the level of vertex. Is the proposed method able to be applied at the vertex level as well? \n4. In line 451, please double check the definition of MAE."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The major originality of this study comes from its conceptual advances embedded in the proposed algorithm. It is indeed challenging to synthesize realistic fMRI data while preserving unique aspects of the brain system. The quality and clarity of the models in the main text are reasonably strong as well."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this study, the authors proposed a novel algorithm for synthesizing realistic functional MRI (fMRI) via physiological dynamics-driven hierarchical diffusion model. Then, the authors validated the feasibility of synthesized fMRI data by comparing it to other generative mechanisms. The scientific merit of this work mainly comes from the conceptual advances in their proposed algorithm. It is indeed challenging to synthesize realistic fMRI data while preserving unique aspects of the brain system. The authors combined three different modules, each serving different roles, to synthesize brain dynamics with preserved network-like structure and fractal components. Given results of the extensive validation experiment on the large fMRI cohort, the validity of the proposed algorithm is clear; yet, the scientific significance of this work is somewhat limited as there was no experiment demonstrating the practical usefulness of the proposed algorithm."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The major concern comes from the unclear significance of this work. As the authors argued in the Introduction, acquisition of fMRI is expensive. Thus, synthesizing fMRI signal can be tempting. Although expensive, real fMRI data reflects unique information of individuals. This study, however, was not able to demonstrate the synthesized fMRI still convey unique information of subjects. Slight improvement in forecasting future timepoints of fMRI signal does not suggest the significance or practical usefulness of the model. This works needs additional analysis or applications highlighting the unique advantage of the synthesized fMRI data from the proposed model."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024synthesizing,\ntitle={Synthesizing Realistic f{MRI}: A Physiological Dynamics-Driven Hierarchical Diffusion Model for Efficient f{MRI} Acquisition},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zZ6TT254Np},\nnote={under review}\n}"
},
"abstract": {
"value": "Functional magnetic resonance imaging (fMRI) is essential for mapping brain activity but faces challenges like lengthy acquisition time and sensitivity to patient movement, limiting its clinical and machine learning applications. While generative models such as diffusion models can synthesize fMRI signals to alleviate these issues, they often underperform due to neglecting the brain's complex structural and dynamic properties.\nTo address these limitations, we propose the Physiological Dynamics-Driven Hierarchical Diffusion Model, a novel framework integrating two key brain physiological properties into the diffusion process: brain hierarchical regional interactions and multifractal dynamics. \nTo model complex interactions among brain regions, we construct hypergraphs based on the prior knowledge of brain functional parcellation reflected by resting-state functional connectivity (rsFC). This enables the aggregation of fMRI signals across multiple scales and generates hierarchical signals. \nAdditionally, by incorporating the prediction of two key dynamics properties of fMRI—the multifractal spectrum and generalized Hurst exponent—our framework effectively guides the diffusion process, ensuring the preservation of the scale-invariant characteristics inherent in real fMRI data.\nOur framework employs progressive diffusion generation, with signals representing broader brain region information conditioning those that capture localized details, and unifies multiple inputs during denoising for balanced integration.\nExperiments demonstrate that our model generates physiologically realistic fMRI signals, potentially reducing acquisition time and enhancing data quality, benefiting clinical diagnostics and machine learning in neuroscience."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Time Series",
"Diffusion"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/bf4d4dbe1caba8d2758ab74a1229113674554a6a.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Synthesizing Realistic fMRI: A Physiological Dynamics-Driven Hierarchical Diffusion Model for Efficient fMRI Acquisition"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zZ8fgXHkXi | h4rm3l: A Language for Composable Jailbreak Attack Synthesis | main | Active | LLM safety;program synthesis;compositional modeling;jailbreak attacks;red-teaming;domain-specific languages;string transformations;AI safety research;black-box optimization;automated benchmarking | alignment, fairness, safety, privacy, and societal considerations | 5;5;6;6 | 2;3;4;3 | 3;2;3;2 | 3;3;3;3 | 2;3;4;3 | 5.5 | 3 | 2.5 | 3 | 3 | 0.707107 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- What are the time and computational costs associated with using h4rm3l for synthesizing attacks?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper introduces the first formal, composable representation of jailbreak attack, providing a more systematic and comprehensive approach to assessing LLM vulnerabilities.\n- The efficacy of the proposed framework is effectively demonstrated through the synthesis of a substantial dataset comprising successful jailbreak attacks against multiple state-of-the-art (SOTA) LLMs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces h4rm3l, a domain-specific language (DSL) designed to formally represent jailbreak attacks on large language models (LLMs) as compositions of parameterized string transformations. The authors propose a framework that includes the h4rm3l DSL, a synthesizer with bandit algorithms for generating optimized jailbreak attacks, and a red-teaming software toolkit. The paper demonstrates the efficacy of h4rm3l by synthesizing a dataset of successful jailbreak attacks targeting six SOTA LLMs, showing higher success rates than previously reported attacks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The estimation of attack success rates relies solely on the assessment of 100 Claude-3-haiku responses by just two human annotators, which raises concerns about the generalizability of the findings.\n\n- The relationship between various jailbreak methods and their impact on the results is not clearly articulated, particularly how the synthesis with h4rm3l connects to these outcomes.\n\n- Some parts of the paper are vaguely written and are subject to elaboration and clarification.\n\n- The following issues are subject to further clarification:\n - What do the numbers (e.g. \"00536\") in Figure 4 represent?\n - Are the horizontal and vertical coordinates of Figure 5 meaningful?\n - In line 928, \"therefore we use proxy names\" Do the proxy names here refer to the letters G, B and U?\n - What do the abbreviations BS, RP, and RT in Tables 3 and 4 signify?\n - In line 1222, \"Table 2 and Table 3\" should be corrected to \"Table 3 and Table 4.\"?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Compared to directly building a red-teaming framework, what are the unique advantages of the formal language proposed in this paper?\n2. Does the proposed jailbreak attack demonstrate superior efficiency compared to the baselines discussed in the paper?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper addresses a timely and relevant topic.\n- The structure of the writing is clear and well-organized.\n- The paper presents an innovative idea by transforming the jailbreak task into a formal language implementation.\n- Implementing various existing jailbreak methods to support the proposed composite attack strategy."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces h4rm3l, a domain-specific language (DSL) for representing jailbreak attacks on large language models (LLMs) as compositions of parameterized string transformation primitives. The authors develop a synthesizer using bandit-based few-shot program synthesis to generate novel, high-success-rate jailbreak attacks against black-box LLMs. They demonstrate the effectiveness of h4rm3l by synthesizing 2,656 successful attacks targeting six state-of-the-art LLMs, achieving high success rates. The work also includes a zero-shot harmful LLM behavior classifier and an open-source red-teaming toolkit."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Unclear Motivation and Purpose: The motivation and unique advantage of transforming jailbreak tasks into a formal language are not entirely clear. In my view, directly developing a framework or tool that integrates multiple jailbreak prompting operators and scheduling strategies may already suffice for most jailbreak/red-teaming needs. Therefore, what specific advantages or unique capabilities does this formal language offer? How does it surpass the functionalities of traditional red-teaming frameworks? Additional clarification from the authors would be helpful.\n\n- Assessment Method in Section 3.3: Using GPT as a judge has become a mainstream approach for evaluating jailbreak success rates, and many recent studies follow standard methods. However, this paper appears to introduce a new prompt. Is this prompt demonstrably superior to those used in other jailbreak studies? Alternatively, what necessary adaptations or unique design choices were made to tailor this prompt for the work presented here?\n\n- Efficiency Comparison in Experiments: While the authors clearly present the effectiveness of the proposed method compared to baselines in jailbreak success rates (as shown in Fig. 4), a comparative analysis of jailbreak efficiency with existing methods is lacking. Figures 2 and 3 only show the number of iterations required by the proposed method. I suggest the authors provide a more straightforward and detailed efficiency comparison with baseline methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "This reviewer's questions focus on the methods' details. See weakness 1-3. This reviewer wants to know more about the details of this method."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. This paper studies jailbreak attacks from a novel perspective like software engineering, to synthesize (all) jailbreak attacks and evaluate the jailbreak robustness of LLMs.\n\n2. The experiment is comprehensive and the visual analysis is clear.\n\n3. The discussion part is detailed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces h4rm3l, a novel approach to cover sufficiently large and diverse sets of jailbreak attacks with human-readable domain-specific language. h4rm3l includes (1) a domain-specific language formally expresses jailbreak attacks as compositions of parameterized string transformation primitives, (2) a synthesizer with bandit, and (3) a red-teaming software toolkit. h4rm3l also provides a dataset of 15891 novel jailbreak attacks and safety benchmarking results for 6 SOTA LLMs with 82 jailbreak attacks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Section 3.1, especially line 166-167, makes this reviewer confused. It seems that Section 3.2 has few connections with Section 3.1. To specific, the description in Section 3.1 does not significantly contribute to this reviewer's understanding of the connection from the motivation to the method of this paper. For example, this reviewer wants to know more details about the generic decorator TransformFxDecorator and why it covers the space of all string-to-string transformations and could represent any jailbreak attack on black-box LLMs.\n\n2. Based on the first point, the reviewer is concerned that the author's claim of “representing all black box attacks” is an overclaim. As the author cited in Section 2, PAIR, and its following version TAP, employ a red-teaming assistant to jailbreak the target model. So the algorithm output is uncertain. The authors should explain the inclusive relationship between h4rm3l and other black-box jailbreak attacks.\n\n3. According to this reviewer's understanding, the process of synthesizing jailbreak attacks briefly includes two sub-processes: selection, composition, and evaluation. Section 3.2 and 3,3 describe the details of selection and evaluation, respectively. However, how are the selected jailbreak methods combined in this paper?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. To what extent can h4rm3l cover alternative jailbreak techniques, such as fuzzing or mutation-based attacks, beyond template-based methods?\n2. Could the paper further discuss how h4rm3l compares to other jailbreak methodologies and clarify whether it can fully accommodate these approaches or has constraints?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "h4rm3l offers a formalized framework for representing jailbreak attacks, addressing the need for a consistent, standardized approach in this area of research. By utilizing parameterized string transformation primitives, it enables the modeling of diverse and complex attack types, making it a versatile tool. Notably, h4rm3l has demonstrated impressive empirical success, achieving over 90% success rates in generating jailbreak attacks across several state-of-the-art language models, including GPT-4o, Claude-3, and Llama3. This showcases its robustness and effectiveness in exposing vulnerabilities in a wide range of LLM architectures, proving its scalability and relevance for both proprietary and open-source systems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes h4rm3l, a domain-specific language designed to describe and facilitate jailbreak attacks on language models. h4rm3l addresses the gap between the diversity of jailbreak attack techniques and the limited benchmarks currently available. It formally expresses jailbreak attacks as compositions of parameterized string transformation primitives, enabling it to capture a wide range of attack types and their combinations. Furthermore, h4rm3l incorporates a bandit-based approach to jailbreak attacks and synthesizes a dataset containing 2,656 successful novel attacks targeting six state-of-the-art (SOTA) open-source and proprietary LLMs. The results demonstrate that the synthesized attacks are diverse and significantly more successful than previously reported methods, achieving success rates exceeding 90% on these SOTA models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the paper is impressive, it does not sufficiently address its limitations. Beyond the template-based jailbreak methods and synthesis, other forms of jailbreak techniques exist, such as fuzzing or mutation-based or other approaches ([1], [2], [3]). Given that the authors propose a language to express various attack strategies, it is important to clarify the scope of h4rm3l in this regard. The paper should discuss how many types of jailbreak methods the language can effectively cover and whether it faces any limitations in representing certain attacks. Furthermore, it would be beneficial if the authors included a more comprehensive discussion of related work in the background section, addressing whether h4rm3l can accommodate or has limitations in expressing these alternative attack types.\n\n[1] https://www.usenix.org/conference/usenixsecurity24/presentation/yu-jiahao\n[2] https://www.usenix.org/conference/usenixsecurity24/presentation/yu-zhiyuan\n[3] https://www.usenix.org/conference/usenixsecurity24/presentation/liu-tong"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "a domain-specific language for composable string transformations and related program synthesis algorithms; application to LLM vulnerability representation and assessment."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024hrml,\ntitle={h4rm3l: A Language for Composable Jailbreak Attack Synthesis},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zZ8fgXHkXi},\nnote={under review}\n}"
},
"abstract": {
"value": "Despite their demonstrated valuable capabilities, state-of-the-art (SOTA) widely\ndeployed large language models (LLMs) still cause harm to society due to the inef-\nfectiveness of their safety filters, which can be bypassed by prompt transformations\ncalled jailbreak attacks. Current approaches to LLM safety assessment, which\nemploy datasets of templated prompts and benchmarking pipelines, fail to cover\nsufficiently large and diverse sets of jailbreak attacks, leading to the widespread\ndeployment of unsafe LLMs. Recent research showed that novel jailbreak attacks\ncould be derived by composition, however, a formal composable representation for\njailbreak attacks, which among other benefits could enable the exploration of a\nlarge compositional space of jailbreak attacks through program synthesis methods,\nhas not been previously proposed. We introduce h4rm3l, a novel approach\naddressing this gap with a human-readable domain-specific language (DSL). Our\nframework comprises: (1) The h4rm3l DSL, which formally expresses jailbreak\nattacks as compositions of parameterized string transformation primitives. (2)\nA synthesizer with bandit algorithms that efficiently generates jailbreak attacks\noptimized for a target black box LLM. (3) The h4rm3l red-teaming software\ntoolkit that employs the previous two components and an automated harmful\nLLM behavior classifier that is strongly aligned with human preferences. We\ndemonstrate h4rm3l’s efficacy by synthesizing a dataset of 2656 successful\nnovel jailbreak targeting 6 SOTA open-source and proprietary LLMs (GPT-3.5,\nGPT-4o, Claude-3-sonnet, Claude-3-haiku, Llama3-8b, and Llama3-70b), and\nby benchmarking those models against a subset of the synthesized attacks, and\npreviously published jailbreak attacks which were used as few-shot examples. \nOur\nresults show that h4rm3l’s synthesized attacks are diverse and more successful\nthan previously reported attacks, with success rates exceeding 90% on SOTA LLMs.\nWarning: This paper and related research artifacts contain offensive and\npotentially disturbing prompts and model-generated content."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM safety",
"program synthesis",
"compositional modeling",
"jailbreak attacks",
"red-teaming",
"domain-specific languages",
"string transformations",
"AI safety research",
"black-box optimization",
"automated benchmarking"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ccc84c48d57ca145e708d295a45a7b0c89c29bab.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/bc6dcfe11824427599cf616f2e60cebe231caf8c.zip"
},
"title": {
"value": "h4rm3l: A Language for Composable Jailbreak Attack Synthesis"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zZU69H8tcr | SparsitySolver: Efficient Reinforcement Learning-based Pruning for LLMs | main | Active | Large Language Models;Model Compression | foundation or frontier models, including LLMs | 3;3;3;6 | 4;5;4;3 | 1;2;2;3 | 2;1;2;3 | 2;1;1;3 | 3.75 | 4 | 2 | 2 | 1.75 | -0.816497 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "RL is known unstable. How to reverse the parameters that you have pruned before?\nDoes this method require training or just forward pruning?\nFor the reward compensation, will that increase reduce sparsity as the sparse item for different channels are different.\nWhat is the meaning of without need for additional computation?\n \"Weproposea simpleandefficient reinforcement learningenvironment, improving the\n sparserewardenvironmentinexistingRLpruningmethodswithouttheneedforadditional\n computation.\""
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Introduce RL into LLM pruning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors introduce RL into LLM pruning. I feel the paper is tough to understand as the authors always give some reverse statements."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Bad Writing (why reinforcement learning is a good choice? Why you say the problem of previous research and say your work is the first one.)\n\n\"Suchmanuallyorsemi-manuallydesignedprun\ningstrategiesoftenleadtosuboptimalresults,whichmakesreinforcementlearn\ningafeasiblesolution. However, current reinforcement learning-basedpruning\n methodsusuallyhaveredundantenvironmentdesignsormultipleagents, render\ningthemill-suitedtomassiveLLMs. Hence,weproposeSparsitySolver,which\n first incorporates reinforcement learningintothepruningofLLMs, supporting\n variouspruninggranularity.\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Quantization is a direct approach to reducing storage costs and inference time. Is this method compatible with existing quantization techniques?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The illustrations for this work are easy to understand, and the method's description is clear and straightforward. \n2. The approach demonstrates its effectiveness across multiple large language models (LLMs) of varying scales when compared to current state-of-the-art (SOTA) methods. Moreover, the pruned performance shows improvements in both general metrics, such as perplexity (PPL), and current LLM benchmarks.\n3. Additionally, the proposed search strategy highlights the diverse sparsity within LLMs across layers and reflects the overall sparsity distribution within these models, presenting an interesting observation for further research."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the shortcomings of current pruning methods that overlook the inner sparsity differences between layers in large language models (LLMs). The motivation is clear and straightforward, and the design of SparsitySolver integrates reinforcement learning to facilitate an intuitive exploration of various pruning granularities. Additionally, the implementation of performance compensation through a linear combination in the final linear layer is both simple and effective. Experiments demonstrate its effectiveness across both general perplexity (PPL) and LLM benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. To gain a comprehensive understanding of a pruning technique, it is essential to evaluate its performance on complex tasks such as machine translation and other capabilities of large language models (LLMs), including in-context learning (few-shot). \n2. One pressing application of pruning techniques is to reduce the operational costs of very large-scale language models, such as LLaMA2-70B and PaLM-540B. However, this paper does not provide support for these models."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. **Methodology:**\n 1.1. What is the justification for such a simplified RL state space design? Have you considered incorporating model structural information into the state representation?\n 1.2. How does the reconstruction compensation method handle different types of network layers? What guarantees its effectiveness?\n\n2. **Experiments:**\n 2.1. Can you provide detailed comparative experiments with existing RL-based pruning methods?\n 2.2. What are the results on larger-scale models (>70B parameters)?\n 2.3. Have you conducted stability analysis across different downstream tasks?\n\n3. **Practical Implementation:**\n 3.1. What is the end-to-end training time and computational resource requirement?\n 3.2. How do you balance the search cost versus performance improvement in practical deployments?\n 3.3. Are there any architectural limitations to your method's applicability?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper proposes a novel direction by incorporating reinforcement learning into LLM pruning strategy search. The idea of using RL to automate pruning strategy discovery shows some originality.\n2. The reconstruction compensation method for structured pruning is interesting, as it aims to restore performance without introducing additional parameters, which could be practically valuable.\n3. The method demonstrates some level of versatility by supporting both structured and unstructured pruning across different model scales."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents SparsitySolver, a reinforcement learning-based approach for pruning Large Language Models (LLMs). The method consists of two main technical components: a reinforcement learning framework that searches for optimal pruning strategies, and a reconstruction compensation method for recovering performance in structured pruning without introducing additional parameters. The authors propose a simplified reinforcement learning environment design where the state is represented by the total pruning ratio and the action determines layer-wise sparsity. They also introduce a parameter-free reconstruction compensation method that aims to restore the performance of structured-pruned models through linear combinations of preserved channels. The approach is evaluated on various LLM architectures (OPT, LLaMA-V1/V2, Mistral) across different model scales and pruning granularities (both structured and unstructured), with experiments on language modeling perplexity and zero-shot tasks. The method claims to achieve competitive performance compared to state-of-the-art pruning approaches while maintaining efficiency in the pruning strategy search process."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Limited Technical Novelty and Theoretical Foundation:**\n 1.1. The motivation for using RL in pruning strategy search is insufficiently justified. The paper fails to establish why RL is particularly suitable for this task compared to other approaches.\n 1.2. The RL environment design appears overly simplistic. Using only the total pruning ratio as the state space lacks theoretical justification and seems naive.\n 1.3. The derivation of the reconstruction compensation method lacks mathematical rigor. Many assumptions and steps are not properly justified or explained.\n\n2. **Significant Experimental Deficiencies:**\n 2.1. The experimental evaluation lacks comprehensive comparisons with other RL-based pruning methods in the literature.\n 2.2. No statistical significance analysis is provided for the reported results.\n 2.3. The zero-shot evaluation is superficial and fails to demonstrate the method's advantages conclusively.\n 2.4. The ablation studies are incomplete and fail to validate the necessity of each component.\n\n3. **Poor Paper Presentation:**\n 3.1. The overall organization is chaotic with inconsistent paragraph spacing and formatting.\n 3.2. The figures, especially Figure 1, are of low quality and fail to effectively illustrate the proposed method.\n 3.3. Mathematical notations and equations lack proper explanations and context.\n 3.4. Citations are inconsistently formatted and poorly integrated into the text.\n\n4. **Insufficient Technical Details:**\n 4.1. Critical implementation details of the RL agent architecture and training process are missing.\n 4.2. The practical aspects of the reconstruction compensation method are unclear.\n 4.3. The computational overhead and deployment considerations are not adequately discussed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Q1) What is the overall runtime against other pruning strategies?\n\nQ2) In Figure 1 reward is defined as $1/ppl$ whereas it is $10/ppl$ in the reward function which one is the correct one? If it is $10/ppl$, why do you scale the reward, does it provide any benefits?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "* Paper is well written and easy to follow\n* Its novelty is redesigning the reward function, which shows it significantly impacts performance, unlike the previous approach.\n* Proposed methodology supports both structured and unstructured pruning\n* It does not introduce additional parameters for structured pruning\n* Results are strong in terms of both perplexity and downstream task performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper adapts the RL-based pruning framework for LLMs which has been previously shown to be effective in CNNs but performs poorly when applied to LLMs. It achieves it by densifying the sparse reward of the previous environment for the RL algorithm to efficiently utilize. Additionally, it introduces Compensation Through Reconstruction to alleviate the effect of the structured pruning. Lastly, experiments are conducted on popular LLMs like LLama V1-2 and OPT and results are competitive with other popular pruning methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The main issue with this paper is its evaluation setup and the lack of comprehensive experiments compared to previous pruning strategies.\n\n* The reason why its evaluation setup is biased is because of the following: unlike previous pruning strategies, this paper defines a proxy reward in terms of perplexity which at first glance makes quite a sense. However, perplexity does not always translate to better performance(i.e. FLAP vs wanda-sp from FLAP paper). Nevertheless, perplexity is still a valid metric for many papers but not for this one because pruning is determined by minimizing the perplexity of a certain dataset which is wikitext eval in this case. Unsurprisingly, this strategy performs quite good perplexity but I think this leads to reward hacking (Skalse et al., 2022). OPT-6.7B is a good example of this phenomenon where the pruned model has better perplexity than the dense. To settle all these potential issues, I think downstream evaluation should be prioritized because of the sensitivity of the perplexity. However, throughout the paper perplexity is the main metric whereas downstream evaluation is only performed for LLama-V1 7B with only 20 percent sparsity for structured and 70 percent for unstructured pruning. Thus, downstream evaluation should be performed for different models, model sizes, and sparsity ratios. Lastly, the perplexity of a different dataset also can be reported instead of training one.i.e. C4, PTB. \n\n\n* Moreover, some results are missing. For example, OWL with SparseGPT has a better perplexity score than the proposed methodology at 70 percent sparsity and is also as good as at downstream evaluation(SparseGPT: 48.03 vs 48.11 of the paper), which shows that the proposed methodology is not SOTA. One more note: even though I appreciate the reproduction, there are some discrepancies between reproduced scores and reported metrics from the original paper, i.e. BESA. 
So I believe it is a good idea to include one from the paper and in addition your reproduction results.\n\n\nReferences\nSkalse, J., Howe, N.H., Krasheninnikov, D., & Krueger, D. (2022). Defining and Characterizing Reward Hacking. ArXiv, abs/2209.13085."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "a reinforcement learning-based pruning technique for LLMs, strong performance across various pruning granularities"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024sparsitysolver,\ntitle={SparsitySolver: Efficient Reinforcement Learning-based Pruning for {LLM}s},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zZU69H8tcr},\nnote={under review}\n}"
},
"abstract": {
"value": "Large Language Models (LLMs) have achieved significant success in the field of Natural Language Processing (NLP). However, due to their large model size and high inference costs, the application of LLMs is restricted. Pruning is regarded as an effective method to reduce the size of LLMs. Mainstream pruning methods for LLMs typically apply a uniform ratio to prune all the layers or determine layerwise sparsity based on simple criteria. Such manually or semi-manually designed pruning strategies often lead to suboptimal results, which makes reinforcement learning a feasible solution. However, current reinforcement learning-based pruning methods usually have redundant environment designs or multiple agents, rendering them ill-suited to massive LLMs. Hence, we propose SparsitySolver, which first incorporates reinforcement learning into the pruning of LLMs, supporting various pruning granularity. SparsitySolver employs an improved reinforcement learning environment, allowing for a rapid pruning strategy search with a small-scale agent. Moreover, to lessen the performance decline caused by structured pruning, we propose a compensation method capable of restoring performance without introducing additional parameters to the model. We evaluate our approach on LLaMA-V1/V2, Mistral, and the OPT families across multiple pruning granularities, achieving performances surpassing the state-of-the-art methods."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Models",
"Model Compression"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/761236ff464773775dcbda22392e0e3accbe18ac.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "SparsitySolver: Efficient Reinforcement Learning-based Pruning for LLMs"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zZUCWkn4PL | Variance-Reduced Forward-Reflected Algorithms for Generalized Equations | main | Active | Variance Reduction Method;SGD;Generalized Equation;Variational Inequality;Minimax Problem | optimization | 3;5;5;5 | 3;3;2;3 | 1;3;3;3 | 1;2;2;2 | 2;2;2;2 | 4.5 | 2.75 | 2.5 | 1.75 | 2 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The maximally monotone mentioned in Assumption 1.2 should be explicitly defined (like equation (2) in Assumption 1.3). \n2. Can you provide some comparison for Assumption 1.4 with other nonmonotone operators (e.g., negative comonotone and interaction dominate (Lee & Kim, 2021))? \n3. It is better to provide a table to compare the complexity of proposed methods with baseline.\n4. Is it possible to apply SPIDER/SARAH estimator to reduce the complexity from $O(n+n^{2/3}\\epsilon^{-2})$ to $O(n+n^{1/2}\\epsilon^{-2})$?\n5. For Example 1 (equation (15)), the matrix $A_i$ and $B_i$ may be indefinite. How to guarantee the minimax problem be well-defined?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "see summary"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposed variance-reduced forward-reflected method for generalized equations, achieving the oracle complexity of $O(n+n^{2/3}\\epsilon^{-2})$ to obtain an $\\epsilon$-solution. The main idea is using SVRG and SAGA estimator, which is popular in nonconvex optimization. The experiments also show the advantage of proposed methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "see questions"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to weakness part."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper studies a very general problem, which covers lots of important optimization problems like nonlinear equations, variational inequalities, minimax problems.\n\n2. The author propose variance reduction version of foward-reflected method and establish new state-of-the-art oracle complexities for this problem."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies nonlinear inclusions with the form $G x + T x$, where $G$ has the finite-sum structure of $G= \\frac{1}{n}\\sum_{i=1}^nG_i$. The authors use several standard variance reduction technique to reduce the number of oracle calls $G_i$ and establish the complexity of $\\mathcal{O}(n+n^{2/3}\\epsilon^{-2})$."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The paper is too dense and the presentation makes the readers hard to follow. For example, in Section 1.1, the finite-sum structure should not be parallel to the problem formulation (NI, NE, VIP, Minimax). \n\n2. In the optimality certification part, the authors claim that existing stochastic methods often target special cases of NI and a better oracle complexity of $\\mathcal{O}(n+\\sqrt{n}\\epsilon^{-2}) is measured using a restricted gap function, which can not be directly compared. However, it is not clear what is the previous state-of-the-art results for this problem. What are the previous results for NI, NE and VIP? \n\n3. The authors use SAGA and SVRG for the variance reduced estimator, which may lead to worse oracle complexity under the cases, why not consider SPIDER?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- What is the dependency on the number of elements $n$ for the nonmonotonicity parameter $\\kappa$?\n- The construction eq. 10 seems interesting even in the deterministic case. What range of $\\kappa$ does the method achieve for $n=1$?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper is technically solid, treats variance reduction methods for weak MVI thoroughly, and does so in a modular fashion\n- The algorithmic construction in the constrained case (eq. 10) seems new and might be interesting in its own right (see question)"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers variance reduction methods for inclusion problems satisfying the (nonmonotone) weak Minty variational inequality (MVI) which is parameterized by some parameter $\\kappa$, whose magnitude controls the allows level of nonmonotonicity.\nExisting methods can largely be divided into two groups ($n$ is the number of elements in the finite sum and $\\varepsilon$ is the solution quality):\n\n- Halpern based method, which achieves a $\\tilde{\\mathcal{O}}(n+\\sqrt{n}\\varepsilon^{-1})$ complexity for making the norm of the operator small for the last iterate (see e.g. Cai et al. 2023 developing for cohypomonotone problems)\n- Extragradient and single-call variants which has a ${\\mathcal{O}}(n+\\sqrt{n}\\varepsilon^{-2})$ complexity to make the restricted gap function small for the averaged iterate (e.g. Alacaoglu & Malitsky 2021 for monotone problems)\n\nIn comparison this paper shows a ${\\mathcal{O}}(n+n^{2/3}\\varepsilon^{-2})$ complexity for making the norm of the operator small for the best iterate in weak MVIs. The paper shows convergence using the Loopless-SVRG and SAGA estimator combined with a single-call method that additionally only uses a single projection/resolvent."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "It seems like there are primarily two contributions over variance reduction methods for monotone problems regarding the complexity:\n\n- The guarantee is provided in terms of the norm of the operator (of the best iterate) instead of the gap (of the averaged iterate).\n- The guarantees extend to some range of weak MVIs (so beyond monotone problems)\n\n**W.1** Considering this, the main concern is that the allowed range of the parameter $\\kappa$ controlling the nonmonontonicity can be restrictive and that this is not discussed in sufficient detail. Take for instance Thm. 3.1 where $\\kappa \\leq \\mathcal O(\\sqrt{\\rho}/L)$:\n\n- In the case of SAGA we have $\\rho =b/(2n) \\leq 1/(2n^{1/3})$ (Cor. C.2)\n- in the case of SVRG we have $\\rho =p/2 =1/(2n^{1/3})$ (Cor. C.1)\n \nIn either case $\\kappa \\leq \\mathcal O (1/(Ln^{1/6}))$, so the nonmonotonicity parameter scales inversely with $n$. Additionally, the Lipschitz-averaged constant can be much larger than the Lipschitz constant ($n$-dependent). I found it very hard to parse whether there are any other dependencies on $n$, through e.g. the stepsize requirement ($\\eta$) in Thm 3.1, which depends on $C$ defined in Def. 2.1, while explicitly stated in Lem. 2.1, which contains again $\\rho$ through $p$. It would be very helpful if the authors could explicitly spell out the dependency. The range of $\\kappa$ is particularly important considering that even extragradient and optimistic methods (without relaxation) can achieve some range of $\\kappa$ (see e.g. [1] albeit for cohypomonotone problems).\n \n **W.2** The FRO estimator (l. 222) seems to be equivalent to what is used for the optimistic method in Böhm 2022, so I maybe wouldn't consider this a new construction. The construction in the constrained case (eq. 10), seems new, but in the current presentation its role is unclear. 
Is additionally term needed even in the monotone case or is it possible to get convergence for the norm of the operator already with FBF/FoRB based methods ala Alacaoglu & Malitsky 2021 (i.e. with $\\gamma=1/2$ in the current paper)? \n \n [1]: https://arxiv.org/abs/2210.13831"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Can the authors clarify how their $O(n+n^{2/3}\\epsilon^{-2})$ complexity compares to the $O(n+\\sqrt{n}\\epsilon^{-1})$ complexity in [1] for the operator residual norm?\n\n2. What are the key differences in assumptions or problem settings between this work and [1] that lead to the different complexity results?\n\n[1] Cai, Xufeng, Ahmet Alacaoglu, and Jelena Diakonikolas. \"Variance Reduced Halpern Iteration for Finite-Sum Monotone Inclusions.\" The Twelfth International Conference on Learning Representations."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "I think the paper is clearly written."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes SVRG/SAGA-type methods for generalized equations with the $O(n+n^{2/3} \\epsilon^{-2})$ complexity. But the rates in this paper seems not to be competitive with related works."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The authors said, \"Alacaoglu & Malitsky (2021); Beznosikov et al. (2023); Gorbunov\net al. (2022a); Loizou et al. (2021) claim an oracle complexity of $O(n+ \\sqrt{n} \\epsilon^{-2})$ to attain an $\\epsilon$\nsolution, but this is measured using a restricted gap function. Again, as highlighted in Cai et al.\n(2023); Diakonikolas (2020), this certification does not translate to the operator residual norm and is\ninapplicable to nonmonotone settings.\"\nHowever, it seems that [1] has proved the result of $O(n+ \\sqrt{n } \\epsilon^{-1})$ using SVRG-type algorithm for operator residual norm. How could the results in this work be competitive in that in [1]? \n\nThe authors said \"However, varying parameters or incorporating double loop/inexact methods in [1] must be used to achieve improved theoretical oracle complexity. We believe\nthat such approaches may be challenging to select parameters and to implement in practice.\" \nHowever, the experiments in this paper are quite weak (a synthetic experiment and a real-world experiment on logistic regression). If the proposed methods have a worse dependency on both $n$ and $\\epsilon$, but the authors still claim their methods are practical, maybe they should conduct a lot of large-scale experiments to support their claim, such as GAN and adversarial training in neural networks.\n\n[1] Cai, Xufeng, Ahmet Alacaoglu, and Jelena Diakonikolas. \"Variance Reduced Halpern Iteration for Finite-Sum Monotone Inclusions.\" The Twelfth International Conference on Learning Representations."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This paper proposes novel variance-reduced methods for generalized equations with state-of-the-art oracle complexity"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024variancereduced,\ntitle={Variance-Reduced Forward-Reflected Algorithms for Generalized Equations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zZUCWkn4PL},\nnote={under review}\n}"
},
"abstract": {
"value": "We develop two novel stochastic variance-reduction methods to approximate a solution of generalized equations applicable to both equations and inclusions. Our algorithms leverage a new combination of ideas from the forward-reflected-backward splitting method and a class of unbiased variance-reduced estimators. We construct two new stochastic estimators within this class, inspired by the well-known SVRG and SAGA estimators. These estimators significantly differ from existing approaches used in minimax and variational inequality problems. By appropriately selecting parameters, both algorithms achieve the state-of-the-art oracle complexity of $\\mathcal{O}(n + n^{2/3} \\epsilon^{-2})$ for obtaining an $\\epsilon$-solution in terms of the operator residual norm, where $n$ represents the number of summands and $\\epsilon$ signifies the desired accuracy. This complexity aligns with the best-known results in SVRG and SAGA methods for stochastic nonconvex optimization. We test our algorithms on two numerical examples and compare them with existing methods. The results demonstrate promising improvements offered by the new methods compared to their competitors."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Variance Reduction Method",
"SGD",
"Generalized Equation",
"Variational Inequality",
"Minimax Problem"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/198980eab7b1bf305d5a11bffc19e9a4f41ecc14.pdf"
},
"presentation": null,
"primary_area": {
"value": "optimization"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/e40086deb3765ed055207363e0c47b40e278d501.zip"
},
"title": {
"value": "Variance-Reduced Forward-Reflected Algorithms for Generalized Equations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zaDU4vMAUr | Bilevel Reinforcement Learning for Stock Data with A Conservative TD Ensemble | main | Active | Reinforcement learning;stock markets;portfolio optimization | reinforcement learning | 3;3;5;8 | 4;3;4;4 | 2;2;3;3 | 2;2;2;3 | 3;2;3;3 | 4.75 | 3.75 | 2.5 | 2.25 | 2.75 | 0.493742 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please see the weaknesses listed above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper addresses an interesting and important problem: stock trading in the offline RL setting. The bilevel optimization approach combined with conservative TD learning directly addresses practical challenges in offline RL for trading, particularly the out-of-distribution (OOD) generalization problem.\n\nThe paper effectively positions stock trading as a unique offline RL problem, underscoring the potential for dataset expansion through their decoupled MDP framework.\n\n The empirical results demonstrate a comparison with other existing models, including SARL, FinRL-SAC, FinRL-DDPG, and StockFormer. Real-world data sets, specifically the CSI-300 Index and NASDAQ-100, are used."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces \"MetaTrader,\" a reinforcement learning (RL) approach for stock trading that aims to overcome challenges of overfitting to offline data and lacking generalizability in non-stationary financial markets. MetaTrader trains policies to perform well on transformed out-of-distribution (OOD) data, with a conservative temporal difference (TD) ensemble, designed to mitigate value overestimation in offline RL. Through bilevel actor-critic training on both original and transformed stock data, MetaTrader builds robustness to OOD scenarios. Empirical results on two stock datasets indicate MetaTrader's superiority over existing RL and stock prediction methods, with improved performance in portfolio returns and risk-adjusted metrics like Sharpe ratio, demonstrating its potential for robust and adaptable trading strategies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The notations are not clearly explained and are sometimes used inconsistently, which makes them hard to follow. Some points related to clarity are outlined below.\n\n1. Observation space on page 2: “$K$ technical indicators that reflect the temporal trends of stock prices”. There is no explanation of what technical indicators are or how they reflect the temporal trends.\n\n2. State space on page 2: “The action-free market state $h_t$ is composed three types of latent states $s_t^{\\text relat}$, $s_t^{\\text long}$, $s_t^{\\text short}$ generated from the observation $o_t^{\\text price}$, $o_t^{\\text stat}$, $o_t^{\\text conv}$”. There is no explanation about how $s_t$ is generated by $o_t$. Instead, the authors refer to Eq (1) on page 3, however, Eq (1) does not include $s_t$. \n\n3. Dimension of $h_t$ on page 3: The dimension of $h_t$ is stated as the number of stocks times $D$, but there is no explanation of what the notation $D$ represents.\n\n4. Daily prices in Appendix A: The input data for daily prices are denoted by $p_t$. This should be written as $o_t^{\\text close}$. The notations are used inconsistently.\n\n5. Metrics in Appendix C: The evaluation metrics for the experiments are defined in Equations (4), (5), and (6). The definitions are very vague and the notations are not clearly explained, lacking connection to $o_t$, $h_t$, $z_t$, $a_t$ defined in Section 2. \n\n\nThe authors manipulate the input data using the three transformation methods. While the paper notes that data transformations are used to create diverse subsets, it does not provide a clear rationale for the chosen transformations. These transformations can substantially impact the underlying factors driving stock prices and alter correlations between stocks, so an explanation of their selection and intended effects would strengthen the work. Appendix A explains their methods; however, there is no justification or theoretical background provided. 
Although the empirical studies demonstrate that the proposed algorithm outperforms the existing methods using two real-world data sets, the benefits of using the proposed algorithm are not clear to me, because it functions like a black-box solution.\n\n\nUsing an ensemble-based conservative TD target to mitigate overestimation in offline RL, though effective, lacks novelty, as this technique has been explored extensively in previous research.\n\n\nSince this work primarily emphasizes empirical results over theoretical contributions, the experimental section should be more robust. For instance, the significance of the findings in Tables 3, 4, and 5 cannot be interpreted without standard deviations. To provide a clearer picture of the proposed algorithm’s stability, the authors could use a rolling training/test set and report both the mean and standard deviation of performance metrics. This would offer a more comprehensive view of the algorithm’s consistency and reliability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "•\tAs described in the weaknesses section, is there a potential issue of data leakage with the three types of data transformations mentioned? Additionally, why is it set to the top 10%? It would be helpful if the authors could clarify these points.\n\n•\tIn Figure 3, the augmented data is labeled from 1 to M, while the finetuning data is labeled as M+1 to M'. If I understand correctly, should it actually be M+1 to M+M'? If this is not a typo, please disregard this comment. If this is indeed a writing error, it would be helpful for the authors to adjust any other relevant parts of the paper accordingly.\n\n•\tLines 87–94 mention that the state data consists of action-free and action-dependent components. I would like to know more about the data processing methods used. For example, variables like price, cash holdings, and position can exhibit significant differences over extended trading periods. For instance, Apple’s (AAPL) price on the NASDAQ was around $12 in 2011, but by 2022, it had exceeded $130. This shift in stock prices can lead to substantial differences in cash, position, and price scales. Without normalization, how is RL training stability maintained? It would be helpful if the authors could explain this in detail."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "•\tOOD (out-of-distribution) issues are prevalent in the stock market. This paper focuses on augmenting offline data to train a policy with stronger generalization capabilities to handle distribution shifts in online stock data. This is a crucial and interesting topic.\n\n•\tThe writing is logically structured and easy to follow, making the content clear and accessible to readers. The arguments are presented in a straightforward manner, allowing readers to grasp the key points without difficulty.\n\n•\tThe paper includes appropriate experiments and ablation studies to support its findings. These experiments provide evidence for the proposed method’s effectiveness, though certain aspects of the experimental setup could be further improved for a more comprehensive evaluation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper applies RL to portfolio management, focusing on generalization in dynamic financial markets. Traditional RL models often overfit offline data, leading to rigid policies. To address this, the authors propose MetaTrader, introducing a bilevel actor-critic approach and a conservative TD learning variant with an ensemble-based target to reduce value overestimation. Experiments on two public datasets indicate that MetaTrader achieves improvements over existing RL and stock prediction models, suggesting enhanced adaptability in changing markets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "•\tRelying on training the policy with real and augmented data, followed by fine-tuning on real data to address the OOD problem in the stock market, is not an ideal approach. This can be observed in the following two aspects:\n\n1) The augmentation methods have limitations. For instance, do the F2 and F3 methods mentioned in the paper potentially introduce data leakage? Additionally, is the choice of the top 10% in F1 (why specifically 10%?) reasonable?\n\n2) The effectiveness of fine-tuning is questionable. Based on findings in the application of RL to finance, the performance of fine-tuning RL can be similar to or even weaker than training RL from scratch. The experiments in the paper do not include a comparison between fine-tuning RL and training RL from scratch to validate its effectiveness.\n\nNote: To clarify, “training from scratch” here refers to directly using the original data for portfolio management without involving data transformations in training.\n\n•\tThe experimental setup has some flaws, detailed as follows:\n\n1) Data Validity: The experimental data only goes up to 2022, without including more recent data (e.g., up to 2024), which makes the findings less convincing. Websites like Yahoo Finance (https://finnhub.io/), Alpaca (https://docs.alpaca.markets/), and FMP (https://site.financialmodelingprep.com/developer/docs#chart-intraday) provide easy access to the latest data, which would strengthen the study’s confidence.\n\n2) Stock Selection and Market Representation: While the CSI300 index theoretically consists of 300 stocks, only 88 were selected in this study. Additionally, both chosen stock markets consist of only around 80 stocks, without distinguishing between different market scales. This limited selection makes the results less persuasive, and it would be beneficial to include stock markets of varying scales for a more comprehensive analysis.\n\n3) Evaluation Metrics: The choice of metrics is somewhat limited. 
The study only focuses on return-related metrics (even though the Sharpe Ratio accounts for risk-adjusted returns, it still primarily measures profitability). Including risk metrics such as VOL and MDD would provide a more balanced evaluation of performance.\n\nNote: I noticed that this paper follows the experimental setup of StockFormer. However, using data from two years ago is not ideal and updating the data could significantly improve the credibility of the experiments. Furthermore, StockFormer included MDD as a risk metric, so it’s unclear why this study chose not to incorporate it in its evaluation criteria.\n\n•\t**Benchmark Selection.** Many financial companies still use machine learning and deep learning methods based on prediction, such as LightGBM, LSTM, and Transformer models. Including these in the experiments would provide a more convincing comparison. Qlib (https://github.com/microsoft/qlib) can be a helpful reference for implementing these methods."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How does the proposed method (and other baseline) determine the trajectory length? Are there any guidelines to pick a reasonable trajectory length if there are some tradeoffs between short and long trajectories?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- **Data transformation (augmentation) makes the most of the application-specific features of finance.**\n\nAs a key component of the proposed approach, the paper proposed to perform data augmentation to enhance the generalizability of the model to out-of-distribution (OOD) data. In many RL settings, this is usually not a promising approach due to the difficulty of predicting/obtaining rewards for the OOD (state, action) pairs. \n\nHowever, in the finance application, this is not the case because we can assume individual actions do not make a huge impact on the market dynamics, and what depends on the actions (selling or buying) is only the individual reward they get from these transactions. This paper nicely exploits this structure and does data augmentation.\n\n- **Approaches, including bi-level optimization and fine-tuning, sound reasonable and the components are harmoniously combined.**\n\nThe paper combines the aforementioned components in a sound way, and there are no components that seem redundant or unnecessary. The motivations behind why the proposed method needs each component are also well explained. While each component itself (e.g., fine-tuning and conservative learning) is not very novel, I think this idea is worth sharing. \n\n- **Well done ablations and experiments.**\n\nThe baselines are picked from representative algorithms of approaches in predictions, online and offline RL. Also, ablations on w/ and w/o data transformation, w/ and w/o fine-tuning, and runtime comparison are all informative for readers.\n\n- **Clarity of the paper.**\n\nOverall, the paper is clearly written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper studies a way to train an RL policy for stock trading using past transaction data. To deal with the non-stationary and out-of-distribution data in the deployment phase, the paper proposes to perform (i) data transformation (augmentation), (ii) updating the Q function via pessimistic ensemble among multiple subsets of (augmented) data, and (iii) fine-tuning the data collected in the most recent time period. The experiment results demonstrate that the proposed method works better than existing online and offline RL approaches in the real stock transaction data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "To be honest, there is not much to point out for weakness. However, the following might have room for improvement.\n\n- **Related work on offline RL and offline-to-online RL**\n\nThe current related work focuses on the existing approaches for learning a stock-trading policy. Adding discussion on why the conventional offline RL does not work and why the proposed method works might be helpful for readers who are not very familiar with offline RL. Also, there is some work on offline-to-online RL, which does online fine-tuning after performing offline RL. This approach is relevant to the proposed method, and can be worth mentioning in the related work section.\n\n- **Description about the proposed method in Introduction**\n\nWhile the paper was in general very well written, the description of the proposed method in the introduction may have some room for improvement. It was difficult for me to imagine what the proposed method looks like only from the introduction (in particular, it was not clear to me what \"in-domain profits\" refer to and what the outer-loop optimization does), while It became clear after reading Section 3."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Please carefully reconsider how you structure your network inputs.\n2. Please ensure that the comparison between the proposed method and classical methods is conducted fairly."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well-structured, and the ablation studies are conducted thoroughly."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper seeks to enhance classical reinforcement learning through the introduction of two techniques: bilevel learning and conservative TD ensemble. The effectiveness of these improvements is evaluated using experiments on two stock datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### Major Issues:\n\n1. **Lack of Discussion on Non-Stationarity**: There is insufficient discussion on how the paper addresses the issue of non-stationarity. As I understand it, the paper attempts to mitigate non-stationarity through specific data transformations. However, the rationale behind the choice of these three particular transformations is unclear. For instance, in F1, why is only the top 10% considered and not the bottom 10% as well? The action space allows for both long and short positions. Regarding F2, how does disrupting the order of observations in time help overcome non-stationarity? If time observations are treated as completely independent, the sequence would be unpredictable. For F3, as explained in Appendix A, future data is compressed into the present to create a non-empty input. Applying this approach in the out-of-sample leads to the future information issue. These questions raise concerns about the experimental validity.\n \n2. **Input Construction and Covariance Matrix**: A critical component of the model input is the sample covariance matrix between stocks. How exactly is this matrix computed? According to random matrix theory, when the number of assets is close to the number of time observations, the sample covariance matrix becomes singular and provides a biased estimate of the true covariance matrix. Although this method may still be applicable to the two datasets in the paper (with 300 and 100 stocks, respectively), it raises concerns about the scalability of this approach to larger datasets.\n \n3. **Experimental Shortcomings**:\n \n - First, lines 401-412 involve the calculation of true values, and there appear to be issues with this in the paper. To compute the true value theoretically, a globally optimal policy is required. I may have missed some details, but I hope the authors can clarify this in their rebuttal.\n - Second, regarding the performance metrics, the portfolio return aligns with the annual return. 
It would strengthen the results if the maximum drawdown (MDD) were also reported alongside the annual return.\n - Finally, the stock prediction strategy—buying only the single stock with the highest predicted value—is problematic, as it introduces excessive randomness into the strategy. A more convincing approach would be to purchase the top 10% of stocks based on cross-sectional prediction values, with equal weighting across them.\n\n### Minor Issues:\n\n1. Some of the terminology used in the paper is unconventional. For instance, \"portfolio return\" in Table 1 is more commonly referred to as \"cumulative return,\" and \"annual return\" should be labeled as \"annualized return.\" Additionally, the risk-free rate used in the analysis is not specified.\n \n2. Several references on ensembled Q-learning have been published, but this paper lacks a rigorous discussion of these existing works."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024bilevel,\ntitle={Bilevel Reinforcement Learning for Stock Data with A Conservative {TD} Ensemble},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zaDU4vMAUr},\nnote={under review}\n}"
},
"abstract": {
"value": "Reinforcement learning (RL) has shown significant promise in stock trading. A typical solution involves optimizing cumulative returns using historical offline data. However, it may produce less generalizable policies that merely \"memorize\" optimal buying and selling actions from the offline data while neglecting the non-stationary nature of the financial market. We frame stock trading as a specific type of offline RL problem. Our method, MetaTrader, presents two key contributions. First, it introduces a novel bilevel actor-critic method that spans both the original stock data and its transformations. The fundamental idea is that an effective policy should be generalizable across out-of-distribution data. Second, we propose a novel variant of conservative TD learning, utilizing an ensemble-based TD target to mitigate value overestimation, particularly in scenarios with limited offline data. Our empirical findings across two publicly available datasets demonstrate the superior performance of MetaTrader over existing methods, including both RL-based approaches and stock prediction models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement learning",
"stock markets",
"portfolio optimization"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/5cc31c034b89e8f8218839385bbab552f56bf433.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Bilevel Reinforcement Learning for Stock Data with A Conservative TD Ensemble"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zaoGCGLpux | RETHINK MAXIMUM STATE ENTROPY | main | Active | Reinforcement Learning for Exploration;Maximum Entropy;Intrinsic Rewards | reinforcement learning | 3;3;5;8 | 4;3;3;3 | 1;4;3;3 | 2;1;3;3 | 3;2;3;4 | 4.75 | 3.25 | 2.75 | 2.25 | 3 | -0.493742 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "Crucially, we do not believe it is appropriate to reject a paper solely because its claims and methods are perceived as too simple or lacking complex modifications, regardless of its correctness. Heliocentrism, for instance, should not be dismissed simply because it is conceptually simpler or only slightly modifies the center while removing epicycles from Geocentrism.\n\n[1] Mirco Mutti, Lorenzo Pratissoli, and Marcello Restelli. Task-agnostic exploration via policy gradient of a non-parametric state entropy estimate.\n\n[2] Alfréd Rényi. On measures of entropy and information. 1961.\n\n[3] Principe, Jose C. Information theoretic learning: Renyi's entropy and kernel perspectives. Springer Science & Business Media, 2010."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": {
"value": "We appreciate your detailed response, but we humbly disagree that the contribution of this paper is limited. This paper is the first to bridge two of the most important tracks on maximum state entropy (MaxEnt), helping to clarify the disorganized literature within this community. This work help prevent further misinterpretations in fundamentally misreading previous works including MaxEnt-H, MaxEnt-LA, and MEPOL [1], which have led to conclusions based on factual errors. We elaborate on these points in detail as follows.\n\n**Weaknesses:**\n\nWe believe you misinterpret the relationship between MEPOL and MaxEnts discussed in this paper. MEPOL is a particularly misleading work for readers seeking to understand MaxEnt, as it claims to improve MaxEnt-H by introducing kNN. Works like MaxEnt-LA cite it, but in fact, these works only reference MEPOL in passing, acknowledging its early connection to kNN, without ever directly comparing it or discussing it in detail. This has led some to assume a strong connection, which is, in fact, incorrect. Let us now address MEPOL in more detail.\n\nFrom a motivational perspective, MEPOL seeks to maximize the entropy of the average state distribution $H\\left(\\frac{1}{T}\\sum_{t=1}^{T}d_t^{\\pi}\\right)$, defined over episodes (note the index $t$ within an episode), while MaxEnt aims to directly maximize the state distribution $H(d^{\\pi})$. Theoretically, MEPOL establishes a projection from the state space to a space defined by the average probabilities within trajectories. This differs significantly from state entropy $H(d(s))$. The design of such projections generally results in fundamentally different information-theoretic metrics and properties. 
Further details on these information-theoretic definitions can be found in the [2, 3].\n\nMore specifically, given the definition $d_t^{\\pi}(s) = P(s_t = s | \\pi)$ and an episode $\\tau = \\{s_1, s_2, \\dots, s_T\\}$, MEPOL treats the $T$ different random variables in the episode, with a fixed hyperparameter $T$, and estimates the corresponding distributions $\\{d_1, d_2, \\dots, d_T\\}$. As a result, the agent is encouraged to explore novel states $s_t$ compared to the distribution $d_t$. Intuitively, MEPOL partly encourages exploration of novel episodes or trajectories, while MaxEnt focuses on exploring novel states. This is why we argue that MEPOL targets a trajectory-wise state entropy objective, placing it in a fundamentally different class.\n\nFrom an implementation perspective, MEPOL fundamentally does not present the algorithmic ideas used in Liu et al. or even MaxEnt-H. The key component of MaxEnt-LA is a non-stationary intrinsic reward function, which can be integrated into any RL algorithm (such as Q-learning, DQN, SAC, etc.) by substituting or augmenting the extrinsic rewards, much like the Approximation Oracles in MaxEnt-H. In contrast, the core of MEPOL is an implicit expression of the gradient of the trajectory-wise state entropy defined by itself, constrained by a KL divergence, within the framework of TRPO. Therefore, there is no need to discuss $\\delta$, as we cannot identify any algorithmic relationship between MEPOL and the MaxEnt framework, despite their sharing a similar high-level intuition.\n\n\n\n\nCLARITY and QUALITY:\n1. We recommend that the reviewer set aside any prior misconceptions about MEPOL and read MaxEnt-H, MaxEnt-LA, and our paper in sequence. We believe some misunderstandings arise from the confusion in previous works. For instance, in MaxEnt-H, the symbols $t$ and $T$ are used both for training iterations and as indices in the rollout, while in MEPOL, they are used exclusively for the rollout. 
We have taken care to reorganize these elements for clarity.\n2. We will present them in a concise manner."
},
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Largely the empirical questions above, which is what my review currently hinges on. In brief:\n\n* Can you run with more seeds?\n* Can you include much more information on experimental configuration in your paper? Especially, can you describe what an \"epoch\" means, and if epsilon-accurate stopping is used, how that works in practice? Can you clarify whether projection is only for entropy computation or control as well? I should in theory be able to recreate your experimental protocol from your paper, but at the moment that's the not the case.\n* As I understand, the NGU bonus as applied in the original paper (computed using previous states seen throughout a given trajectory) seems incorrect for a non-recurrent method. Would you share your thoughts on this? And can you comment on the non-stationarity introduced by NGU in relationship to the criticisms of MaxEnt-LA?\n* Can you explain the slight downward tick in the blue line, since that doesn't seem possible given the plot's description?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors do a good job of explaining the shortcomings of the bounds provided by Hazan. The thorough description of why small η leads to unrealistic T was very helpful for intuition. The proof result, if I understand it, is a significant improvement in that regard. I generally thought the mathematical writing in this paper was strong."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper provides a stronger proof for learning a policy that achieves maximum state-entropy, by using a simpler and more adaptive policy-sampling strategy. It further claims to unify two popular approaches to maximum-entropy RL. Empirical results suggest that this improved sampling strategy leads to more exploratory and uniform policies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I understand this is largely a theoretical paper, but nevertheless the experimental section of this paper is not very well-described or thorough. That’s the main reason for my “weak reject” – if the authors sufficiently address these concerns, I’ll gladly raise it.\n\nFrom the plots, it appears that the experiments are run with one seed – if so, this isn’t acceptable, but can be easily rectified.\n\nI’m very confused how you are using the NGU bonus, which is history-dependent, while not using a memory-based (e.g. LSTM) policy. Can you explain? The NGU bonus as I understand it doesn’t make much sense without recurrence. I also found it a bit unfair to criticize MaxEnt-LA for using a non-stationary reward when you use one here (of almost the same form, even).\n\nMany details are missing for experiment configuration as well. How much training data is gathered each epoch? How much training is done per epoch? Was there a concrete measurement for the “epsilon-accurate stopping criteria” in the empirical results, or was it based on the number of training steps (it's hard for me to understand what \"train to convergence\" means in the high-dimensional setting)? What does an “epoch” mean for MaxEnt-LA? What’s the scale of the NGU bonus? Are the projection functions consistent across methods (the random projections for each method need to have the same parameterization for the comparison to be fair)?\n\nHow can there be a slight downwards tick for the blue line for Ant for “total unique states visited during training”?\n\nTo clarify, in the appendix when you describe projecting down the states to a 7-dimensional space, that’s only for entropy calculation, correct? And not projecting down for control as well?\n\nI would also encourage the authors to choose a different name for their method. “MaxEnt-Veritas” seems to imply that the authors view their method as the “one final and true MaxEnt” method, while also implying that there was something false about prior work. 
The main difference between your method and Hazan is an improved policy-selection scheme, and so I don’t think this method is any more “truthful” than theirs, possibly just more efficient. And I’m sure someone (maybe even you) will at some point improve upon this method as well. I similarly think the title is not as informative about the methodology as it could be."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. I cannot see the reward functions in the three algorithms that are stationary and those that are not. In particular the sentence line 183 confuses me.\n2. Line 195 to line 199, might there be a confusion between MaxEnt-V and MaxEnt-H?\n3. Line 488, I suppose the entropy function is concave and not convex."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well written and easy to follow.\n2. The problem addressed is interesting to the community. \n3. The algorithm the authors propose is well motivated, with theoretical guarantees, and tested on several problems, where it also outperforms the alternative methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors study the two algorithms by Hazan (MaxEnt-H) and by Liu \\& Abbel (MaxEnt-LA), which both compute a mixture of policies to maximize the (discounted) state entropy of an agent evolving in an infinite-time MDP. A priori, the reward function they maximize is different, the weights of the mixture are different, and the number of policy updates are different before updating the intrinsic reward estimate. The authors nevertheless prove that (1) the intrinsic rewards both algorithms optimize are proportional to each other when computed using kNN, (2) the parameter $\\eta$ for computing the mixture in MaxEnt-H is unnecessary, and a uniform mixture as in MaxEnt-LA is sufficient (3) it is better to completely optimize the policy before updating the estimate of the intrinsic reward function, as advocated by Hazan in MaxEnt-H. Based on these three observations, the authors introduce a new algorithm (MaxEnt-V) by combining the methods from Hazan and Liu \\& Abbel. They remove the unnecessary steps from Hazan, and compute the mixture as in Liu \\& Abbel, but optimize the policy as in Hazan before updating the intrinsic reward estimate. The algorithm is $\\epsilon$-optimal, meaning that the agent is at most suboptimal by $\\epsilon$, and under some assumptions $\\epsilon$ decreases by $(B + \\beta \\ln T) / T$ where $T$ is the number of iterations and where $B$ and $\\beta$ are constant. In practice it performs at least as well as the algorithms from Hazan and from Liu \\& Abbel.\n\nWarning: I must apologize beforehand for not having checked the demonstrations in the appendix."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. I would appreciate if the authors could clarify if their algorithm is always to be favoured compared to that of Hazan and Liu \\& Abbel, or if there may be configurations in which their method would fail and where the others would not.\n2. Paragraph line 313 to line 317 is pretty unclear to me. Could the authors explicitly provide the order of $\\epsilon$ when $\\eta \\rightarrow 0$. Also, the bound of Hazan seems to be exponentially decreasing in T, which a priori looks better than the bound of MaxEnt-V.\n3. In Figure 4, results are represented in terms of epochs. To my understanding, an epoch is a step $t$ in algorithm 1. Then, as in MaxEnt-H and MaxEnt-V, an epoch fully optimizes the policy, I suppose it is also much longer in terms of wall-time, compared to MaxEnt-LA. What do the figures look like as a function of the wall-time?\n4. I do not agree with line 486, stating that the algorithms maximizing the action entropy lack the capability of exploring in absence of extrinsic reward. To me, without extrinsic rewards, these methods aim to explore the action space uniformly, which does not guarantee uniform state exploration. In opposition MaxEnt-H, MaxEnt-LA, and MaxEnt-V aim to explore the state space uniformly, which does not guarantee action space exploration. Both approaches have different intrinsic motivation (i.e., exploration objective), and it is possible to construct examples for which uniform action exploration outperforms uniform state exploration, and vice versa.\n5. In paragraph line 508, there is an important distinction to make between parametric methods. Some are explicitly based on the entropy. The intrinsic motivation is to maximize the entropy of some distribution, which is typically approximated with a neural density estimator. Other methods are based on the uncertainty of some model. 
The intrinsic motivation is to take actions for which a parametric model over states and/or rewards provides different outcomes compared to the MDP realization. The distinction is particularly important in the current work as the first class of algorithms optimizes the same objective as the method the authors presented (and may have been added in the experiments). I think in particular that the related work should include [1, 2, 3], and probably other, more recent, works.\n\n[1] Lee, L., Eysenbach, B., Parisotto, E., Xing, E., Levine, S., & Salakhutdinov, R. (2019). Efficient exploration via state marginal matching. arXiv preprint arXiv:1906.05274.\n\n[2] Guo, Z. D., Azar, M. G., Saade, A., Thakoor, S., Piot, B., Pires, B. A., ... & Munos, R. (2021). Geometric entropic exploration. arXiv preprint arXiv:2101.02055.\n\n[3] Islam, R., Ahmed, Z., & Precup, D. (2019). Marginalized state distribution entropy regularization in policy optimization. arXiv preprint arXiv:1912.05128."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Did I misinterpret something within points 1-3 above that renders the conclusion fundamentally wrong?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "CLARITY:\n- The paper presents a clear and detailed comparison between two existing RL algorithms for state entropy maximization. \n- The intentions of the paper are clearly specified from the abstract and followed along the paper in a clear and coherent manner.\n\nQUALITY:\n- After a brief check of the proof, the deviation from the original analysis of Hazan et al. of Theorem 1 (pag. 14 of the paper) seems correct and well explained.\n\nORIGINALITY:\n- Although maybe common in other areas, I am not aware of other works in this context leveraging the Euler-Mascheroni constant to analyze the telescoping sum as done in the proof of Theorem 1.\n\nSIGNIFICANCE:\n- The problem tackled within the paper, namely state entropy maximization, is a fundamental problem for RL as it tackles from first principles the issue of exploration in RL. As a consequence, investigation in this direction is highly relevant for RL and beyond.\n- Theorem 1 closes a gap between theory and practice in the choice of policy sampling schedule."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper first presents two established approaches to tackle the maximum state entropy maximization problem in reinforcement learning. Then, they first show how the two algorithmic schemes fundamentally aim to solve the same formal problem and present limitations of both schemes. Crucially, they identify that the policy sampling schedule is sub-optimal for one, while the policy update strategy is sub-optimal for the other one. Towards bridging the two schemes in a unified manner and overcoming these issues they propose a new algorithm, provide a theoretical analysis of its finite-time sub-optimality, and provide experimental comparisons with the previously mentioned existing algorithms."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "ORIGINALITY and SIGNIFICANCE:\nUnfortunately, the paper seems to suffer a quite fundamental issue in terms of originality and significance because of the following points holding together:\n1) The work fundamentally aims to 'build a unified perspective' on the maximum state entropy problem by bridging two established schemes, namely the algorithms presented by Hazan et al. (MaxEnt, here renamed MaxEnt-H) and by Liu et al. (APT, here renamed MaxEnt-LA).\n2) Crucially, the work by Liu et al. cites (and seems to build on) work [1]. This work fundamentally already presents the algorithmic ideas used in Liu et al. (namely the non-parametric entropy estimate to scale to non-tabular domains) in the context of the maximum state entropy problem presented by Hazan et al. (Sec. 4 of [1]). Moreover, it proposes an algorithm, named MEPOL, that seems to be nearly analogous to the one proposed by the authors (MaxEnt-Veritas) as a subcase. In particular, by choosing a high value of $\\delta$ in MEPOL, it seems that the policy update scheme corresponds to the one in MaxEnt-Veritas (as in Hazan et al.), while using the policy sampling scheme as in Liu et al.\n3) The authors cite [1] both in the Introduction and Related Works section, where they claim that [1] 'focuses on maximizing trajectory-wise state entropy' which is a 'fundamentally different objective'. Although this is the case for later works of the same author that alike the mentioned work by Jain et al. optimize trajectory-wise state entropy, this does not seem to be the case in [1], where the notion of entropy in MEPOL is not trajectory-wise.\n\nAs a consequence, it seems to me that the authors aimed to bridge two works that were already deeply (historically and formally) connected by a misinterpreted well-established reference. 
Although the submitted paper brings a new theoretical result (Theorem 1) by a slight modification of the analysis in Hazan et al., this seems very limited in terms of contribution and novelty compared with what the authors claim (e.g., in the abstract), which was arguably already achieved in large part. \n\nCLARITY and QUALITY:\n- I believe that the first 5 pages of the paper can be significantly sharpened in their presentation, which currently seems loose and slightly hard to follow at points.\n- I would suggest to present Propositions 2 and 3 not as propositions as they seem simple calculations based on existing theorems and could be integrated within the text to improve the flow of the paper.\n\n\n[1] Mirco Mutti, Lorenzo Pratissoli, and Marcello Restelli. Task-agnostic exploration via policy gradient of a non-parametric state entropy estimate."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "I mostly do not have direct questions. Some are reported in the comments above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- The paper provides a unifying perspective on the literature of maximum state entropy;\n- The paper proposes an interesting implementation change for maximum state entropy algorithms, which prescribes to freeze the intrinsic reward for multiple policy optimization steps (like in a Frank-Wolfe routine);\n- The paper makes an effort to provide theoretical ground to the proposed method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses maximum state entropy exploration in MDPs without rewards. First, it analyzes and compare previous approaches to state entropy maximization, namely a Frank-Wolfe algorithm (like MaxEnt, Hazan et al. 2019) and a policy optimization algorithm (like APT, Liu & Abbeel, 2021). The paper shows that the two approaches share the same reward function when the entropy is estimated via kNN. Then, the paper proposes a new algorithm, called MaxEnt-V, incorporating the best of both approaches. The method is demonstrated to lead to approximately optimal policies and empirically validated in a set of Mujoco environments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "EVALUATION\n\nThis paper provides some fresh ideas, mostly that the previous approaches for maximum state entropy can be connected into a \"unifying\" algorithm and that the entropy intrinsic reward shall be frozen for multiple policy optimization steps, which bears some resemblance to the target network trick for deep Q-learning. Unfortunately, in my opinion the paper fails to provide convincing evidence that the frozen reward trick gives empirical benefit to justify widespread adoption. More broadly, I think the paper falls short of the technical quality required to be accepted at ICLR. I provide below a summary of what I believe are the main weaknesses of the paper and further comments.\n\nMAJOR WEAKNESSES\n\n1) Misunderstandings of the literature\n 1. The paper presents MaxEnt-H and MaxEnt-LA as two competing approaches for maximum state entropy. Whereas different design choices can be extracted on an abstract level, the authors seem to misunderstand the purpose of the two papers. Hazan et al. were the first to introduce the problem of maximum state entropy in MDPs. They show the latter problem is not hopeless, despite being non-convex in the policy parameters, by providing a provably efficient algorithm. While an implementation and experiments are provided, the algorithm is mostly a theoretical and analytical tool. Instead, Liu & Abbeel's paper falls into a stream of practical methods for maximum state entropy, where the main advancements were given by the use of kNN entropy estimators in place of state densities (this technique has been introduced by Mutti et al., 2021 not by Liu & Abbeel as stated in the manuscript) and learning representations.\n 2. The paper suggests that MaxEnt-LA trains a mixture with uniform parameters. I am not sure this is the case. From my understanding, it does sample uniformly from a replay buffer, which may include transitions coming from previous policies, but it does that only to perform updates on the *last* policy. 
Indeed, the output is the last policy network, which is then fine-tuned with external reward, and not a mixture. There seems to be an important gap here.\n 3. The paper says that another stream of work (Mutti et al 2021, Jain et al 2024) optimize the trajectory-wise entropy. Perhaps this distinction shall be clarified. From my understanding, the objective of Mutti et al 2021 is the same of Eq. 2, although the entropy reward is not decomposed in state terms as in Liu & Abbeel 2021. \n\n2) Some claims look subjective and lack strong support\n 1. \"Tuning $\\eta$ is unnecessary in MaxEnt-H\". Since the purpose of tuning $\\eta$ comes from the analysis in Hazan et al., 2019, the paper should show that similar convergence guarantees can be obtained with a uniform mixture to support this claim. Showing that $\\eta$ is always small and tuning is unnecessary in practice is not enough.\n 2. \"Freezing the reward is better\". Theorem 1 only provides an upper bound on the sub-optimality. It is rather weak to say that freezing the reward is better because it leads to a smaller upper bound. Maybe the upper bounds are just not tight, and the analysis would say nothing about which one (freezing the reward or changing it at any step) is better.\n 3. Theorem 1. I have some concerns on the validity of this result. First, the statement assumes that $H_{kNN}$ is smooth and bounded, which does not seem to be the case by staring at Eq. 6. Can the authors provide more details on when those conditions are met? Moreover, what does it mean to assume access to estimation oracle (as in lien 310)? Note that the kNN entropy estimators are biased, does that mean $\\epsilon_0$ error on the biased estimate or the true entropy?\n\n3) The empirical analysis looks very far from the standards of the community\n - Are the curves in the figures reported on a single run? 
Some details seem to be missing on how the experiments are conducted to meet some statistical significance (e.g., look at https://ojs.aaai.org/index.php/AAAI/article/view/11694 and https://proceedings.neurips.cc/paper_files/paper/2021/hash/f514cec81cb148559cf475e7426eed5e-Abstract.html)\n - Previous papers, especially Hazan et al. 2019 and Liu & Abbeel 2021 have public implementations. To claim MaxEnt-V gives benefits over them, it would be better to compare its performance with the official implementations of MaxEnt-H and MaxEnt-LA.\n\nOTHER COMMENTS\n\nThe literature of maximum state entropy could be presented better, especially considering that this manuscript builds over them. The first papers on this problem have been Hazan et al 2019; Lee et al 2019, Mutti & Restelli 2020), which presented algorithms requiring state density estimation, which is mostly impractical in high dimensions. To overcome this issue, Mutti et al 2021 proposed to use kNN entropy estimators (Singh et al 2003 and others) to guide policy optimization without explicit state density estimation. Mutti et al 2021 compute kNN distances on the state features, which is not suited for images. Liu & Abbeel 2021 coupled kNN estimators with contrastive representations (together with various other implementation changes, such as state-based rewards, actor-critic architecture with replay buffers). Other representations have been proposed by subsequent works, such as Seo et al 2021, Yarats et al 2021. Several other works followed on both practical methodologies and theoretical analysis of maximum state entropy. 
Some relevant references that does not seem to be mentioned:\n- Lee et al., Efficient exploration via state marginal matching, 2019; \n- Mutti & Restelli, An intrinsically-motivated approach for learning highly exploring and fast mixing policies, 2020; \n- Guo et al., Geometric entropic exploration, 2021;\n- Liu & Abbeel, Aps: Active pretraining with successor features, 2021;\n- Mutti et al., Unsupervised reinforcement learning in multiple environments, 2022;\n- Mutti et al., The importance of non-Markovianity in maximum state entropy exploration, 2022;\n- Mutti, Unsupervised reinforcement learning via state entropy maximization, 2023; \n- Yang & Spaan, CEM: Constrained entropy maximization for task-agnostic safe exploration, 2023; \n- Zisselman et al., Explore to generalize in zero-shot rl, 2023;\n- Zamboni et al., How to explore with belief: State entropy maximization in pomdps, 2024;\n- Zamboni et al., The limits of pure exploration in POMDPs: When the observation entropy is enough, 2024.\n\nDespite my concerns expressed above, I think the idea of the frozen rewards to improve maximum state entropy approaches is very interesting and worth studying. Perhaps the authors could think of restructuring the paper and their analysis to focus on the empirical benefit that this trick may provide and the (stability) issues that may arise from chasing the non-stationary reward.\n\nMINOR\n- Perhaps clarify the meaning of MaxEnt-H and MaxEnt-LA earlier in the text\n- l.48 \"While these importance sampling-based methods\" -> what does that mean?\n- Eq. 2 min -> max\n- Algorithm 2, line 2: how many states are sampled?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Simpliset Maximum State Entropy Ever."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024rethink,\ntitle={{RETHINK} {MAXIMUM} {STATE} {ENTROPY}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zaoGCGLpux},\nnote={under review}\n}"
},
"abstract": {
"value": "In the absence of specific tasks or extrinsic reward signals, a key objective for an agent is the efficient exploration of its environment. A widely adopted strategy to achieve this is maximizing state entropy, which encourages the agent to uniformly explore the entire state space. Most existing approaches for maximum state entropy (MaxEnt) are rooted in two foundational approaches, which were proposed by Hazan and Liu \\& Abbeel, respectively. However, a unified perspective on these methods is lacking within the community.\n\nIn this paper, we analyze these two foundational approaches within a unified framework and demonstrate that both methods share the same reward function when employing the $k$NN density estimator. We also show that the $\\eta$-based policy sampling method proposed by Hazan is unnecessary and that the primary distinction between the two lies in the frequency with which the locally stationary reward function is updated. Building on this analysis, we introduce MaxEnt-(V)eritas, which combines the most effective components of both methods: iteratively updating the reward function as defined by Liu \\& Abbeel, and training the agent until convergence before updating the reward functions, akin to the procedure used by Hazan. We prove that MaxEnt-V is an efficient $\\varepsilon$-optimal algorithm for maximizing state entropy, where the tolerance $\\varepsilon$ decreases as the number of iterations increases. Empirical validation in three Mujoco environments shows that MaxEnt-Veritas significantly outperforms the two MaxEnt frameworks in terms of both state coverage and state entropy maximization, with sound explanations for these results."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Reinforcement Learning for Exploration",
"Maximum Entropy",
"Intrinsic Rewards"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/ec0a8513b3a8f865406b53636d59eb1ce8563b41.pdf"
},
"presentation": null,
"primary_area": {
"value": "reinforcement learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/356fff8213a29805c68c692351f77a6d924688c5.zip"
},
"title": {
"value": "RETHINK MAXIMUM STATE ENTROPY"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zaxyuX8eqw | GraphFM: A generalist graph transformer that learns transferable representations across diverse domains | main | Active | graph transformer;multi-graph training;graph foundation model;node classification | learning on graphs and other geometries & topologies | 3;3;3;3;5 | 3;3;4;3;3 | 2;3;2;2;2 | 2;2;2;2;2 | 3;3;2;3;3 | 3.4 | 3.2 | 2.2 | 2 | 2.8 | -0.25 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please consider the weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors develop a multi-graph pretraining approach to learn GraphFM, enabling an ability to handle diverse data across a variety of domains."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose GraphFM, a pre-training approach for learning on a variety of graph datasets. The authors posit that GraphFM mitigates the need for personalization of learning graph neural networks on a particular dataset, and thus offering a scalable backbone for a variety of graph learning tasks. The paper demonstrates that GraphFM is competitive in its performance, specifically on node classification task."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The contributions of the paper are fairly limited, as the authors have not established the premise of developing their foundation models vis-a-vis some of the existing works, and the evaluation is also not convincing. \n\nFirst, I recommend that the authors consider the survey paper, \"A Survey on Self-Supervised Graph Foundation Models: Knowledge-Based Perspective,\" and also the tutorial on Graph Foundation Models in WWW'24. The authors have not cited the former, and also not compared and contrasted with the methods discussed in the survey paper. \n\nSecond, the authors should also look at, \"Learning MLPs on Graphs: A Unified View of Effectiveness, Robustness, and Efficiency,\" in ICLR'24. While the paper is not focused on pre-training, but the MLP construct holds similarities to the work developed by the authors in their paper. \n\nThird, the authors do not provide any context on why the particular data were used for validation, and not others. As such, it is fairly unconvincing, and there are also no statistical significance offered in the table of results. \n\nFourth, the authors have not compared their performance to heterogeneous graph neural networks (there are a number of recent papers on this topic), and hence the performance comparisons are not appropriately contextualzied, especially on heterogeneous graphs. \n\nFifth, why the focus only on node classification? If it is a pre-trained foundation models, then should there not be more generalizability offered on downstream tasks?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Traning one model for multiple graphs is not new, in this work, it seems that the backbone is just chaaning from GNN to Graph transformer. Any different insight?\n\n2. Th baseline is also a little old."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The entire paper is well presented and the authors give the details on experiments.\n\n2. the code is available and the reprodubility should be good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Using graph transfromer for multiple graph training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The novelty of this work is not high. It mainly uses the graph transformer cimbining with some engineering effort, like distributed tranining. \n\n2. The paper claim that most GNN train on individual graph, which is not true. GNNs can tranin on mutiple grpah as well.\n\ni) GPT-GNN: Generative Pre-Training of Graph Neural Networks, KDD 2020\nii) GCC: Graph Contrastive Coding for Graph Neural Network Pre-Training, KDD 2020\n\n3. Traning one model for multiple graphs is not new, in this work, it seems that the backbone is just chaaning from GNN to Graph transformer. Any different insight?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- What's the difference between the issue you aim to solve in this paper and the open-set graph domain adaption [3]? \n- The authors should provide a theoretical analysis of the attention mechanism and position encoding in GraphFM when deployed on large-scale graphs, similar to the approach in [4].\n- More recent large-scale graph learning methods should be introduced in the experimental part for fair comparisons, such as Gapformer [1], G-Adapter [2], and ATP [5].\n\n[3] Yin N, Wang M, Chen Z, et al. DREAM: Dual structured exploration with mixup for open-set graph domain adaption[C]//The Twelfth International Conference on Learning Representations. 2024.\n\n[4] Li H, Wang M, Ma T, et al. What Improves the Generalization of Graph Transformers? A Theoretical Dive into the Self-attention and Positional Encoding[C]//Forty-first International Conference on Machine Learning.\n\n[5] Li X, Ma J, Wu Z, et al. Rethinking Node-wise Propagation for Large-scale Graph Learning[C]//Proceedings of the ACM on Web Conference 2024. 2024: 560-569."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- GraphFM focuses on learning transferable representations by pretraining on diverse graph datasets from various domains, which helps GraphFM to generalize well across different types of graphs without the need for tuning for each new task.\n- The DistributedSSSampler proposed in GraphFM can improve the efficiency of sampling in large-scale graph learning by distributing the sampling process across multiple devices, which reduces memory bottlenecks and accelerates training."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose a novel scalable framework, GraphFM, for pretraining Graph Neural Networks (GNNs) across multiple graph datasets. They introduce a Perceiver-based transformer encoder to compress graph-specific details into a shared latent space, enhancing GraphFM's generalization across diverse domains. The authors propose a new sampling method called DistributedSSSampler to improve sampling efficiency in large-scale graph datasets. Experimental results demonstrate that pretraining on diverse graph structures and scaling both model size and dataset diversity helps GraphFM achieve competitive performance in node classification tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Some notations are not clearly defined. For example, the expression $\\tilde{\\mathbf{u}}_i=\\operatorname{MLP}_g\\left(\\mathbf{u}_i\\right)$ appears only once in the paper, and the meaning of $\\tilde{\\mathbf{u}}_i$ is unclear. Additionally, the calculation of the position encoding $\\mathbf{p}_i$ and how $\\mathbf{x}_i$ concatenates a projection of the node features are not sufficiently explained.\n- The novelty of GraphFM architecture is limited. GraphFM builds upon transformer-based architectures like the Perceiver to encode graph-specific details into a shared latent space. Although this improves generalization across diverse domains, the underlying architecture does not introduce fundamentally new mechanisms for graph representation learning. A similar mechanism has also been widely used in some related research papers, such as in [1] and [2]. \n- The experimental results are not convincing, and the baseline methods need to be stronger. Although the authors compare the GraphFM method with some widely used baseline methods like GCN, GAT, SAN, and NAG, these are neither the latest nor the most competitive, undermining the reported performance's significance. Additionally, GraphFM does not achieve state-of-the-art (SOTA) performance in most cases.\n\n[1] Liu C, Zhan Y, Ma X, et al. Gapformer: Graph Transformer with Graph Pooling for Node Classification[C]//IJCAI. 2023: 2196-2205.\n\n[2] Gui A, Ye J, Xiao H. G-adapter: Towards structure-aware parameter-efficient transfer learning for graph transformer networks[C]//Proceedings of the AAAI Conference on Artificial Intelligence. 2024, 38(11): 12226-12234."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The writing is clear."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "GRAPHFM uses a Perceiver-based encoder to unify various graph features into a shared latent space, allowing for cross-domain adaptability. The model was trained on a large dataset covering diverse domains and evaluated on a range of classification tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. How does the model handle more challenging graph tasks, such as link prediction, given its emphasis on node classification?\n2. The paper’s focus on node classification limits its scope and raises concerns about the broader applicability of the approach, especially given the minimal evaluation on other graph tasks.\n3. The method is not novel. The paper is more like a technical report."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper is well-written and easy to understand.\n\n2. To my knowledge, this is the first time a single graph encoder has been trained on 152 different graph datasets and evaluated for its effectiveness—a significant and commendable achievement.\n\n3. The evaluation of the model is comprehensive."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors introduce GraphFM, a transformer-based model designed for pre-training on multiple graph datasets and fine-tuning on downstream tasks. They pre-trained GraphFM on 152 diverse graph datasets and developed several strategies to improve the speed and efficiency of the pre-training process. Following pre-training, the model was fine-tuned and evaluated on various downstream graph datasets with distinct characteristics. The results demonstrate consistent performance improvements as model size and data scale increase."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. A primary limitation of the model is its dependency on unique initial MLPs and final predictors for each graph dataset, which necessitates fine-tuning for every new dataset or task. This requirement significantly hinders the model’s practicality in real-world applications.\n\n2. The pre-training results were largely anticipated, given the model’s supervised training approach. However, in real-world scenarios, labeled data is often scarce, making these results difficult to scale for large-scale graph model training. Notably, what stands out about large language models (LLMs) is their ability to leverage self-supervised learning, achieving scaling laws and emergent abilities.\n\n3. Despite extensive pre-training and fine-tuning on downstream datasets, the model’s performance only matches that of specialized models, without offering substantial improvement. Considering that the model still requires fine-tuning on new datasets, it raises questions about the benefits of this approach over simply training specialized models directly.\n\n4. In Figure 5, the authors compare the proposed model to GCN and NAG on heterophilic graphs and the Coauthor-CS dataset. Could the authors provide comparisons between the proposed model and models with comparable performance on homophilic graphs? Additionally, how does varying the hyperparameter settings affect the performance of the proposed model?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "This work introduces GraphFM, an approach designed for learning across diverse graph datasets, allowing generalization and strong performance across multiple domains with a single model."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024graphfm,\ntitle={Graph{FM}: A generalist graph transformer that learns transferable representations across diverse domains},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zaxyuX8eqw},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph neural networks (GNNs) are often trained on individual datasets, requiring specialized models and significant hyperparameter tuning due to the unique structures and features of each dataset. This approach limits the scalability and generalizability of GNNs, as models must be tailored for each specific graph type. To address these challenges, we introduce GraphFM, a scalable multi-graph pretraining approach designed for learning across diverse graph datasets. GraphFM uses a Perceiver-based encoder with learned latent tokens to compress domain-specific features into a shared latent space, enabling generalization across graph domains. We propose new techniques for scaling up graph training on datasets of different sizes, allowing us to train GraphFM on 152 distinct graph datasets, spanning 7.4 million nodes and 189 million edges. This allows us to study the effect of scale on pretraining across domains such as molecules, citation networks, and product graphs, and show that training on diverse datasets improves performance over single-source pretraining. Our results demonstrate that pretraining on diverse real and synthetic graphs enhances adaptability and stability, leading to competitive performance with state-of-the-art models across various node classification tasks. This approach reduces the burden of dataset-specific training and provides a single generalist model capable of performing across multiple diverse graph structures and tasks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"graph transformer",
"multi-graph training",
"graph foundation model",
"node classification"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/296f7a82da0c2211cf9012aeab8076a7e6b47d05.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/59e980c3bb936ebbf81745b35a3cb875d335b68c.zip"
},
"title": {
"value": "GraphFM: A generalist graph transformer that learns transferable representations across diverse domains"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zb1UI74kxA | How Many Van Goghs Does It Take to Van Gogh? Finding the Imitation Threshold | main | Active | Data Interpretability;Privacy;Text-to-Image Models | interpretability and explainable AI | 5;5;5 | 5;4;3 | 2;3;2 | 2;2;2 | 3;3;2 | 5 | 4 | 2.333333 | 2 | 2.666667 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Mentioned in the summary section"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Mentioned in the summary section"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper makes the following claims:\n- Relationship between a \"concept's\" frequency in training dataset and the ability of a text to image model to imitate it. They find the threshold to be 200-600 images \n- Propose an efficient approach to estimate the imitation threshold \n\n\n\nI have the following questions and suggestions. I am willing to change my review based on the authors' response.\n\n1. The paper does not define \"concepts,\" assuming they are either self-explanatory or domain-specific. This assumption introduces potential bias or error in the paper's claims.\n - How distinct should concepts be from one another to qualify as separate concepts? How is the distance between concepts computed? For instance, many artists share \"styles\" due to shared lineage, schools, or subjects, which can significantly impact the imitation threshold. Broad art styles like \"Avant-Garde,\" \"Deco,\" or \"Baroque\" are loosely defined. Depending on how a concept is defined, the result of 200-600 images could vary significantly. \n\n - Many concepts, such as art styles, overlap and are composite. For example, Cubism, Futurism, and Constructivism share many features, as do Realism, Photorealism, and Precisionism, or Expressionism, Fauvism, and Die Brücke. Furthermore, concepts like \"cubist impressionism\" merge cubism and impressionism. The paper does not provide details on defining a concept or its boundaries, making it difficult to justify the main motivation and claim of efficiently estimating the imitation threshold of a \"concept.\" In cases of composite and overlapping concepts, learning one concept may transfer to another, leading to erroneous imitation thresholds. This makes it hard to justify the paper's main motivation and claim (L91-95, L21-25) of that of efficiently estimating the imitation threshold of a \"concept\". \n\n\n1. L148-151 - How do the authors make sure that the increasing number of samples of a particular concept is *causing* the models to learn to imitate? For example, how do we discount the case that if we train a diffusion model on 1 million concepts with each concept represented by 100 images (well below the imitation threshold of any individual concept), the model may still be able to imitate a particular concept due to an overall increase in general image generation understanding? Again, this relates to the main claim of the paper (L91-95, L21-25) of that of efficiently estimating the imitation threshold of a \"concept.\" The paper seems to address a causal question without providing sufficient causal analysis.\n\n1. The assumptions of the paper are too strict, which makes the method to be of much practical relevance. \n - Particularly, the following two assumptions are idealistic: distributional invariance between the images of all concepts in a domain and no confounders between the imitation score and image count. Can the authors justify these assumptions? Further, can the authors show experiments upon loosening these assumptions? \n - Can we introduce error bounds to account for the strict assumptions? This is important, especially since the abstract and introduction both try to answer a legal question (L24-25, L36-39) with a very prescriptive answer in the abstract, introduction, and results section (200-600 images in general and specific numbers like 364 and 234 images for different models in L342-355) without contextually mentioning the strict assumptions under which their analysis holds true or mentioning the error bounds. One gets to know the limiting assumptions, only when one reads the paper in details. While this may not be author's intention, but this gives a false assumption about the correctness/error bounds of the various imitation threshold numbers.\n\n\n1. Another (strict) assumption not listed in the assumption section is L240-242 \"prompts are distinct from the captions used in the pretraining dataset to minimize reproduction of training images\". (Since the paper mentions the case of StabilityAI) Interestingly, In the famous New York Times vs. OpenAI lawsuit, the lawyers tested GPT, especially flouting this assumption. They start the prompt with the first few words of New York Times articles and let GPT complete the rest of the article [1]. Their case mentions many examples where exact reproduction is observed. This is an example of an imitation. How does the paper address such (actual) cases of imitations when discussing the \"imitation threshold\"? Further, this assumption should be noted in the assumption section and when claiming prescriptive thresholds of 200-600 images.\n\n\n\n1. In Sec-5.2, I could not understand why the same classifier couldn't be used as was trained in Sec-5.1 (potentially, along with the other classifiers the paper uses).\n\n\n\n\n[1] https://nytco-assets.nytimes.com/2023/12/NYT_Complaint_Dec2023.pdf"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Mentioned in the summary section"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weaknesses section and address the concerns listed."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The investigated problem of imitation in text-to-image models is both prevalent and significant. \n\n* By introducing novel methods for estimating concept frequency, the proposed MIMETIC2 is carefully designed to minimize the influence of confounding factors. \n\n* This paper marks the first effort to provide precise estimates of the imitation threshold, offering implications that could benefit both technical advancements and policy-making."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper formalizes and investigates a novel problem of Finding the Imitation Threshold (FIT), which aims to determine the imitation threshold for two representative image types: human faces and art styles. Comprehensive empirical analysis of Stable Diffusion models and their respective pre-training datasets reveals that the imitation threshold for these models ranges between 200 and 600 images, depending on the setup. The estimated thresholds have important implications for users and developers, potentially serving as a basis for copyright and privacy complaints."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* While this paper represents a novel attempt to determine the imitation threshold of concepts in text-to-image models, its technical implications are unclear. The memorization phenomena in LLMs [1] and VLMs [2], along with their training dynamics, are well-explored areas. Although the paper introduces concept frequency and imitation score estimation methods to establish a more precise threshold, it is not evident what technical insights this threshold provides. Apart from conceptual policy-making related implications (i.e., informing text-to-image model developers what concepts are in risk of being imitated, and serving as a basis for copyright and privacy complaints), how can this metric possibly be adopted to advance imitation mitigation strategies, such as those outlined in Appendix A? This is an important and practical issue that must be clearly explained.\n\n* Additionally, the analysis presents conflicting explanations for threshold differences. In Line 347, the authors attribute the higher threshold in SD2.1 to its larger LAION-5B pre-training dataset, while in Line 350, they suggest differences in text encoders between SD2.1 and SD1.5 as the key factor. This raises an important question: what is the primary driver of these threshold differences—dataset size or text encoder architecture? Moreover, the paper does not explore other potentially influential factors, such as model size, which limits the comprehensiveness of the analysis.\n\n[1] Kushal Tirumala, Aram Markosyan, Luke Zettlemoyer, Armen Aghajanyan. \"Memorization Without Overfitting: Analyzing the Training Dynamics of Large Language Models.\" NeurIPS 2022.\n\n[2] Jie Ren, Yaxin Li, Shenglai Zeng, Han Xu, Lingjuan Lyu, Yue Xing, Jiliang Tang. “Unveiling and Mitigating Memorization in Text-to-Image Diffusion Models Through Cross Attention.” ECCV 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. I hope the author can further clarify the given experiment found that the imitation threshold is 200-600 images. Is it a generalized result in a wide range of scenarios, such as other datasets Laion-400m [1], so as to truly provide valuable guidance for judging whether the infringement claim is established and guiding developers to avoid infringement?\n\n2. If the imitation threshold is not reached, can it be considered that there is no infringement?\n\n3. Can the quantitative relationship between Concept Similarity and Concept Frequency be given? \n\n4. Add optimal approach to find the imitation threshold (Pearl (2009)) as a baseline comparison to verify the correctness of the MIMETIC2 method will be better to evaluate whether the improvement in time efficiency lead to a loss in accuracy of imitation threshold estimation.\n\n[1] Schuhmann, C., R. Kaczmarczyk, A. Komatsuzaki, A. Katta, R. Vencu, R. Beaumont, J. Jitsev, T. Coombes, and C. Mullis (2021). “LAION-400M: Open Dataset of CLIPFiltered 400 Million Image-Text Pairs”. In: NeurIPS Workshop Datacentric AI. FZJ-2022-00923. Jülich Supercomputing Center."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors try to determine the number of training samples required for a text-to-image model to truly imitate a specific concept, and the problem is well-motivated.\n\nThe manuscript has clear logic, from the definition of the problem, rigorous assumptions statement, and experimental verification.\n\nThe authors discuss the limitations of the current work in assumptions and problem simplification and clarify valuable future work"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The author experimentally explored the number of data samples required for a text-to-image model to generate outputs that exceed the imitation threshold for a given concept, meaning that truly convincing imitation can be considered to exist. The author proposed MIMETIC2 to address the high computational cost of existing approaches."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The findings are based on limited experiments, which constraints the insights of the results. Under the strict assumptions set by the author, giving quantitative relationship between Concept Similarity and Concept Frequency or an imitation threshold that generalizes to other datasets, such as Laion-400m [1], will be more meaningful. For example, if the quantitative relationship shows exponential decay or exponential increase, it would reflect the possibility of whether a small sample is sufficient to raise infringement issues in a social sense, giving the model trainer a guidance on data usage.\n\n2. The experimental results show limited insights, the types of datasets and models are limited, and are tailored to meet the assumptions pointed out by the author. Although the author lists the challenges encountered in the experiment, such as outliers, there lacks a discussion about feasible solutions. In general, the scenarios considered are too few and too strict, which is far from the practicality of achieving the goal of the paper demonstrated in Section 1, such as judging whether the infringement claim is established and guiding developers to avoid infringement.\n\n3. It is recommended that the author add optimal approach to find the imitation threshold (Pearl (2009)) as a comparison to verify the correctness of the imitation threshold calculated by the proposed MIMETIC2 method.\n\n[1] Schuhmann, C., R. Kaczmarczyk, A. Komatsuzaki, A. Katta, R. Vencu, R. Beaumont, J. Jitsev, T. Coombes, and C. Mullis (2021). “LAION-400M: Open Dataset of CLIPFiltered 400 Million Image-Text Pairs”. In: NeurIPS Workshop Datacentric AI. FZJ-2022-00923. Jülich Supercomputing Center"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We find the number of images of a concept (e.g., a person's face or an artist's style) that a text-to-image model needs in order to imitate it."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024how,\ntitle={How Many Van Goghs Does It Take to Van Gogh? Finding the Imitation Threshold},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zb1UI74kxA},\nnote={under review}\n}"
},
"abstract": {
"value": "Text-to-image models are trained using large datasets collected by scraping image-text pairs from the internet. These datasets often include private, copyrighted, and licensed material. Training models on such datasets enables them to generate images with such content, which might violate copyright laws and individual privacy. This phenomenon is termed imitation -- generation of images with content that has recognizable similarity to its training images. In this work we study the relationship between a concept's frequency in the training dataset and the ability of a model to imitate it. We seek to determine the point at which a model was trained on enough instances to imitate a concept -- the imitation threshold. We posit this question as a new problem: Finding the Imitation Threshold (FIT) and propose an efficient approach that estimates the imitation threshold without incurring the colossal cost of training multiple models from scratch. We experiment with two domains -- human faces and art styles -- for which we create four datasets, and evaluate three text-to-image models which were trained on two pretraining datasets. Our results reveal that the imitation threshold of these models is in the range of 200-600 images, depending on the domain and the model. The imitation threshold can provide an empirical basis for copyright violation claims and acts as a guiding principle for text-to-image model developers that aim to comply with copyright and privacy laws. We will release the code and data upon publication."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Data Interpretability",
"Privacy",
"Text-to-Image Models"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/67101bec56d8e528924f119f66850c8bb96757be.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/b83856adff972710c3ec29bbe010c5bee8745018.zip"
},
"title": {
"value": "How Many Van Goghs Does It Take to Van Gogh? Finding the Imitation Threshold"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zbIS2r0t0F | Allostatic Control of Persistent States in Spiking Neural Networks for Perception and Computation | main | Active | Allostatic;Dynamic;Attractors | applications to neuroscience & cognitive science | 3;3;3;3;5 | 4;4;3;3;3 | 2;2;1;2;2 | 2;2;2;2;2 | 2;2;1;2;2 | 3.4 | 3.4 | 1.8 | 2 | 1.8 | -0.408248 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "-I suggest to quickly introduce the Hammel model, for helping the reader.\n\n-Could the authors elaborate on the biological plausibility of using the Hammel model, specifically in the context of numerical cognition? Are there any alternative biological mechanisms that might be more relevant?\n\n-How does AlloNet compare to other models of numerical cognition in terms of performance and biological plausibility?\n\n-What are the potential implications of this model for understanding cognitive deficits or disorders related to numerical processing?\n\nMinor\n\n-Caption of Fig5 should be more clear. E.g. what is panel B exactly, make clearer what colors refer to. (are bumps of different ring attractors?).\n\n-References to figure 4 should be “Fig4” rather than just “4”.\n\n-There is a missing dot at the end of the abstract, and at the end of figure 4 caption.\n\n-fig1d is not readable"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Novelty of the Approach\nIntegrating an allostatic control mechanism into a spiking neural network architecture is a novel approach with potential implications for understanding self-regulation in neural systems. \n\n2. Dynamic Control of Persistent States\nThe model successfully demonstrates the dynamic control of persistent states in response to environmental changes, a crucial aspect of cognitive processing. \n\n3. Qualitative Reproduction of Behavioral Aspects\nAlloNet qualitatively reproduces certain behavioral aspects of subitization, such as the relationship between reaction time and numerosity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces AlloNet, a spiking neural network architecture that incorporates an allostatic control mechanism for the regulation of persistent states. The authors use a ring attractor network coupled with a Hammel model to achieve dynamic control of spatial changes in neuronal activity. The model is applied to a numerical cognition task (subitization) to demonstrate its ability to modulate the location of a bump of activity as a function of a reference input. \n\nThe main idea appears to be original and effective, however the relevance of the model, such as biological plausibility and comparison with other models could be better discussed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Limited Biological Plausibility: While the model draws inspiration from biological systems like the Hammel model for temperature regulation, the direct application of such a model to numerical cognition might oversimplify the underlying biological mechanisms.\n\nSpecificity of the Model: The paper focuses heavily on subitization as an application. It would be beneficial to explore additional cognitive tasks to demonstrate the generalizability of AlloNet."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* Is the bump instability, in Fig. 5, influenced primarily by synaptic time constants, or are other factors like network noise equally contributory?\n\n* It would be interesting to see the AlloNet benchmarked against other attractor models that handle drift, like ring attractor networks with stabilizing feedback (e.g., [Accurate Path Integration in Continuous Attractor Network Models of Grid Cells\n](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1000291))"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors provide a novel approach to applying allostatic principles to control internal representations within spiking neural networks. This approach may inspire further cross-disciplinary exploration of allostasis and neural network control. In particular, the use of allostasis as a tool of synchronizing the alignment between internal representations and external stimuli could make the model useful in applications requiring real-time adaptability, such as robotics or artificial agents interacting with unpredictable environments."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents the AlloNet model, which applies allostatic control principles to spiking neural networks to maintain adaptable, persistent internal states. By extending the biological concept of allostasis, the authors enable a ring attractor network to dynamically align internal representations with external stimuli. The model is specifically applied to a subitization task, where it demonstrates the ability to align internal numerical representations with external stimuli through an attractor network’s localized bump of activity."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The paper presents allostasis as a beneficial mechanism but does not sufficiently compare it to traditional homeostatic mechanisms or other neural adaptation frameworks. This limits understanding of when allostatic control is most useful or essential, making it difficult to gauge the model's full significance\n\n* The experimental setup is limited to idealized, controlled tasks. Real-world applications, however, typically involve noisy, unpredictable inputs that can disrupt internal representations, which this model might struggle with. Current experiments do not address such robustness.\n\n* The motivation for using a ring attractor with a \"bump of activity\" as the representation for the task of numerical cognition (e.g., subitizing) is not clear. What is the (systems) neuroscience evidence for this? \n\n* The authors note that error rates increase with numerosity and time, but more in-depth insights into the reasons for these errors, such as bump instability, could clarify model limitations"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See the Weaknesses section. I have some additional questions:\n* I don't entirely understand the use of a ring attractor for this task. For a number line representation, wouldn't a bounded line attractor be a better idea, especially as it does not wrap around from the maximum to the minimum? Furthermore, the use of a continuous attractor makes the representation more akin to estimation rather than categorisation/subitising, and would align better with work such as [Piazza et al. (2002)](https://www.sciencedirect.com/science/article/abs/pii/S1053811901909802?via%3Dihub) claiming that counting and subitising use the same neural circuitry, as opposed to [Kutter et al. (2023)](https://www.nature.com/articles/s41562-023-01709-3). Could the authors clarify this and correct me if I have misunderstood something?\n* How do the authors choose the various hyperparameters mentioned in Table 1? Have the authors tested the robustness of the results to changes in hyperparameters apart from the time constant?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "* The proposed model is original to my knowledge, and is fairly simple to understand.\n* The authors have compared observations from their models such as reaction times to those of humans performing the same task.\n* A testable prediction, i.e., that small numerosities are encoded using a magnitude-based system (ring attractor in this case, which is central to the model), could potentially be validated using experiments with animals/humans."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a mechanism to control the activity of spiking neural networks through allostasis, in order to perform a subitising task. The proposed architecture uses the Hammel Model to regulate internal beliefs associated with the task in response to stimuli, and this is used to control bump activity on a ring attractor that encodes numerosity. The authors compare observed reaction times from their model to those of humans performing the same task, and are able to reproduce certain human behavioural responses with their model. They also assess the impact of the value of the neurons' time constant on task performance, and compare a single neurons' activity profile to known neural dynamics for subitising in humans."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* While the authors are able to recapitulate some observations in humans using their model, i.e., that reaction times increase with numerosity (for a specific value of the time constant), the actual reactions times from the model are far greater than human reaction times (almost 10000 ms vs less than 500 ms) ([Dehaene, 2011](https://psycnet.apa.org/record/2011-10610-000); [Kutter et al., 2023](https://www.nature.com/articles/s41562-023-01709-3)). Do the authors have an explanation for why this is the case, and could it be resolved with better choices for hyperparameters?\n* I'm not entirely convinced with the authors' argument that they are able to properly reproduce reaction time patterns. The sharp jump in reaction time for a numerosity of 4 is only observed with a different, faster time constant value of 900ms, while the gradual increase in reaction times with numerosity is only observed with a slower time constant of 1000ms. Furthermore, in the 900ms case, the reaction times for lower numerosities do not match the human data at all, as the authors admit. Could the authors clarify this? I would expect the model to match human observations for a single value of the time constant in order to claim that it reproduces experimental results.\n* The model is evaluated only on a single task, i.e., subitising. Especially given the use of a ring attractor and the authors' claims of the model's generalisability, it would be important to evaluate the model on other tasks involving numerosity estimation or tracking a magnitude, such as head direction integration ([Valerio & Taube, 2012](https://www.nature.com/articles/nn.3215)) or average estimation ([Lee & Ma, 2020](https://cognitivesciencesociety.org/cogsci20/papers/0304/0304.pdf)).\n* The task is also limited in that it restricts numerosity from 1 to 4. 
To better compare the observations from the model to previous experiments ([Kutter et al., 2023](https://www.nature.com/articles/s41562-023-01709-3)), the authors should incorporate both small and large numbers (from 0 to 9, for example). If the authors want to align their work better with [Kutter et al. (2023)](https://www.nature.com/articles/s41562-023-01709-3), two different representations could be used for 0-4 vs 5-9 – and in this case it would be important to test whether discrete attractors (fixed points for each of 0-4) match the data better than when using a ring attractor.\n* There are other issues with the results and their interpretation. For example, in Fig. 3B, it is not clear how much more variable the reaction times are for higher numerosities with longer time constants (as claimed on line 312). Furthermore, from Fig. 3D, it seems that the \"quality score\" is very low for numerosity 3 and longer time constants (while for shorter time constants, quality is 0 for numerosity 2 but quality is higher for 3), why is this the case, and is there any experimental evidence of this in animals/humans?\n* The authors claim that the neural dynamics of their model are similar to those of several brain regions in human recordings, but this is not substantiated with a metric or even a plot showing qualitative similarity. Furthermore, some of the analysis of neuronal responses has only been done for a single, arbitrary neuron, but it would be important to look at population responses when making comparisons to human data.\n* Another weakness is the specificity of these results to hyperparameter and architectural choices. The lack of learning also makes it harder to adapt this model to more complex tasks.\n* Finally, the writing lacks clarity and contains grammatical, formatting and some typographical issues. 
Examples:\n\n * Line 50 (\"can represent to track changes...\", unclear what this means), Line 53 (\"resemble to the\" -> \"resemble the\"), Line 158-159 (\"functions to take input for..\"), Line 249 (\"in an standard...\"), Line 463-465 (sentences are vague, \"we allowed to work on\"), etc.\n * Several in-text citations are not properly enclosed within brackets.\n * \"Excitatiory\" -> \"Excitatory\" (Table 1), inconsistent use of \"ise\" vs \"ize\" (British vs American English), \"... as a computation model ...\" -> \"computational\", lack of proper spacing before and after parentheses, etc.\n\n I would encourage the authors to carefully revise the paper to improve its clarity and fix any other grammatical/typographical issues."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How did you decide on this setup, specifically the use of high/low gain modulation neurons to drive shifts and the Hammel model to control them? Is there biological evidence supporting these choices?\n2. Could a similar task be achieved with a rate model? If so, how would its reaction time compare to that of the spiking model?\n3. The alpha synapse decay constant for HGM and LGM is approximately 1000ms, which is unusually long for neurons. Could you provide more reasoning behind this choice?\n4. The input setup is unclear. Do the four Poisson spike generators fire at the same rate? If not, how do they differ, and why is numerical information encoded in firing rates? Is there experimental evidence for this encoding, or could a different encoding method (like one-hot vectors or embeddings) be used?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors propose a novel network model based on the ring attractor, enhanced with high/low gain modulation neurons to manage bump shifts, with the Hammel model controlling these shifts. This enables flexible positioning of the bump to match external signals.\n2. Some aspects of the model’s performance on counting tasks align with human data, particularly when varying the synaptic time constant."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper investigates how structured networks, like ring attractors, control bump activity in response to external stimuli? The authors propose a network model combining a Hammel model and ring attractor network, with high/low gain modulation neurons driving the bump shift to align with an external stimulus. They argue that this model, due to its control over bump positioning, could model subitization (rapid counting) and compare it to biological data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The counting task demonstration is very simple; the numerical information is directly encoded in the external signal’s firing rate.\n2. Although certain model properties align with human data, they do so under varying synaptic time constants, which are not well explained. Additionally, the model’s reaction times are significantly slower than human responses, by about an order of magnitude.\n3. The necessity of using a spiking neural network is not adequately justified.\n4. The model’s structure, specifically the gain modulation neurons and the Hammel model component, lacks biological explanation.\n5. The network dynamics and mechanisms are underexplained. For instance, in the connection weight formula $w_{ij}$, the term $d_{ij}$ is undefined, though it appears to represent the distance between neurons in the ring.\n\n\n### Mino \n1. In Equation 1, $d_{ij}$ is undefined.\n2. Figure 1, with its four subplots, could be clearer if organized differently.\n3. In Figure 2, specify units for the x-axis (probably ms).\n4. The synaptic decay constants \\tau used are unusually long compared to standard neuron models. More explanation of these values and their role would improve readability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\tThere’s a saying suggesting that the number line is organized not linearly but log-linearly (Testolin, A., & McClelland, J. L., 2021). Could the present model still function effectively if the ring attractor network were replaced with a log-linear number line?\n2.\tIdeally, each point on the ring attractor should be neutrally stable, meaning the probability of bump initiation would be equal at any point on the ring. However, the initiation point significantly affects reaction time. How do you ensure the bump consistently initiates from the same point each time?\n3.\tIn Eq. 4, should it be V_th-h so that the potential is below the threshold? Also, could you clarify lines 241-243 preceding Eq. 4? I ask because, in my understanding, the bump should remain stable as long as LGM and HGM neurons are firing equally but not necessarily silent."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tThe introduction section provides a comprehensive overview of both numerosity and attractor networks.\n2.\tThe integration of physiological concepts into neural circuit modeling is innovative and inspiring."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work develops a framework that dynamically adjusts representation based on environmental inputs, drawing inspiration from the body’s temperature regulation system and incorporating the concept of allostasis. To encode stimuli, the model employs a ring attractor network, while an allostasis module called “Hammel model” serves to compare internal and external variables. This module then outputs gain control signals to drive the ring attractor’s response moving. The model was verified by a subitizing task."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tImproper Citations: For instance, in lines 173-174, citing Zhang, K. (1996) is essential for discussing the asymmetry in continuous attractor connections. Additionally, it appears that Fig. 1c is likely influenced by Fig. 1d in Wilson, R.I. (2023), but lacks appropriate citation.\n2.\tLack of Clarity on Network Dynamics and Analysis: The model does not provide a clear definition of network dynamics or a detailed theoretical analysis of the results.\n3.\tBiologically Implausible Model Setup: The model’s structure and parameters are set in bio-unplausible ways. Specifically:\n\t- The synaptic time constants far exceed the plausible range, making the comparison across time constants in Figs. 4 and 5 less meaningful.\n\t- LGM and HGM neurons were designated as inhibitory. I agree that it is a feasible way to drive the ring attractor, but there has been both experimental and theoretical evidence showing that P-EN neurons in Drosophila brains fulfill this role as excitatory neurons (see Zhang, W., Wu, Y. N., & Wu, S., 2022; Mussells Pires, P., Zhang, L., Parache, V., Abbott, L. F., & Maimon, G., 2024).\n4.\tFailure to Model Human Behavior Accurately: Although section 3.1 attempts a loose comparison between the model and human behavior, the model does not successfully replicate human behavioral patterns.\n5.\tUnpolished text and figures: The text contains several typos, unified terminologies and missing punctuation."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024allostatic,\ntitle={Allostatic Control of Persistent States in Spiking Neural Networks for Perception and Computation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zbIS2r0t0F},\nnote={under review}\n}"
},
"abstract": {
"value": "We introduce a novel model for updating perceptual beliefs about the environment\nby extending the concept of Allostasis to the control of internal representations.\nAllostasis is a fundamental regulatory mechanism observed in animal physiology\nthat orchestrates responses to maintain a dynamic equilibrium in bodily needs and\ninternal states. In this paper, we focus on an application in numerical cognition,\nwhere a bump of activity in an attractor network is used as a spatial-numerical\nrepresentation. While existing neural networks can maintain persistent states, to\ndate, there is no unified framework for dynamically controlling spatial changes in\nneuronal activity in response to enviromental changes. To address this, we couple\na well-known allostatic microcircuit, the Hammel model, with a ring attractor, re-\nsulting in a Spiking Neural Network architecture that can modulate the location of\nthe bump as a function of some reference input. This localised activity in turn is\nused as a perceptual belief in a simulated subitization task – a quick enumeration\nprocess without counting. We provide a general procedure to fine-tune the model\nand demonstrate the successful control of the bump location. We also study the\nresponse time in the model with respect to changes in parameters and compare\nit with biological data. Finally, we analyze the dynamics of the network to un-\nderstand the selectivity and specificity of different neurons to different categories\npresent in the input. The results of this paper, particularly the mechanism for mov-\ning persistent states, are not limited to numerical cognition but can be applied to a\nwide range of tasks involving similar representations."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Allostatic",
"Dynamic",
"Attractors"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/0fd7facf0f7b6004f7f39a7ab787d518cb3d417b.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to neuroscience & cognitive science"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/00393cbdadb5e6a3deeaddf31561fb8b8e651d25.zip"
},
"title": {
"value": "Allostatic Control of Persistent States in Spiking Neural Networks for Perception and Computation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zboCXnuNv7 | Semialgebraic Neural Networks: From roots to representations | main | Active | deep learning;semialgebraic functions;homotopy continuation;real algebraic geometry;recurrent neural networks | learning theory | 3;5;6;8 | 2;3;4;3 | 2;2;3;3 | 2;3;3;3 | 1;2;2;3 | 5.5 | 3 | 2.5 | 2.75 | 2 | 0.588348 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "na"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In the introduction authors made an analogy with deep equilibrium model and write (line 46) \"Therefore, in principle, we can use a neural network combining polynomials and ReLU activations to learn G, then append a root-finding procedure such as Newton’s method to compute $F (x) = \\text{root}(G(x, \\cdot)) = y$ in a manner similar to Bai et al. (2019).\" Given that it seems that deep equilibrium models are already capable of computing arbitrary bounded semialgebraic function, and the claim that authors present first such method (line 59, \"To our knowledge, we present the first neural networks capable of computing arbitrary bounded semialgebraic functions on high-dimensional data\") is misleading. Can the authors please clarify this?\n2. In line 270 and equation (2), (3) that jointly define SANN there is an operation defined as $M^{-1}b$ if $M$ is invertible and $0$ otherwise. This operation is not convenient from the numerical perspective, since it requires the inversion of potentially large matrices, besides it is not cheap to test numerically that $M$ is invertible. Is it possible to generate $M^{-1}$ right away from $ISD_{\\text{net}}$ in place of $M$? Is it important that $M$ can be not invertible? Can the authors please clarify this part?\n3. In line 332 authors write \"In our experiments, we found it insufficient to train using only the accuracy of the final output $\\left\\|y^{\\star} − y(1)\\right\\|$.\" No numerical experiments can be found in the article. Can the authors please describe numerical experiments they tried and show the results?\n4. As I understand, training procedures include differentiation through the ODE solver. For that one needs to solve the adjoint ODE equation, and this is going to incur additional numerical costs. In a similar way, a deep equilibrium model defines an adjoint equation to obtain a derivative. 
Given that it is not clear what advantages the proposed method has over the deep equilibrium model (as discussed in the introduction) in terms of computations required. I kindly ask authors to discuss this issue.\n5. In G2 authors provide interesting applications of SANNs. The idea, as I understand it, is to apply SANN to the space of equivalent feed-forward ReLU networks (which form a semialgebraic set). It is not entirely clear to me how this set can be constructed and manipulated with SANN. Can the authors provide more details, preferably, with some simple examples? It seems to me that this set is hard to work with, because practically indistinguishable networks can have weights arbitrarily distant in the $L_2$ norm https://arxiv.org/abs/1806.08459. It is also not clear to me how precisely SANNs can be used in the context of Lipschitz networks and for sparsifications."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The article introduces an interesting original idea that can be potentially used for many problems laying on the intersection of scientific computing and machine learning. Authors provide many details, examples and clarifications that help the reader with little background in semialgebraic approximation to better understand theory authors develop. Theoretical results seem to indicate that the proposed class of models represents a rather general set of functions. I also find particularly stimulating a discussion of relation between SANNs and deep equilibrium models solved with numerical continuation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Authors proposed an architecture that is capable to approximate an arbitrary semialgebraic function. The main ingredients making the construction possible are two facts (i) it is possible to efficiently generate functions from ISD by neural networks with ReLU activations, (ii) any closed semialgebraic set (defining semialgebraic function) is a kernel of some ISD. Authors exploit these facts by combining a neural network with a homotopy method that deforms a simple semialgebraic set into a target semialgebraic set. The role of homotopy is to replace rootfinder that can be also used to find kernel of ISD. The resulting architecture resembles neural ODE, and requires adjoint for training. Authors demonstrated several theoretical results showing that with the proposed architecture it is possible to approximate continuous and discontinuous semialgebraic function."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The article is theoretical, so the weak side is, naturally, a discussion of practical matters: computational complexity, how networks should be trained, how it compares with different related models, and questions alike. I put some of these questions in the section below, but overall I do not find this is a significant disadvantage, given that the goal of authors is to provide a certain \"universal approximation\" result for novel architecture they propose."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "None"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "* \"To our knowledge, we present the first neural networks capable of computing arbitrary bounded semialgebraic functions on high-dimensional data.\" What advantage does such an architecture have compared to SoTA neural networks which are already widely used and easy to train?\n* \"In our experiments...\" what experiments are you referring to? Would it be possible to include the discussed experiments in the final manuscript? If not please remove any references to experimental results\n* You provide theoretical guarantees (Theorems 14 to 16) that your model can represent any bounded semialgebraic function. Do you have any intuition on learnability? Could your model learn a function exactly given a finite amount of samples?\n* Can you give examples of how representing discontinuous functions exactly might be useful in practice?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The theoretical contributions of this paper seem strong. Capacity to compute all bounded semialgebraic functions (and extension to discontinuous functions) seem to be solid theoretical guarantees.\n2. The proposed method seems, in principal, easy to implement and computationally efficient.\n3. The method seems, in theory, to be applicable to a large range of optimization problems."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes the use of polynomial homotopy continuation methods to design a novel neural network achitecture. Homotopy continuation methods consider a function $H$ which continuously deforms a target function $G_0$ until it reaches a target function $G$. This method uses neural networks with RELU activations to compute the vector field of an ODE corresponding to this process of continuous deformation.\n\nThe authors show formally that this method can compute all bounded semialgebraic functions and that their method can be extended to discontinuous functions. \n\nFinally, a general class of optimization problems is specified, and the authors give example applications of their method."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The subject matter presented in this paper is quite difficult, and I believe most ML researchers are probably quite unfamiliar with these mathematics. That being said, I found the exposition to be quite hard to read. Here are some more precise points/considerations I believe would improve the readability\n * Given this paper is presented to a machine learning (ML) conference, there should be more emphasis on the underlying learning problem. It might be interesting to motivate your architecture through a regression framework, for instance.\n * You include many proofs in the main text and do not give much intuition on your mathematical results. I believe it would improve readability to put proofs in the appendix and spend more time developing intuition in the main text. You could for instance add more examples throughout the text such as the one presented in \"Example 6\".\n * I believe the algorithmic description of the architecture should be included in the main paper as it explains clearly the forward pass of your algorithm. It might be interesting to include \n\n2. The paper offers no experimental results on the proposed architecture. Given the main contribution of this paper is a novel architecture, empirical validation demonstrating the trainability of the model is crucial. I believe adding simple experiments with synthetic data would increase the value of the paper. You could, for instance, define a simple regression problem with synthetic data generated by a semialgebraic function, and show your model can exactly recover the correct mapping.\n\n3. \"We give a few remarks on training SANNs, and leave a more thorough investigation for future work.\" Although you do define a loss objective to train the model, you provide no empirical or theoretical justification that this loss objective works. As with point 2, I believe it would strengthen the paper to give a thorough investigation (either theoretical or empirical) of the training procedure. 
For notes on doing this empirically see point 2. This could also be done theoretically by analyzing the training dynamics for example.\n\n4. I find the structure of the paper to be confusing. I will list my comments about paper structure below:\n * There is no discussion or conclusion: it would be nice to add a conclusion section in which you discuss limitations and address future directions of research. \n * The section which presents example applications does not contain any actual examples, only the general class of problems. It might help readability to use one of the example applications given in the appendix as a motivating example. \n * Most of the paper seems to be an introduction to semialgebraic geometry and homotopy continuation methods (section 2 and first half of section 4). Maybe it would help readability to give a less detailed/rigorous introduction to the field and focus on explaining how the architecture works/why this method is useful."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Why should semialgebraic function F not be simply modelled by ‘standard’ MLPs/NNs? In other words, why should F be encoded to the kernel of a piecewise polynomial? \n- Do the authors have a (rough) estimation and/or observation about the computational complexity of fead forward and training of SANNs?\n- What would be a concrete example and task in PDE simulation to which SANNs are expected to perform better than existing methods? What would be the advantage of using SANNs in this case?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- Building a neural network based on semialgebraic geometry is very refreshing and definitely new class of ML models.
\n- Using (the idea of neural)ODE as a computational graph of SANN also makes a quite sense assuming polynomial homology continuation method is used. \n- Visualization of the homotopy continuation method is really helpful to foster the readers’ understanding.\n- Appendix covers exhuastive theoretical contents of the paper. Some applications are also discussed."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a new class of neural network models, SANNs. The model is built on the basis of semialgebraic theory. The fead-forward network is defined using ODE and polynomial homotopy continuation method is used to ‘universality’ of the expressiveness of the neural networks. The paper focuses on theoretical aspects of the neural networks and contains abundant theoretical claims."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Theoretical aspect:** In general, the paper assumes readers to be very familiar with semialgebraic geometry, which is (at least at this point in time) not a main stream of ML communities. I would presume the paper would be very hard to follow especially for those who are not familiar with this topic. I strongly encourage the authors to revise the paper so that the contents of the paper could be more accessible to even those unfamiliar with this topic. The followings are some concerns I found:\n- The introduction of the paper is very hard to understand. Authors’ claims in the first paragraph of the introduction ‘’Semi algebraic functions include anything computable using …” and/or “Due to their ubiquity” would be unconvincing for those unfamiliar with this topic. It would be very helpful if the authors include references and/or brief explanation on the representative “classical numerical algorithms” that compute semialgebraic functions.\n- The definition of lattice should be mentioned before Definition 1 or referring to Appendix A -- I would say that at least in ML-community a lattice typically means $ \\mathbb{Z}^{d}$.\n- ‘ker(f)’ in Proposition 7 should be also introduced. I would also say, the first sentence of the proof might be a bit rough -- It would be better to mention the continuity of $f$. Need to clarify the definition of $C^1$, since $\\max ( 0, -q(x) )$ is generally not differentiable.\n- The proof of Proposition 10 is hard to understand. Especially, ‘another ISD-function’ at line 311 is vague. Tarsi-Seidenberg theorem should be also stated somewhere in Appendix.\n- What the authors mean by Training is somewhat unclear. When SANNs are trained, what parameters of SANNs are going to be trained? Does that mean the coefficients $a$ of $f_{k}(x)$ at line 154 are the trainable parameters? 
\n- The claim in the lines 42-43 ‘neural networks capable of exactly computing any bounded semialgebraic function’ sounds vague since it is not clear what the authors mean by ‘exactly compute’. Does it mean, the proposed neural network can solve any root finding problems for semialgebraic functions?\n\n**Experimental aspect:**\n- The application that benefits from the proposed network is not clear and it is hard to evaluate numerical advantages of the method. Key advantages of the networks in numerical experiments are stated as that the computational complexity is low and the evaluation time is fixed when using (non-adaptive) ODE solver, either of which are not evaluated in experiments or important classes of applications. While the paper rather has a technical sound and supports the theoretical validity of those two aspects, having numerical experiments that support authors’ claim would add stronger experimental support of the method's advantage. \n- I managed to find in line 332 ‘In our experiments, we found it insufficient to train using only the accuracy…’, but I cannot find results of any experiments. \n- The example applications in Appendix G should be discussed in the main text. \n\n**Minor:**\n- ‘compute’ feels abused frequently. In some sentences, ‘represent’ instead of ‘compute’ sounds more convincing for me.\n- L.20 or 21, “is able execute…”\n\n\n**Overall comment (and meddling suggestion):**\n\nThe theoretical results of this paper are very intriguing and I believe this work would shed a light on the new usage of semialgebraic geometry in ML domain. On the other hand, I cannot overlook the major drawbacks that the paper lacks experiments which support theoretical finding and does not clarify numerical advantages against existing methods, as well as the lack of the readability of the paper. Therefore, I will give the paper the score ‘6’. However, I would be very comfortable to raise the score once those issues are addressed satisfactorily. 
\n\nThis may be none of my business, but I also cannot refuse wondering if this paper might also have more appropriate venue to be submitted given the abundance of theoretical contents. It might be very interesting if the authors could look for venues with more theoretical sound, such as (computational) mathematics journal. While the work of this paper definitely falls down into some category of machine learning domain, I also feel the paper would make a greater impact in the different communities;)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Questions:\nThe questions are in no particular order.\n1. Fig 1 caption: Why does homotopy not give parts in kernel of G that are not part of F but somehow also correctly gives isolated point (0,0)? \n2. What does \"defined in pieces\" mean?\n3. In corollary 8, \"gr\" is not defined.\n4. Why not learn the kernel directly? (As in, learn the predictive model F(x)= y as is standard in ML) What is the benefit of learning G?\n5. Why do we expect (7) to be small when s < 1? \n6. Above Thm 16 you say \"exactly compute\" but in remark earlier you mentioned that this will still have discretization error due to ODE solve. Should this be changed? \n7. Why does Theorem 14 only succeed with \"probability 1\"?\n\nSuggestions:\n1. Define acronym SANNS in main text.\n2. Clarity of Definition 1 can be improved. Perhaps the free lattice should be defined? C^k(D, R^n) is not defined (I'm assuming this is just k times differentiable functions on R^n?) \n**3. I do not understand the definition of SANNs in equations 2-5. How does little n affect Z(N) and what is the product over? What is M and what does the the tuple equation (M, b) = N mean? (I'm guessing but can't be certain it's like the clamp-sol argument.) I suggest clarifying the notation here more carefully. \n4. The distinction between existing work and new contributions is not clear in 4.2. Can you write your results as a proposition?\n5. Does Section 4.1 really belong in Section 2?\n6. In your summary of Section 4, perhaps include the subsection numbers to help navigate the relationships between the proofs."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Overall interesting application of homotopy methods to learn semi-algebraic functions. The work includes nice background on semi-algebraic geometry. Overall the high level idea is potentially interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Semialgebraic Neural Networks (SANNs), a novel neural architecture designed to exactly compute bounded semialgebraic functions through homotopy continuation methods. The key idea is to encode the graph of a learned function as the kernel of a piecewise polynomial and then use numerical ODE solvers to evaluate it. The authors provide theoretical guarantees for the expressivity of SANNs, showing they can represent both continuous and discontinuous bounded semialgebraic functions. The work bridges classical numerical analysis techniques with modern neural network architectures."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The biggest weakness of the paper (and major reason for the score) is the lack of clarity in the definition of SANN's and Section 4. I mention some suggestions in the questions below.\nThe introduction hints at a potential answer, but I don't understand why one would want to use this framework. Is this better than using the traditional learning framework (of learning F directly?)\nHere is a list of other weaknesses:\nThere is no implementation or numerical work. (This isn't necessary, but I think the claim that you \"demonstrate on applications\" isn't accurate.)\nIt isn't clear which results are classical, or almost classical with a few notational differences. For example, is Proposition 7 a new result? What is new in Section 4.2?"
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present a neural network architecture, grounded in classical homotopy continuation methods for root-finding, that can exactly represent any bounded semialgebraic function."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024semialgebraic,\ntitle={Semialgebraic Neural Networks: From roots to representations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zboCXnuNv7},\nnote={under review}\n}"
},
"abstract": {
"value": "Many numerical algorithms in scientific computing---particularly in areas like numerical linear algebra, PDE simulation, and inverse problems---produce outputs that can be represented by semialgebraic functions; that is, the graph of the computed function can be described by finitely many polynomial equalities and inequalities. \n In this work, we introduce Semialgebraic Neural Networks (SANNs), a neural network architecture capable of exactly computing any bounded semialgebraic function up to the accuracy of a numerical ODE solver chosen by the programmer.\n Conceptually, we encode the graph of the learned function as the kernel of a piecewise polynomial selected from a class of functions whose roots can be evaluated using a particular homotopy continuation method.\n We show by construction that the SANN architecture is able execute this continuation method on each connected component of the target function, thus evaluating the learned semialgebraic function.\n Furthermore, the architecture can exactly compute even discontinuous semialgebraic functions in a natural way.\n Lastly, we present a general formulation for optimization problems whose solution operators are representable by SANNs, and we demonstrate particular applications to nonlinear inverse problems and deep learning hypernetworks."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"deep learning",
"semialgebraic functions",
"homotopy continuation",
"real algebraic geometry",
"recurrent neural networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/327e3363954398da14e218e0015d929d1ed54f62.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Semialgebraic Neural Networks: From roots to representations"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zbpzJmRNiZ | From Uncontextualized Embeddings to Marginal Feature Effects: Incorporating Intelligibility into Tabular Transformer Networks | main | Active | Tabular Deep Learning;Interpretability;Tabular Transformer Networks | interpretability and explainable AI | 3;5;5;6 | 2;2;1;4 | 2;2;2;3 | 2;2;2;4 | 1;2;2;2 | 4.75 | 2.25 | 2.25 | 2.5 | 1.75 | 0.473684 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 1
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "n/a"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "n/a"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "I do not feel qualified to review this paper as I do not have any experience with tabular machine learning. I would like to ask the ACs to seek an opinion from different reviewers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "n/a"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Could you please clarify the results in Section 2.1, especially how to interpret them with respect to interpretability?\n- In practice, how can we be confident that the feature networks have the capacity to fully capture marginal effects? If they can't fully capture them, then part of the marginal effect could be learned by the transformer instead."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- In general, the proposed method makes sense to me and I could see it being used in practice. I've worked with tabular transformers in applied settings and can attest that understandable per-feature explanations are difficult to achieve. This method makes sense to me as a way to get greater interpretability while retaining the performance of tabular transformers, or other deep tabular models.\n- As far as I can tell the proposed method is original. It resembles NATT (Thielmann et al. 2024), but I think the design differences are significant in practice and make sense. This method passes all features to the transformer as input, then uses dropout during training to guide the model to a desired behaviour that makes all features interpretable, whereas NATT keeps numeric and categorical features separate and only provides a comparable level of interpretability for numeric features.\n- Experimental results are fairly convincing in showing that the proposed method doesn't sacrifice performance."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a modification to predictive tabular transformer models to make them more interpretable: producing the final output as a sum of the transformer output along with shallow univariate models for each input feature. Dropout is used to encourage the model to optimize its use of each feature independently. The univariate models can then be interpreted as indicating the marginal effect of each feature on the overall output."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Discussion of related works is lacking. There is no separate related works section, just some context provided in the introduction, and while a decent selection of related papers are cited, there's little discussion of how the proposed method relates to and differs from existing work. One point I would like to see addressed is whether and how this method improves on methods like integrated gradients, LIME, or SHAP that produce per-feature explanations without requiring model modifications.\n\n- Some theoretical results are given in Section 2.1, but it's not described how the results relate back to the interpretability of the model, and there are some particular issues I wanted to raise:\n\t- The definition given for $R_{\\tilde{w}_{-k}}$ at line number 279 doesn't make sense to me - it could even take the opposite sign of the overall risk \\(R\\) if the model has a greater loss with just feature $k$ than on average.\n\t- Equation (7) depends on the assumption that the risk for each single-feature dropout vector is the same and equal to $R(1-p(\\tilde{w}_k))$, which seems very unlikely for realistic models or datasets where features have different importances and the model tends to perform better given more features. So I don't see the relevance of this result.\n\t- My interpretation of the section is that when training with dropout, the performance of individual feature predictors is in some sense bounded by the overall model performance. This makes sense, but it's not clear to me if this indicates anything about the interpretability of the model in terms of the individual feature predictors or what their predictions represent (i.e., do they actually optimize the loss with respect to $\\mathbb{E}_{y|x_k}[y|x_k]$, or just something that's not too far from it?).\n\n- Even under the proposed approach, the transformer component itself is somewhat of a black box. 
This could be tolerable given the improved interpretability of the overall model, but it is a limitation.\n\t- While thinking through the method, I kept coming back to what the optimal behaviour learned by the transformer and shallow predictors would be. I think it is true that the optimal behaviour at low dropout rates is at least approximately for the transformer to only predict joint effects plus marginal effects that are too complex for the shallow predictors to learn, rather than learning all joint and marginal effects. A proof of this would be useful if possible though.\n\nGiven that I think the proposed method could have real practical utility, I'm leaning towards an accept, but I would prefer to see more a more relevant theoretical discussion."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See Weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "From my prospective, the strengths of this paper are as follows:\n- Compared to FT-Transformer or GAM, the proposed NAMformer has more interpretability and stronger predictive power, respectively\n- There are analysis between contextual and uncontextual embeddings, proving that the uncontextual embedding can represent the original feature\n- There are a lot of formula providing theoretical support"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposed an adaptation of tabular transformer networks designed to identify marginal feature effects, while maintaining the capability of prediction performance at the original level."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "After reading the paper, I still have some questions.\n1. In section 2, the encoding of numerical features involves the threshold $b_t$, and the thresholds are from the decision trees. So I wonder how we get these decision trees? Is the marginal effects of the model the same as those decision trees? And what about the performance compared with the decision trees.\n2. The method seems too trivial to achieve the expected performance. Only a Linear and dropout can identify the marginal and even enhance the performance.\n3. In tabular domain, it seems that 8 datasets in all are not sufficient. This may result in doubt that the datasets used are selected for this task.\n\nIn addition to the above questions, I think there are some problems with typography and content。\n1. It will be better if there is a section which introduces the related work, e.g. transformers in tabualr, marginal feature effects in tabular or other domains, etc. I think this will help reader better understand the task and your method.\n2. It seems that in section Abstract, there is some problem with the spacing between lines."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 2
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- What are uncontextualized embeddings? As far as I can tell, this is never explained in the paper. \n- What does token identifiability mean? This is also not defined."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The claim that the proposed NAMformer is not worse than FT-Transformer in terms of prediction performance seem true on the tasks that were used in the evaluation. \n- The general idea of making tabular transformer architectures interpretable seems like it should be useful for a variety of problems. \n- The inclusion of synthetic evaluations and ablations are useful for understanding the mechanics of the proposed approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a modification to one of the tabular transformer architectures that enables features to be interpretable. Specifically, the technique proposed in this work, NAMformer, which is built on top of the existing FT-Transformer, enables the identification of \"marginal feature effects\" which can be used to interpret the final model. NAMformer performs comparably to the original FT-Transformer, while maintaining desirable interpretability properties."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The introduction and methodology sections could benefit significantly from writing improvements and more thorough explanation. In its current form, these sections assume a lot in terms of prior knowledge, e.g. of marginal feature effects, the FT-Transformer architecture, target-aware embeddings, and uncontextualized embeddings. These should be explained, at least at a high-level. Additionally, from Section 2, it is somewhat unclear which parts are inherited from the prior work on FT-Transformer and which parts are new. Particularly, I was confused about the feature encoding part--without referencing the prior work, I cannot easily assess if this part of the work is novel. \n- The authors should include specific use-cases in which the marginal feature effects are useful downstream. Otherwise, it is challenging to assess the significance of the contribution. \n- The paper should have a standalone explanation of the datasets that are being used in evaluation beyond saying that the evaluation is done on four regression and four binary classification datasets---what are these datasets and where do they come from? The authors seem to refer the reader to the appendix for this information, but this is critical for assessing the results. Furthermore, the evaluation should be more thorough than regression and binary classification -- conceivably, the proposed method could become worse than FT-Transformer as the number of classes grows. The authors should include such tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024from,\ntitle={From Uncontextualized Embeddings to Marginal Feature Effects: Incorporating Intelligibility into Tabular Transformer Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zbpzJmRNiZ},\nnote={under review}\n}"
},
"abstract": {
"value": "In recent years, deep neural networks have showcased their predictive power across a variety of tasks. The transformer architecture, originally developed for natural language processing, has also shown great efficiency in handling tabular data, offering a competitive alternative to traditional gradient-boosted decision trees in this domain. However, this predictive power comes at the cost of intelligibility: Marginal feature effects are almost completely lost in the black-box nature of deep tabular transformer networks. Alternative architectures that use the additivity constraints of classical statistical regression models can maintain intelligible marginal feature effects, but often fall short in predictive power compared to their more complex counterparts. To bridge the gap between intelligibility and performance, we propose an adaptation of tabular transformer networks designed to identify marginal feature effects. We provide theoretical justifications that marginal feature effects can be accurately identified, and our ablation study demonstrates that the proposed model efficiently detects these effects, even amidst complex feature interactions. To demonstrate the model's predictive capabilities, we compare it to several interpretable as well as black-box models and find that it can match black-box performances while maintaining intelligibility. The source code is vailable at https://anonymous.4open.science/r/nmfrmr-B086."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Tabular Deep Learning",
"Interpretability",
"Tabular Transformer Networks"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/51a07c084ffc3ee927c62ebe506908470e874e6e.pdf"
},
"presentation": null,
"primary_area": {
"value": "interpretability and explainable AI"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "From Uncontextualized Embeddings to Marginal Feature Effects: Incorporating Intelligibility into Tabular Transformer Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zcTLpIfj9u | Future-Guided Pretraining via Time-to-Event Supervision for 3D Medical Imaging | main | Active | Multimodal learning;medical imaging;Electronic Health Records | applications to computer vision, audio, language, and other modalities | 3;6;6 | 4;4;3 | 2;3;3 | 2;3;3 | 3;4;3 | 5 | 3.666667 | 2.666667 | 2.666667 | 3.333333 | -0.5 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Why start from 3D image scans instead of 2D medical images? Is this due to the dataset choice, or has similar work already been done on 2D data?\n2. How does the choice of time segmentation for EHR data affect model results during pretraining? Specifically, my understanding is that the model predicts the probability of a patient experiencing a certain event at intervals like 1, 2, or 3 years post-scan. How does the granularity of these time segments impact the performance of the pretrained encoder?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. Innovative Approach: The method creatively leverages EHR data following a medical scan to assist model pretraining, demonstrating better performance compared to imaging-only pretraining.\n2. Comprehensive Evaluation: Extensive comparisons across multiple tasks validate the robustness and efficiency of the TTE-based approach across different architectures."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a future-guided pretraining approach using time-to-event supervision to enhance the prognostic capabilities of 3D medical imaging models. By incorporating longitudinal EHR data into the pretraining process and predicting time-until-event, the model outperforms traditional methods across multiple standard tasks, as demonstrated by thorough experiments."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Dependence on Large EHR Datasets: This approach relies on extensive, high-quality EHR data, which many medical datasets do not include.\n2. Limited Modality Scope: Tested only on CT images; broader modality testing could validate versatility across imaging types.\n3. Interpretability: The TTE pretraining’s impact on specific pixel-level biomarkers is less clear; additional analysis on feature attribution could help."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- Propose utilizing the time events as pre-training tasks specially designed for prognosis tasks in downstream applications. \n- The manuscript is overall easy to follow"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors proposed to utilize the time-to-event information in EHR that paired with the imaging data as a form of supervision for the pre-training purpose. A public dataset with both 3D images and EHR notes is employed for the pre-training and downstream applications. Another dataset without the time events is also used for the evaluation of model adaptation. The manuscript is easy to follow. However, it also suffers from several critical flaws, which are detailed below."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The proposed method is limited in generalization since it will require longitudinal time-to-event EHR data as the supervision for the pre-training. In comparison to the common self-supervised pre-training, the proposed methods are harder to scale up.\n\n- There is no comparison evaluation between the proposed method and prior methods in model pre-training. Only the results of the proposed method with different model architectures are reported. It will be difficult to appreciate the benefits of the proposed method.\n\n- The selected model architecture also raises questions since there are many popular model networks in medical imaging, e.g., 3D-UNet, ViT, etc. It will be helpful to see their performance compared to the vanilla ResNet. \n\n- Baselines without the pre-training process should also be reported.\n\n- The current setting utilizes public data for both pre-training and downstream applications. Having a separate evaluation dataset of a prognosis task will be helpful. \n\n- The proposed method is limited in technical innovation, though utilizing the time-to-event data as a form of supervision is relatively new in the pre-training. Mostly existing techniques are adopted for the pre-training."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "- What exactly does it mean that Steinberg et al.’s method was used to “[sample tasks to maximize entropy given the frequency distribution of medical codes populating the DAG”? I feel that a basic plain-language description of the motivation for this procedure is needed first: why is this method being applied at all? Are there way more than 8k events and the goal is to settle on a subset of 8k “meaningful”/common ones for pretraining? I don’t understand the motivation.\n- Unless I am misunderstanding, this is the only description of the TTE pretraining procedure and labels used: “We define our TTE task labels by predicting the time until the next occurrence of a medical code.” The previous Section 3 described deep survival modeling in the abstract, so I expected Section 4 to more concretely describe how TTE pretraining works. Is this a “competing risks” approach, where multiple events are being modeled simultaneously (in “multi-label” fashion)?\n- What are the 8,192 EHR tasks/events? I’m aware it would be cumbersome or impossible to list and define them all, but any reasonable attempt to convey information about them would be useful. What kinds of “events” are they? What are some examples?\n- Related to the above point, are the downstream labels *also* present in the set of TTE pretraining tasks? If so, isn’t there concern of “label leakage”, where the model has been pretrained on label information present in the downstream training dataset? Please clarify this.\n\n**Minor comments/questions:**\n- Line 13: Maybe “build” instead of “capture” since you use this word in the next sentence.\n- In-text citation style seems off – should be parenthetical (\\pcite{}) in most cases when used at end of sentence/clause: “Sox et al. (2024)” -> “(Sox et al., 2024)”\n- Change “e.g.” -> “e.g.,” throughout\n- Would include more recent references [1,2] when discussing deep prognosis models on longitudinal medical imaging (first paragraph of Section 2)\n- “i.e. 
8192” -> “i.e., 8.192”\n- “Our approach improves training data efficiency, increasing training labels by an average of 3x over labels assigned to patients based on their current EHR visit.” This is a bit unusual to highlight as a main contribution – I don’t think readers will understand what “increasing training labels” means without having read the entire paper (nor why this impact data efficiency). Perhaps clarify language here to indicate that your approach provides 3x as many sources of supervision during SSL + that this is what provides data efficiency benefits.\n- “Pretraining task labels as assigned per-CT scan and vary in density based on pretraining approach, see Figure 2.” Perhaps “as assigned” is meant to be “are assigned”? Also change “, see Figure 2” -> “(Figure 2)”.\n- Be consistent with “c-statistic” vs. “C-statistic”\n\n**References**\n[1] Holste, Gregory, et al. \"Harnessing the power of longitudinal medical imaging for eye disease prognosis using Transformer-based sequence modeling.\" NPJ Digital Medicine 7.1 (2024): 216.\n[2] Sriram, Anuroop, et al. \"Covid-19 prognosis via self-supervised representation learning and multi-image prediction.\" arXiv preprint arXiv:2101.04909 (2021)."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The presentation quality is very high. Care has been taken to logically organize the paper, clearly articulate key points, and straightforwardly present results with concise figures and tables.\n- The core idea is creative, making use of the wealth of longitudinal EHR data associated with each 3D volume for pretraining.\n- Discussion or related work and background is particularly strong.\n- Experiments are sufficiently thorough and easy to interpret – results are convincing."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a self-supervised learning (SSL) method for 3D medical imaging data that leverages electronic health records (EHR) to provide extra sources of supervision via time-to-event modeling. The proposed method, future-guided pretraining, performs time-to-event (TTE) survival modeling of various medical events in the longitudinal EHR associated with each 3D scan. The authors show that future-guided pretraining consistently improves downstream TTE modeling and prognostic classification tasks – also improving data efficiency – without degrading standard diagnostic classification performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The actual description of the TTE pretraining approach is brief (lines 184-191) and somewhat unclear. I would advise the authors to flesh out this section. See specific questions below.\n- A description or list of the 8,192 EHR pretraining tasks is never provided. I’m aware there may not be a convenient place to list this many items, but a general description of categories of events or a few illustrative examples would be helpful. Without this information, it’s impossible to assess whether, e.g., one the TTE pretraining tasks is *also* used as a downstream TTE modeling task. In this case, there may be concerns of “label leakage”.\n\nI’m happy to increase my score once these issues are addressed – this is an otherwise strong submission."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024futureguided,\ntitle={Future-Guided Pretraining via Time-to-Event Supervision for 3D Medical Imaging},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zcTLpIfj9u},\nnote={under review}\n}"
},
"abstract": {
"value": "In medicine, making effective treatment decisions requires detecting early warning signs of disease. With the rise of 3D medical foundation models, there is promise in large-scale pretraining to capture new and more informative imaging biomarkers associated with future disease risk. Current self-supervised learning (SSL) techniques for 3D medical imaging largely capture structural properties via reconstruction and contrastive losses – local features that provide only indirect signal on disease progression. Electronic health records (EHRs) present an underutilized resource for future information, offering an easily paired and scalable amount of weak supervision representative of patient outcomes. To this end, we propose future-guided pretraining to explore the benefits of training 3D image encoders on future medical events. By combining classic techniques from timeto- event modeling and recent pretraining techniques using longitudinal event data from EHRs, we show that future-guided pretraining enhances the ability to predict future patient outcomes (average AUROC increase of 25.3% and time-dependent c-statistics increase of 23% compared to baseline models) without degrading the ability to perform standard binary classification tasks (e.g. image labeling for diagnostic tasks). This study lays the groundwork for innovative ways to combine EHR and imaging modalities for clinical risk prediction."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Multimodal learning",
"medical imaging",
"Electronic Health Records"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/7182cb8f90f4007cf4bfdc2657cf7b8003f907e4.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/c71efe072928f1231f81c695dd974b380be23b8c.zip"
},
"title": {
"value": "Future-Guided Pretraining via Time-to-Event Supervision for 3D Medical Imaging"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zcx6rIMbbR | Efficient Fine-Tuning of Quantized LLMs via Three-Stage Optimization | main | Active | Efficient Fine-Tuning;NLP;Iterative Optimization;Layer-wise Quantization and Low-Rank Configuration | transfer learning, meta learning, and lifelong learning | 3;5;5;5;6 | 4;4;3;4;3 | 2;3;2;2;4 | 2;2;2;2;3 | 3;2;2;3;3 | 4.8 | 3.6 | 2.6 | 2.2 | 2.6 | -0.583333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "NA"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper presents the clear motivation that the performance of fine-tuning on the adjusted quantized models is even worse than using the original quantized models directly.\n\n- Low-precision models fine-tuned with QR-Adaptor can surpass the 16-bit fine-tuned models, while maintaining memory usage comparable to that of 4-bit quantized models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Based on the motivation that the performance of fine-tuning on the adjusted quantized models is even worse than using the original quantized models directly, the paper introduced QR-Adaptor that bypasses the network errors introduced by quantization and directly uses actual performance and memory as optimization targets. The experimental results are based on Llama 2 7B and 13B."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- It would be necessary to conduct experiments for Llama-3 family (e.g., Llama 3 8B), which are known to be harder to quantize.\n\n- The comparison of training time between QR-Adaptor and existing methods would be required because QR-Adaptor seems to take longer than previous methods due to the presence of bayesian optimization.\n\n- It would be more beneficial if prior methods are also done with 6.125-bit for Llama 2 13B and 5.875-bit for Llama 2 7B in Table 1."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Question\n1. What are the differences between NSGA-II and PRGA?\n2. Is the proposed method sensitive to the selection of iterations and population size?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Strengths:\n1. Overall, the paper is well-organized and easily comprehensible. The motivation is effectively introduced, and the methodology is clearly described.\n2. The method's introduction of gradient-free optimization to the fine-tuning of quantized LLMs is noteworthy and provides valuable insights for future research in this area.\n3. The proposed approach demonstrates superior performance in terms of both memory efficiency and model performance compared to state-of-the-art works in the same domain."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Summary\nThe authors propose a novel method for fine-tuning quantization LLM. The core of this approach is a three-stage optimization process that selects quantization bit-widths and corresponding LoRA ranks for each layer of the model. Initially, the method computes layer-wise importance on a small dataset, which serves as the initial values for bit-widths and ranks. Subsequently, the authors employ their proposed Pareto Ranking Genetic Algorithm (PRGA) optimization method, followed by Bayesian optimization, to identify more optimal solutions. The efficacy of this method is demonstrated through experimental validation on datasets such as MMLU, showcasing its superiority in terms of both memory efficiency and performance metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Weaknesses:\n1. The author claims that\"inspired us to develop the Pareto Ranking Genetic Algorithm (PRGA), a novel multi-objective optimization method.\"The proposed Pareto Ranking Genetic Algorithm (PRGA) bears a striking resemblance to the existing Non-dominated Sorting Genetic Algorithm II (NSGA-II), to the extent that they are virtually indistinguishable. However, the authors have failed to acknowledge or cite NSGA-II, instead claiming PRGA as a \"novel multi-objective optimization method\".PRGA and NSGA-II are almost identical, including key elements such as non-dominated sorting, crowding distance calculation, and elitist strategy. \n2. The novelty of this paper appears limited, as it primarily applies existing algorithms, namely NSGA-II and Bayesian Optimization, to the fine-tuning of quantized LLMs. \n3. The authors claim that previous methods relying on gradient norms to quantify layer importance fail to accurately represent a layer's contribution during inference. However, they do not substantiate this claim with ablation studies. \n4. The current ablation experiments are insufficient. Additional studies should be conducted to demonstrate the impact of iterations and population size on the results. \n[1]Deb K, Pratap A, Agarwal S, Meyarivan TAM. A fast and elitist multi-objective genetic algorithm: NSGA-II[J]. IEEE Transactions on Evolutionary Computation,2002, 6(2):182-197."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "In addition to the weaknesses, I have the following questions:\n1.I am curious about the effectiveness of directly applying the approach of this article to LLMs quantization, that is, using gradient-free optimization methods to select the quantization bit numbers for each layer's parameters.\n2.AdaLoRA is not specifically designed for quantized LLMs, and its direct performance may be poor. Therefore, concluding that dynamically adjusting rank is not suitable for fine-tuning quantized LLMs may not be sufficiently justified. Can we test AdaLoRA's performance on fine-tuning quantized LLMs again under the condition of Preserving quantized model parameters before fine-tuning?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.This article proposes the use of gradient-free optimization methods to optimize the rank selection of layer-wise LoRA and the bit selection of layer-wise Quantization, which is quite novel.\n2.This method could be combined with other quantization methods to potentially achieve better performance.\n3.The results on datasets such as MMLU show that QR-Adaptor has achieved excellent performance in both memory and accuracy.\n4.Ablation studies indicate that the proposed three-stage optimization framework effectively yields superior solutions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a framework called QR-Adaptor that combines parameter-efficient fine-tuning and quantization techniques to improve the performance of LLMs with reduced memory usage. The QR-Adaptor framework includes three stages: initialization based on task information, global exploration using Pareto Ranking Genetic Algorithm (PRGA), and local refinement with Bayesian optimization. Experimental results show that the method outperforms fine-tuned 16-bit models while maintaining the same memory usage as fine-tuning 4-bit models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The introduced multi-stage optimization process increases the time cost.\n2.The experiments in this article are limited, conducted only on Llama2, and the datasets used are not diverse enough. If considering expanding the experiments, one could refer to the experiments in the LoFTQ paper.\n3.There is a lack of experiments on the impact of PRGA hyperparameters on model performance.\n4.There is a lack of comparative experiments between the PRGA method and other multi-objective optimization methods.\n5.Figure 1 is somewhat difficult to understand and should not be placed on the first page."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Why does QR-Adaptor consistently outperform LoRA fine-tuning with 16-bit models? Is the advantage due to adaptive LoRA ranks, considering FP16 models are typically more powerful than quantized models?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The framework is practically useful, allowing for higher-performance fine-tuned models with a 4-bit memory footprint.\n2. The paper is well-written and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses quantized parameter-efficient fine-tuning. It proposes two constraints: initializing LoRA parameters either as zero or using MSE initialization like LoftQ and LQ-LoRA, while fixing all trainable parameters. Additionally, it introduces mixed-precision quantization and mixed-rank LoRA, achieving higher performance with the same training memory footprint as 4-bit models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The constraints are derived from limited experiments in Figures 2 and 3. For instance, Figure 2 suggests careful LoRA initialization does not improve performance, yet LoftQ and LQ-LoRA demonstrate its effectiveness. LoRA initialization can mitigate quantization loss, crucial for models with significant quantiztaion loss, such as lower-bit quantizations or more challenging models like llama-3-8B. A deeper analysis with stronger experiments and detailed discussion on LoRA initialization is needed.\n\n2. The paper heavily focuses on the two constraints, which seem more like ablation studies and do not offer new insights or motivation for the final methods. The main contribution is achieving higher performance with the same memory footprint as 4-bit models. The paper should be reorganized to highlight its original contributions.\n\n3. The performance improvements could be attributed to higher-bit models and reduced memory footprint through adaptive LoRA rank reduction. Since small LoRA ranks may not perform well on large datasets, it's important to verify the method's effectiveness on larger datasets."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. My main concern was the extra time cost, could you provide comparisons with existing methods in terms of time cost? Can the computational cost of each stage be disclosed?\n2. The caption of the subfigure in Figure 7 needs to be supplemented.\n3. Will the bad performance affected by unfixed parameters mentioned in Figure 3 improve with longer fine-tuning epochs? This does not seem to be a very intuitive phenomenon, can the author provide more explanation?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The framework outlined in Figure 1 is resonates. Figures 2 and 3 effectively illustrate the key observations and the rationale behind our approach."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "They bypasses the network errors introduced by quantization and directly uses actual performance and memory as optimization targets. Through initialization, extrapolation, and interpolation, they quickly solves the gradient-free bit-width and lora rank optimization problem of fine-tuned low-bit quantized models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This paper is credible in its approach, but lacks a good logical structure in presenting the corresponding challenges. In the abstract \"We find that the performance of finetuning on the adjusted quantized models is even worse than using the original quantized models directly, as the adjusted model is essentially a completely different model from the original quantized model. \", the adjusted quantized models is a vague statement, and these unclear statements affect my understanding. Therefore, I expect the authors to reformulate the three challenges of the necessity of init/search r and q layer-wise and adopting a gradient-independent strategy in Introduction."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Method of obtaining a high-performance low-precision model."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024efficient,\ntitle={Efficient Fine-Tuning of Quantized {LLM}s via Three-Stage Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zcx6rIMbbR},\nnote={under review}\n}"
},
"abstract": {
"value": "To address the memory consumption and computational efficiency issues in fine-tuning large language models (LLMs), Parameter-Efficient Fine-Tuning (PEFT) and quantization have emerged. Recent studies have combined the two and have proposed adjusting parameters before fine-tuning to reduce quantization errors, aiming to improve fine-tuning performance. We find that the performance of fine-tuning on the adjusted quantized models is even worse than using the original quantized models directly, as the adjusted model is essentially a completely different model from the original quantized model. Additionally, we have discovered that due to the poor robustness of quantized models, increasing the training difficulty may result in even worse outcomes. To address this, we propose two constraints for fine-tuning quantized models, and based on these, we introduce a general fine-tuning framework called QR-Adaptor. This framework bypasses the network errors introduced by quantization and directly uses actual performance and memory as optimization targets. Through initialization, extrapolation, and interpolation, it quickly solves this gradient-free optimization problem. Experimental results demonstrate that our method yields fine-tuned low-bit quantized models that outperform fine-tuned 16-bit models while maintaining the same memory usage as fine-tuning 4-bit models. For example, in the zero-shot test on MMLU, it improves accuracy by 3.3\\% over both LoftQ and LQ-LoRA."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Efficient Fine-Tuning",
"NLP",
"Iterative Optimization",
"Layer-wise Quantization and Low-Rank Configuration"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/19f19c690754f88b94fff5ab6f29eae2574eb009.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/937128adfcf9dc70f5943a0dcd6e34c6db47dc2b.zip"
},
"title": {
"value": "Efficient Fine-Tuning of Quantized LLMs via Three-Stage Optimization"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zd0iX5xBhA | System 1.x: Learning to Balance Fast and Slow Planning with Language Models | main | Active | Large Language Models;Planning | foundation or frontier models, including LLMs | 1;3;3;3 | 5;5;4;4 | 1;2;3;2 | 1;1;2;2 | 2;2;3;3 | 2.5 | 4.5 | 2 | 1.5 | 2.5 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The term plan validity should not be used interchangeably with plan accuracy. Validity is a binary concept that indicates whether a plan meets all required conditions or constraints (i.e., valid or invalid), whereas accuracy implies a measure of correctness or closeness to an ideal solution, which can vary in degree.\n2. The definition of a sub-goal as a \"pair of states\" adds unnecessary complexity and deviates from the usual understanding. Sub-goals are typically single intermediate states that help guide progress toward the final goal.\n3. The hardness function defined for Blocksworld, in Section A.3, seems wrong. It states that if a block is not on the table and not in its goal position, an additional 1 is added to the hardness cost. However, this approach doesn't account for scenarios where a block is in the air after a pick-up action—this intermediate position may not be the goal but could be one step away from it. Adding to the hardness cost in such cases could mistakenly label states as more challenging, even when they are actually closer to the goal. Similarly for the Maze Navigation task, counting all obstacles in the sub-maze as part of the hardness cost is problematic, as obstacles outside the plan trajectory can inflate the difficulty inaccurately.\n4. In Section 2.3, under Controller Training Data, Step 3, it’s unclear why only a contiguous chunk should be assigned to System-2. What is the rationale for restricting it to contiguous data?\n5. According to the authors' definition of sub-goals, are they explicitly training the Controller to decompose the task into exactly three sub-goals?\n\n**Minor Comments:**\n1. Examples in the figures, particularly Figure 2, would benefit from clearer annotations in the maze to improve the interpretation of the numbering. The current numbering follows a top-to-bottom, left-to-right order, but this is not immediately clear.\n2. 
In Figure 2, System-2 output, I do not understand why this is invalid “Exploring action left State [3, 1] is invalid”?\n3. The authors should maintain consistency in their notation. In Section 2.3, when discussing Controller training data, the notation for initial and goal states switches from $s_i$ and $s_j$ in Step 1 to $s_0$ and $s_g$ later in the paragraph.\n\n\n**References:**\n\n[1] Fabiano, F., Pallagani, V., Ganapini, M.B., Horesh, L., Loreggia, A., Murugesan, K., Rossi, F. and Srivastava, B., 2023, December. Plan-SOFAI: A Neuro-Symbolic Planning Architecture. In Neuro-Symbolic Learning and Reasoning in the era of Large Language Models."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "An innovative feature of this hybrid approach, which combines quick solutions from System-1 with thorough search-based solutions from System-2, is a user-controlled parameter x (between 0 and 1) that adjusts how much System-2 is used compared to System-1. This allows users to balance speed and accuracy based on the task’s needs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes a hybrid planning system that combines a large language model (LLM) fine-tuned to act as either a System-1 or System-2 planner, with a Controller that decomposes tasks into sub-goals based on difficulty. The Controller generates a meta-plan by assigning \"easy\" sub-goals to System-1 for faster solutions and \"hard\" sub-goals to System-2 for more deliberate, search-based solutions. The final plan is constructed by concatenating sub-plans from both systems. However, it faces limitations due to compounding errors across systems, particularly as task complexity grows."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. A significant limitation of this approach is its dependency on multiple components performing flawlessly: System-1 must solve the easy sub-goals without hallucinating, System-2 must solve the harder sub-goals using search trajectories accurately, and the Controller must correctly decompose and assign tasks between the two systems. This compounded reliance on error-free performance from each component raises concerns about the approach’s reliability, especially as problem complexity increases. For even moderately difficult tasks, or as the number of objects scales, this method may struggle to maintain effectiveness, as errors are likely to accumulate across the system.\n2. A limitation of this hybrid approach is that selecting the x value too greedily can hurt performance and increase training computation costs. Meanwhile, setting x too high leads to more thorough searches by System-2, which increases compute requirements and makes the approach similar to existing methods like fine-tuning LLMs for planning or SOFAI architectures [1].\n3. For System-2, the hardness function should be explicitly defined for each specific domain in which the planner operates. Without domain-specific definitions, the hardness function risks being too generalized, leading to inaccurate difficulty assessments that don’t align with the unique challenges of each task."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- You make the case that System-1.x is advantageous over inference-time methods relying on symbolic planners since they may not exist for certain domains, yet System-1.x relies on defining a hardness function, which is essentially a heuristic function. How could System-1.x be leveraged in new domains where a user is not a domain expert?\n- In addition, it is non-trivial to construct hardness functions for multi-task domains like Alfworld [1], Robotouille [2] or Mini-BEHAVIOR [3]. How would System-1.x be applied to such domains?\n- There appears to be a tradeoff in Figure 3 with accuracy and states explored with Sys-1.5 with and without subgoals. Sys-1.5 with subgoals achieves higher performance with less states explored but as the state exploration budget increases, no subgoals is better. Why is that?\n\n[1] Alfworld: Aligning text and embodied environments for interactive learning (Shridhar et. al 2020)\n\n[2] Demo2Code: From Summarizing Demonstrations to Synthesizing Code via Extended Chain-of-Thought (Wang et. al 2023)\n\n[3] Mini-BEHAVIOR: A Procedurally Generated Benchmark for Long-horizon Decision-Making in Embodied AI (Jin et. al 2023)"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "$Originality:$ This paper introduces a new method to collect data to finetune a hybrid System 1/2 system whose hybridization factor x can be customized during train and test time.\n\n$Quality:$ The figures and methodology are well-done and the experiments cover relevant baselines and ablations.\n\n$Clarity:$ The paper is well-written and clear.\n\n$Significance:$ Constructing LLM planners that can outperform classical planning approaches with minimal exploration is very important in domains where querying the world model can be expensive (e.g. real world systems like robotics)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces System-1.x which acts as a hybrid System 1 / System 2 planner that outperforms fully System 1 and 2 systems, an A* planner, and a System-1.x variant without subgoal decomposition. This paper contributes a method for collecting data to train a System-1.x system through the introduction of domain-specific hardness functions to determine which of the collected samples should be annotated for System-1 (easy samples) or System-2 (hard samples)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The introduction makes the following claim\n\n\"System-1.x is a fully LLM-based neural planner ... not relying on any external solvers or verifiers... it has an advantage over inference-time methods that rely on symbolic planners which may not exist for certain domains.\"\n\nHowever, System-1.x strongly rely on the presence of a hardness function, which is essentially a heuristic function. This assumption is too strong and cannot simply be listed as a limitation of this approach; the domains that are tested on could be purely solved using classical planners like A* with access to the hardness (heuristic) function and a valid plan would be retrieved (regardless of consistency or admissibility of the hardness function) much faster than any LLM approach in terms of inference time.\n\nTo improve this paper, evaluating on environments where classical planners cannot be trivially applied would give more credibility to this LLM approach. A user-specified hardness function cannot be provided for the environment since a classical planner would be able to utilize this along with the other environment information provided to the System-1.x planner and quickly find a valid plan. \n\nOne potential idea is utilizing an LLM to provide a hardness value for a pair of states as other works have done, using an LLM to approximate a heuristic function [1,2,3]. This would require some new experiments to show the effectiveness of the hardness function and would necessitate a new approach other than the sliding window mechanism to be token-efficient.\n\nThis is a very important problem to solve in this space and I wish the authors the best of luck.\n\n[1] Large Language Models as Commonsense Knowledge for Large-Scale Task Planning (Zhao et. al 2023)\n\n[2] Reasoning with Language Model is Planning with World Model (Hao et. al 2023)\n\n[3] Toolchain*: Efficient Action Space Navigation in Large Language Models with A* Search (Zhuang et. al 2023)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. In the experimental setup, the authors mention that the hybridization factor x allows System-1.x to control the degree of System-1 versus System-2 planning. How sensitive is this hybridization factor to changes in task complexity? Were there significant trade-offs observed with larger values of x in different domains?\n2. In discussing Blocksworld’s out-of-distribution generalization, the paper mentions that System-1.x effectively utilizes System-1 for ID sub-goals while reserving System-2 for OOD sub-goals. Could the authors explain how the controller identifies which sub-goals are in-distribution vs. out-of-distribution during inference?\n3. The authors claim that System-1.x can match or exceed a purely System-2 Planner’s accuracy with fewer states explored. What specific methods were used to prevent overfitting to the training traces, especially for tasks requiring extensive state exploration?\n4. Why is there no baseline comparison against other fast and slow architectures used for solving planning problems? It is not clear to me what is the advantage of using this approach against architectures like Plan-SOFAI that are both optimal and resource-efficient compared to System 2 solvers for planning tasks."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "System-1.x shows robust adaptability and performance across different planning tasks (e.g., Maze Navigation, Blocksworld), outperforming both purely System-1 and System-2 planners due to its effective sub-goal decomposition."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes System-1.x, a hybrid planning framework using language models to balance fast, intuitive System-1 planning with deliberate, accurate System-2 planning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The related work section of the paper does not cover papers similar to this work. To cite a few papers that should have been used as baselines to compare System 1.x against - \n[1] Katz, M., Kokel, H., Srinivas, K., & Sohrabi, S. (2024). Thought of Search: Planning with Language Models Through The Lens of Efficiency. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24.\n[2] Fabiano, F., Pallagani, V., Ganapini, M. B., Horesh, L., Loreggia, A., Murugesan, K., ... & Srivastava, B. (2023, December). Plan-SOFAI: A Neuro-Symbolic Planning Architecture. In Neuro-Symbolic Learning and Reasoning in the era of Large Language Models.\n[3] Webb, T., Mondal, S. S., Wang, C., Krabach, B., & Momennejad, I. (2023). A Prefrontal Cortex-inspired Architecture for Planning in Large Language Models. arXiv preprint arXiv:2310.00194.\nRefer to the questions for more weaknesses."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "Figure 3 seems to be cropped in a way that makes the method appear to be better than it is, and in general the reported metrics are simply not meaningful (see weaknesses)."
},
"flag_for_ethics_review": {
"value": [
"Yes, Potentially harmful insights, methodologies and applications"
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "In the results section for blocksworld, which numbers are from in-distribution testing and which are from out-of-distribution testing?\n\nWhat guarantees does System 1.75 have?\n\nDid you consider wall-clock time budgets? This would be a more fair comparison. Surely the authors know that querying LLMs repeatedly takes far longer than querying A*, so I'm curious why these times weren't reported in the paper?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "In general, the goal of creating a neurosymbolic planning system is a very good one. However, the one presented in this paper is preliminary. \n\nI really like the idea of having a hybridization factor. It could be interesting to have a way to control this at inference time rather than at training time, i.e. like iterative-deeping based searches use more search budget when given more time."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper attempts to create a mixed-mode planning system that both immediately generates plans without deliberate state-space search, and does deliberate state space search.\n\nHowever, this paper seems deliberately misleading in terms of what it claims, namely the improvements over A*."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While this work attempts to build a neurosymbolic planning system, which is a good goal, it seems like the work may not be ready for publication at ICLR. \n\nThe main results of the paper rest on a claim about the \"States explored\" metric, however, there are several issues with this evaluation. Primarily, computational costs of querying LLMs are not accounted for. A single pass of an LLM clearly can do a significant amount of computation. Furthermore, intermediate LLM-generated states are never checked, e.g. they could contain hallucinations, so it doesn't seem like they'd make a reasonable metric. More importantly there is simply no guaranteed correspondence between intermediate states inside of A* and reported intermediates produced by an LLM to the degree that the two can be compared meaningfully. Because of this, it is unreasonable to restrict A* to run in correspondence with the LLM output, when a full A* computation may several orders of magnitude less than a computational single pass of an LLM. Since the paper rests on the states explored metric, I highly encourage the authors to propose a better method for comparing computational costs between LLMs and A*. \n\nAnother ommission is the fact the the overall system is trained by calling A* thousands of times, which makes an efficiency argument extremely questionable. Regardless of the current state of such an argument, I do buy into the general idea that mixed-system planning algorithm could be more efficient than a system-2 one and more accurate than a system-1 one, but I think the authors would need to fairly evaluate this to make progress on building such an algorithm.\n\nThe other main metric used in the paper is \"valid plan accuracy\". Such a metric is not meaningful when using systems with guarantees, namely A*, which is guaranteed to find optimal plans, while the proposed system has no such guarantees. 
Because of the false equation between A* intermediate states and LLM-output, the budget restriction is not meaningful and skews the accuracy metric significantly. For instance, if wall time budget were used instead, A* would simply have an accuracy of 100%. Reporting otherwise is potentially misleading. \n\nThe paper claims to test out-of-distribution blocksworld, but it is unclear from figure 1 which results are out of distribution testing and which are in-distribution testing. Also, the left hand side of figure 1 has a correct maximum accuracy of 100%, but the right hand side has a maximum accuracy of only 30%, which seems extremely misleading. Can you clearly label which results in Figure 1 are from in-distribution versus out-of-distribution testing and present the results in Figure 1 with a uniform scale 0-100?\n\n\"model-based\" on line 144 seems misused, namely confusing parameterized models like neural networks with transition models used in planning and RL. In general, some terminology is unconventional or misused. \n\nline 253: \"we assume that they can be decomposed into sub-goals\". This is a very dangerous assumption that rules out true planning problems, as often sub-goals interact. This is the case for blocksworld (but not the maze task), so since the blocksworld dataset has been filtered in this way, it no longer represents the original task and is in fact far simpler to solve [1]. The method for decomposing plans is not obvious, and the sliding window approach, while interesting, is certainly not guaranteed to decompose an arbitrary problem successfully. In the worst case, the system must always be able to fall back on System 2. \n\nThe system is given tiny datasets and (for mazes) tested within distribution. 5x5 mazes are far too small and A* can scale to much bigger problems, while it's unclear if the proposed LLM-based approach does. 
The relatively high system 1 performance is a good indicator that parts of the problem are being memorized -- and in the case of mazes it is clear how this is possible since the task has decomposable subgoals that can be independently memorized. It's good that out-of-distribution was considered for blocksworld, but the results section doesn't always distinguish between numbers reported from in-distribution testing and out-of-distribution testing. \n\nIn general, planning is misunderstood. By default A* is sound, complete, and produces optimal plans. In contrast, an LLM generator is not sound, not complete, and not optimal. When A* is limited by search budget, it is not the same algorithm, but still will never produce an incorrect plan, and if given enough time will produce the optimal plan. In contrast, an LLM (or system that uses one) is capable of producing incorrect plans, and has no optimality guarantee even if given infinite computation. The proposed method does not have any guarantees, wheras systems like LLM-Modulo do have soundness guarantees (but not completeness!). Please consider adding a discussion section that explicitly compares the theoretical guarantees and limitations of A* versus the proposed LLM-based approach. This could help readers better understand the tradeoffs involved and the contexts in which each approach might be most appropriate.\n\n[1] Sussman, G. J. (1973). A computational model of skill acquisition.\n\nSee also: https://en.wikipedia.org/wiki/Sussman_anomaly"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024system,\ntitle={System 1.x: Learning to Balance Fast and Slow Planning with Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zd0iX5xBhA},\nnote={under review}\n}"
},
"abstract": {
"value": "Language models can be used to solve long-horizon planning problems in two distinct modes. In a fast 'System-1' mode, models directly generate plans without any explicit search or backtracking, and in a slow 'System-2' mode, they plan step-by-step by explicitly searching over possible actions. System-2 planning, while typically more effective, is also computationally more expensive and often infeasible for long plans or large action spaces. Moreover, isolated System-1 or System-2 planning ignores the user's end goals and constraints (e.g., token budget), failing to provide ways for the user to control the model's behavior. To this end, we propose the System-1.x Planner, a framework for controllable planning with language models that is capable of generating hybrid plans and balancing between the two planning modes based on the difficulty of the problem at hand. System-1.x consists of (i) a controller, (ii) a System-1 Planner, and (iii) a System-2 Planner. Based on a user-specified hybridization factor x governing the degree to which the system uses System-1 vs. System-2, the controller decomposes a planning problem into subgoals, and classifies them as easy or hard to be solved by either System-1 or System-2, respectively. We fine-tune all three components on top of a single base LLM, requiring only search traces as supervision. Experiments with two diverse planning tasks -- Maze Navigation and Blocksworld -- show that our System-1.x Planner outperforms a System-1 Planner, a System-2 Planner trained to approximate A* search, and also a symbolic planner (A* search), given an exploration budget. We also demonstrate the following key properties of our planner: (1) controllability: by adjusting the hybridization factor x (e.g., System-1.75 vs. 
System-1.5) we can perform more (or less) search, improving performance, (2) flexibility: by building a neuro-symbolic variant composed of a neural System-1 planner and a symbolic System-2 planner, we can take advantage of existing symbolic methods, and (3) generalizability: by learning from different search algorithms (BFS, DFS, A*), we show that our method is robust to the choice of search algorithm used for training."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Models",
"Planning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/889ece5d1a6f7e51133d30bdc7b00499c7e4d9a2.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/1bc810d34ffe288b02ab33698d76b8bc202e2e19.zip"
},
"title": {
"value": "System 1.x: Learning to Balance Fast and Slow Planning with Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zd5Knrtja4 | TimeRAF: Retrieval-Augmented Foundation model for Zero-shot Time Series Forecasting | main | Active | time series forecasting;retrieval augmented generation;time series foundation model | learning on time series and dynamical systems | 3;3;6;6 | 4;4;4;3 | 2;3;3;3 | 2;2;3;2 | 2;3;3;3 | 4.5 | 3.75 | 2.75 | 2.25 | 2.75 | -0.57735 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the weaknesses."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1.\tPaper writing is good. There is no difficulty in understanding the paper.\n\n2.\tThe proposed method is straightforward and achieves state-of-the-art performance.\n\n3.\tComprehensive ablation studies and other evaluations are conducted to check the effects of the model"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes TimeRAF, a Retrieval-Augmented Forecasting model that enhances zero-shot time series forecasting by using customized knowledge bases and a learnable retriever to integrate external knowledge, demonstrating significant improvements across various domains and datasets."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe major novelty is the utilization of RAF for time series modeling. However, in terms of RAF itself, the novelty is quite limited.\n\n2.\tThe difference with the previous RAF for time series is unclear. In the related work section, the author claimed that ‘existing time series RAG methods are either limited to historical data or do not support foundation models’ and provided two citations. However, it seems the gap is not significant. Could the authors elaborate more on this motivation?\n\n3.\tIn line 192, the authors claimed ‘retrieve information solely from datasets that are distinct from the input source’. What is the distinction here? How to define it.\n\n4.\tIn line 226, the authors mentioned ‘By weighing the importance of different components of the combined embedding’. However, it seems the average of the embeddings is calculated. Thus, how to weigh them.\n\n5.\tIn line 244, the authors mentioned ‘it is challenging to guarantee that retrieved candidates’. What is the reason?\n\n6.\tIn the ablation study of the choice of knowledge base, the authors mentioned ‘TimeRAF engages a meticulously curated multi-domain dataset’. This implies the domain of an input time series should exist in the base. However, what would happen if a new domain emerges (e.g., stock) but the base contains no such time series?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Q1. On the Electricity dataset, the performance of TimeRAF is not the best. Could you analyze the reasons for this?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "S1. The manuscript is well-written. And, the presentation of figure is clear and easy to understand.\n\nS2. The experiments are comprehensive, and provide some interpretable discussion to analyze why the model is good.\n\nS3. The research is novel and unresearched in the field."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel framework, TimeRAF, designed to leverage retrieval-augmented generation for zero-shot time series forecasting. The authors develop customized time series knowledge bases that are tailored to the specific forecasting tasks and employ an end-to-end learnable retriever to extract valuable information from the knowledge base. Through comprehensive experimental discussions, the manuscript demonstrates the effectiveness of the proposed approach beyond existing works."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1. Could you provide the source code to enhance the reviewers' trust in the reproducibility of your work?\n\nW2. The introduction of the retrieval process inevitably leads to increased time cost, so I am concerned about the model's efficiency. I believe it is essential to add some theoretical analysis and empirical discussions.\n\nW3. Have you considered the issue of label leakage introduced by RAG in the process of building the knowledge base? Different datasets may have very similar or identical temporal patterns, which could also lead to potential label leakage problems."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The details of retrival, how much data is used for forecasting after retrieval in TimeRAF?\n2. The memory and time costs associated with the retrieval process.\n3. The pre-training backbone appears too powerful, limiting the visible improvements. Have the authors explored replacing the backbone with alternative foundation models to better understand the effectiveness of RAF?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-written and easy to understand\n2. It combines retrieval-augmented models with TS, contributing to the efficient model adaptation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a method for few-shot and zero-shot time series forecasting. This method applies an RAG style to aid forecasting, where it retrieves some helpful data from a pre-collected dataset. The retriever is learnable and can calculate retrieval scores to select the most relevant data. To integrate retrieved data, the author proposes a Channel Prompting way to extract valuable information. Overall, the paper is easy to understand."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.The improvement shown by RAF in Fig. 3 is marginal(only 0.00x improvement in most datasets), which is the core issue and raises concerns about whether the pre-trained model used is already too powerful, thus reduces the necessity of RAF process.\n\n2. The author should give the memory usage for storing retrieval data and its time cost, especially when compared to storing a foundation model directly.\n\n3. The evaluation datasets are limited. It would be valuable to assess the model’s performance on cross-domain datasets that extend beyond the original knowledge base."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How scalable is the Channel Prompting mechanism for high-dimensional datasets with many channels?\n2. How does TimeRAF ensure consistent scaling between retrieved external time series data and input data to avoid magnitude mismatches?\n3. Why do the authors claim a zero-shot setup if the model fine-tunes on test data? How does this setup align with the standard definition of zero-shot forecasting?\n4. What datasets and loss functions were used to pre-train the TSFM? How does the pre-training contribute to the model’s performance? \n5. Can the authors provide results for other forecast lengths (e.g., 192, 336, and 720) to validate TimeRAF's generalization capability across multiple horizons?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The article is well-structured.\n2. Some experiments are meaningful."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces **TimeRAF**, a Retrieval-Augmented Forecasting model designed to enhance so-called zero-shot time series forecasting by leveraging retrieval-augmented techniques. The model integrates external time series knowledge bases to supplement forecasting tasks, employing an end-to-end learnable retriever."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Channel Prompting Efficiency Concern**: \n If I understand correctly, **Channel Prompting** requires each channel to process and fuse information from **k retrieved time series**. This may introduce significant computational overhead, especially for datasets with many channels (e.g., **traffic data**). The paper lacks an analysis of the efficiency and scalability of this mechanism in such scenarios. \n\n2. **Lack of Pre-training Details for TSFM**: \n The paper omits essential information about the **pre-training process** of the time series foundation model (TSFM), such as the **datasets used, training procedure, and loss functions**. This raises concerns about the reproducibility and transparency of the method.\n\n3. **Misinterpretation of Zero-Shot Learning**: \n I disagree with the zero-shot setup in the paper. True zero-shot learning implies that the model is tested on datasets it has **never seen during training**. For example, training on **ETTh1** and testing on **ETTh2** would align with this standard. However, as depicted in **Figure 2**, TimeRAF appears to fine-tune both the retriever and forecaster on parts of the test data, which **violates the principles of zero-shot forecasting**. The authors could refer to **GPT4TS or TimeLLM's zero-forecasting setup** for a more rigorous zero-shot methodology.\n\n4. **Unfair Comparison in Experiments (Table 2)**: \n The experiments reported in **Table 2** are not entirely fair. According to **Section 4.1**, TimeRAF uses an input length of **512**. For fair comparison, **all baselines** should have the same input length. However, the reported metrics for baselines like **iTransformer, PatchTST, and DLinear** do not correspond to **input length = 512**. I have run these models with input lengths of **512**, and the results (e.g., PatchTST on **ETTm1** with 96-step forecast: 0.292, and **ETTm2**: 0.165) differ significantly from those reported in the paper.\n\n5. 
**Incomplete Experimental Results**: \n The paper only reports results for a single forecast length (**96**). To comprehensively demonstrate the effectiveness of TimeRAF, it is essential to present results for **multiple forecast horizons** (e.g., **96, 192, 336, and 720**). This would provide a more complete evaluation of the model’s performance.\n\n6. **Typos and Grammar Issues**: \n There are several typos and grammatical errors in the paper. For example:\n - **Lines 53-70**: “our exploration of Retrieval-Augmented for time series Forecasting (RAF)” is awkwardly phrased. \n - **Line 301**: “...updating the retriever,, the whole...” has an extra comma. \n The authors should carefully proofread the paper to improve its clarity and polish.\n\n7. **Missing Axis Labels in Figure 5**: \n **Figure 5** lacks a **y-axis label**, which makes it difficult to interpret the results. Proper labeling is essential for readers to understand the figures accurately.\n\n8. **Scaling Issue in Retrieval-Augmented Forecasting**: \n One important concern is how TimeRAF handles cases where the **retrieved time series data** are in **different magnitudes** than the input data. It would be useful if the authors could elaborate on any **normalization or scaling techniques** employed to address this issue.\n\n9. **Implementation Details for Database Usage (Section 3.3)**: \n The paper mentions that **non-similar data** are used during training, while **similar data** are used during inference (Section 3.3, lines 187-193). However, the specific **implementation details** of how this is done for different datasets are unclear. Providing more insights here would enhance the paper's reproducibility and understanding.\n\n10. **Lack of Code Availability**: \n The authors do not provide **code** for their experiments. Releasing code is crucial for **reproducibility** and would allow the community to verify the results and build upon the work."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024timeraf,\ntitle={Time{RAF}: Retrieval-Augmented Foundation model for Zero-shot Time Series Forecasting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zd5Knrtja4},\nnote={under review}\n}"
},
"abstract": {
"value": "Time series forecasting plays a crucial role in data mining, driving rapid advancements across numerous industries. \nWith the emergence of large models, time series foundation models (TSFMs) have exhibited remarkable generalization capabilities, such as zero-shot learning, through large-scale pre-training. \nMeanwhile, Retrieval-Augmented Generation (RAG) methods are widely employed to enhance the performance of foundation models on unseen data, allowing models to access to external knowledge. \nIn this paper, we introduce **TimeRAF**, a **R**etrieval-**A**ugmented **F**orecasting model that enhance zero-shot time series forecasting through retrieval-augmented techniques.\nWe develop customized time series knowledge bases that are tailored to the specific forecasting tasks.\nTimeRAF employs an end-to-end learnable retriever to extract valuable information from the knowledge base.\nAdditionally, we propose Channel Prompting for knowledge integration, which effectively extracts relevant information from the retrieved knowledge along the channel dimension.\nExtensive experiments demonstrate the effectiveness of our model, showing significant improvement across various domains and datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"time series forecasting",
"retrieval augmented generation",
"time series foundation model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e0932769813db53aa2a9eda7051289516092f95e.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on time series and dynamical systems"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "TimeRAF: Retrieval-Augmented Foundation model for Zero-shot Time Series Forecasting"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zdKgyC2vnQ | MiniDrive: More Efficient Vision-Language Models with Multi-Level 2D Features as Text Tokens for Autonomous Driving | main | Withdraw | Vision-language models;Autonomous driving | applications to robotics, autonomy, planning | Enming Zhang;Xingyuan Dai;Yisheng Lv;Qinghai Miao | ~Enming_Zhang4;~Xingyuan_Dai1;~Yisheng_Lv1;~Qinghai_Miao1 | 3;5;5 | 5;4;3 | 2;2;2 | 2;2;2 | 2;2;2 | 4.333333 | 4 | 2 | 2 | 2 | -0.866025 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Same as limitations"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. **Efficiency**: *MiniDrive* is a lightweight VLM with low FLOPs, suitable for real-time deployment on limited hardware, making it highly practical for autonomous driving.\n\n2. **Dynamic Adaptation**: The *Dynamic Instruction Adapter* enhances cross-modal understanding by adapting visual tokens to user instructions, improving interaction quality in real-world applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose the MiniDrive framework, a lightweight vision-language model for autonomous driving, optimizing efficiency with reduced parameters. Using the FE-MoE module for visual processing and the DI-Adapter for dynamic instruction response, it achieves competitive performance on the DriveLM dataset while lowering computational costs, making it practical for real-time use on limited hardware."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "This paper, though notable, leans more toward an engineering approach than a research-oriented contribution. I identify the following limitations:\n\n1. **Insignificant Training Cost Reduction**: Reducing training cost is not significant. A comparable 4-bit or 8-bit quantized large language model (LLM) with ~7B parameters can also be fine-tuned on a single RTX 4090 GPU using adapters, which limits the novelty in terms of efficiency.\n\n2. **Limited Benchmarking Scope**: The integration of UniRepLKNet for visual feature extraction and the Mixture of Experts (MoE) design should be evaluated on a broader range of benchmarks beyond autonomous driving (AD) datasets. If the authors focus solely on AD datasets, it would be beneficial to emphasize how the architecture uniquely benefits AD scenarios. Currently, the proposed FE-MoE framework appears generalizable to various visual modality applications, lacking a clear advantage for AD-specific use cases.\n\n3. **Lack of Task-Specific Uniqueness in Dynamic Instruction Adapter**: The Dynamic Instruction Adapter is a promising concept, though it suffers from a similar limitation as (Limination 2) — it lacks specialization for AD tasks, which could limit its applicability in scenarios beyond general-purpose visual adaptation. Also, this idea is not new and similar idea is applied in many other works (e.g. Llama-Adapter [1] and CogVLM [2]).\n\n4. **Ambiguity in the MoE Approach**: The FE-MoE’s primary goal seems to be fusing tokens from different camera sources, yet the reasoning behind using a Mixture of Experts is unclear. In most AD scenarios, information from all cameras is essential. Applying a hard limit (e.g., selecting only the top-k experts, where \\( k < 6 \\)) risks discarding critical visual data from unselected cameras. 
Conversely, if \\( k = 6 \\) (i.e., using all cameras), simpler feature transformation and merging techniques could be more efficient than the current gating + softmax + elementwise weighted merge approach, which substantially increases GPU memory consumption.\n\n5. **Simplistic Illustrative Examples**: Figure 5 does not adequately demonstrate the benefits of MiniDrive over competing frameworks, such as DriveLM-Agent. The examples lack complexity and do not showcase significant advantages.\n\n6. **Incomplete Comparative Evaluation**: In Table 2, models like LLM-Driver and Drive-GPT4 possess explicit waypoint prediction capabilities and are thus evaluated with UniAD metrics. MiniDrive, however, seems like has not implemented waypoint prediction, preventing a direct comparison with these models and leaving its performance on this critical aspect unaddressed.\n\n[1] Zhang, Renrui, et al. \"Llama-adapter: Efficient fine-tuning of language models with zero-init attention.\" arXiv preprint arXiv:2303.16199 (2023).\n\n[2] Wang, Weihan, et al. \"Cogvlm: Visual expert for pretrained language models.\" arXiv preprint arXiv:2311.03079 (2023)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Given the paper’s lack of novelty in its proposed FE-MoE mechanism, absence of real-time performance benchmarks, and incomplete evaluation for autonomous driving environments (e.g., multi-camera setup), its contributions are limited in scope and significance. The paper would benefit from a clearer demonstration of MiniDrive’s practical impact, including concrete real-time performance metrics, and a more thorough investigation of its architectural claims.\n\n1.\tReal-Time Performance Validation: Could you provide detailed inference times or benchmarks to clarify MiniDrive’s performance in real-time environments? This would help substantiate the paper’s emphasis on efficiency.\n2.\tDifferentiation of FE-MoE from Existing MoE Architectures: How does FE-MoE specifically improve upon standard MoE frameworks, particularly sparse MoEs, in terms of efficiency for autonomous driving applications?\n3.\tMulti-Camera Integration Methodology: Could you elaborate on how MiniDrive supports multi-camera setups in autonomous driving, and whether it maintains robust performance across diverse camera angles and resolutions?\n4.\tBaseline Model Configuration: In Table 3, could you clarify the configuration of the baseline model without FE-MoE, specifically addressing the parameter count and computational requirements to ensure fair comparison?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tGoal-Oriented Design for Autonomous Driving: MiniDrive seeks to address the high resource demands of typical vision-language models, specifically aiming to enable real-time processing in the context of autonomous driving.\n2.\tEfficient Model Parameters and FLOPs: The model claims efficiency in terms of FLOPs and memory usage, potentially supporting multi-instance training on a single GPU, which can be beneficial for applications with limited computational resources."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes MiniDrive, a vision-language model optimized for autonomous driving tasks. MiniDrive incorporates the Feature Engineering Mixture of Experts (FE-MoE) and Dynamic Instruction Adapter to dynamically handle visual features and integrate contextual information from text inputs. The paper highlights the model’s lower FLOPs, parameter count, and memory usage compared to larger models, aiming for efficient processing on single GPUs"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tLack of Real-Time Performance Evaluation: Despite the model’s claim of real-time suitability, there is no specific evaluation of inference time or processing speed, which is critical for applications in autonomous driving. The model’s practical performance remains unproven in real-world settings.\n2.\tLimited Novelty of FE-MoE: 1. The FE-MoE mechanism in MiniDrive employs a continuous weighted-sum approach across multiple experts, similar to the foundational work on Mixture of Experts by Shazeer et al. (2017). In this pioneering study, Shazeer et al. introduced a sparsely-gated MoE structure where multiple experts contribute via a weighted-sum aggregation. While the original model aimed at efficiency by leveraging sparse gating, MiniDrive’s FE-MoE does not implement sparse gating, thus potentially requiring higher computational resources. Given the similarity in the underlying weighted-sum aggregation concept and the absence of a sparse mechanism, the FE-MoE lacks sufficient differentiation from established MoE architectures and does not clearly demonstrate a unique advantage for autonomous driving. Further clarification on how FE-MoE improves upon traditional MoEs would strengthen the claim of novelty.\n3.\tInsufficient Multi-Camera Environment Evaluation: While the model mentions multi-image processing, there are no evaluations or specific methodologies provided to demonstrate effectiveness in multi-camera setups, which are essential in autonomous driving for comprehensive scene understanding.\n4.\tInadequate Control Experiment Details: In Table 3, the comparison between MiniDrive and a baseline without FE-MoE is presented. However, there is insufficient information on the exact parameter count and FLOPs of the baseline model, raising concerns about the fairness and interpretability of these results.\n5.\tMinor Errors and Inconsistencies: The paper contains typographical errors (e.g., “ReLu” instead of “ReLU” and inconsistent citation formatting). 
These, along with unclear baseline setup explanations, detract from the paper’s overall clarity and polish."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Most of my questions are included in the Weakness above. I hope the authors can respond concisely to them.\n\nSuggestions: I hope the paper can clarify more about the dataset and task setup."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1.\tThe research and idea proposed in this paper are very well-motivated, focusing on the efficiency of the VLM-based approach that is critical to deploying the method in practice. The methods and findings in this paper could inspire relevant research in this direction.\n2.\tThe paper applies the convolutional UniRepLKNet as the visual encoder, instead of ViT-based encoders, to efficiently encode image inputs from multiple directions and introduces an MoE to enhance the representations. The ablation study justifies the effectiveness of this model.\n3.\tThe proposed MiniDrive outperforms other recent works on the DriveLM dataset by a large margin despite having significantly fewer parameters and a simpler training process, which demonstrates the potential of this method."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a lightweight and efficient vision-language model for addressing the QA-based autonomous driving task (DriveLM). Specifically, a framework called MiniDrive is proposed, which incorporates a Feature Engineering Mixture of Experts module (FE-MoE) and a Dynamic Instruction Adaptor (DI-Adapter). The FE-MoE applies an efficient visual encoder, UniRepLKNet, built based on large convolution kernels, for efficient encoding of multi-view images, followed by a mixture of experts similar to multi-head convolution to enhance model expressiveness. The DI-Adapter conditions visual features based on the input instruction. The proposed method is tested on DriveLM and CODA-LM datasets to verify the model performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe paper argues that the DI-Adapter is one of its main contributions; although it seems effective, as shown in Table 3, the novelty is very limited. The idea of adapting visual representations conditioned on instruction has been extensively considered in VLM literature, e.g., the classic InstructBLIP.\n2.\tThe proposed method encodes observations from six directions for DriveLM QA. However, it is unclear to me how the model utilizes and benefits from cross-image information.\n - From Section 3.3, each image is modeled independently, and all features are fed to the T5 language model together.\n - From the examples provided in this paper, it seems that all questions (except Figure 7 case 2) clearly specify exactly one view for QA. Hence, it is unclear to me if encoding all views is necessary and how much improvement MiniDrive can gain from this. \n3.\tThe biggest weakness of this paper to me is the superficial experiments and lack of in-depth analysis of the system and its behavior.\n - The paper argues that the method has significant efficiency advantages and real-time inference, but there is no experiment to compare the speed with previous approaches. E.g., how many frames/questions can the model process per second (FPS)?\n - It is unclear how exactly the proposed method performs in different tasks, e.g., perception, prediction, and planning, as categorized by the DriveLM dataset, and what is the influence of each proposed component in these tasks.\n - The paper and title highlight the use of multi-level 2D features (the UniRepLKNet), but there is no experiment to compare this encoder to encoders applied in previous works.\n - The DriveLM dataset consists of three components for evaluating Motion, Behavior, and P1-3. This paper only studies P1-3, which includes relatively simple one-shot QA instead of Behavior or Motion, where the traffic anticipation and actual rollout of the ego vehicle are needed. 
I am not convinced that only evaluate on P1-3 can reflect the actual potential of the proposed method in autonomous driving.\n - A relevant question to the above is how to translate the QA responses to actual vehicle control. It is unclear to me how many questions need to be asked and how to integrate those answers before the agent can make a correct control decision in a complex scenario."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@misc{\nzhang2024minidrive,\ntitle={MiniDrive: More Efficient Vision-Language Models with Multi-Level 2D Features as Text Tokens for Autonomous Driving},\nauthor={Enming Zhang and Xingyuan Dai and Yisheng Lv and Qinghai Miao},\nyear={2024},\nurl={https://openreview.net/forum?id=zdKgyC2vnQ}\n}"
},
"abstract": {
"value": "Vision-language models (VLMs) serve as general-purpose end-to-end models in autonomous driving, performing subtasks such as prediction, planning, and perception through question-and-answer interactions. However, most existing methods rely on computationally expensive visual encoders and large language models (LLMs), making them difficult to deploy in real-world scenarios and real-time applications. Meanwhile, most existing VLMs lack the ability to process multiple images, making it difficult to adapt to multi-camera perception in autonomous driving. To address these issues, we propose a novel framework called MiniDrive, which incorporates our proposed Feature Engineering Mixture of Experts (FE-MoE) module and Dynamic Instruction Adapter (DI-Adapter). The FE-MoE effectively maps 2D features into visual token embeddings before being input into the language model. The DI-Adapter enables the visual token embeddings to dynamically change with the instruction text embeddings, resolving the issue of static visual token embeddings for the same image in previous approaches. The DI-Adapter enables the FE-MoE to further extract and process 2D visual features based on user instructions, focus on attention regions, and reduce redundancy. Compared to previous works, MiniDrive achieves state-of-the-art performance in terms of parameter size, floating point operations, and response efficiency, with the smallest version containing only 83M parameters."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Enming_Zhang4",
"~Xingyuan_Dai1",
"~Yisheng_Lv1",
"~Qinghai_Miao1"
]
},
"authors": {
"value": [
"Enming Zhang",
"Xingyuan Dai",
"Yisheng Lv",
"Qinghai Miao"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Vision-language models",
"Autonomous driving"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "zhang|minidrive_more_efficient_visionlanguage_models_with_multilevel_2d_features_as_text_tokens_for_autonomous_driving"
},
"pdf": {
"value": "/pdf/fb266b56650602951e03f412938f8c9137ddcd1f.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to robotics, autonomy, planning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MiniDrive: More Efficient Vision-Language Models with Multi-Level 2D Features as Text Tokens for Autonomous Driving"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
zeAOzn80VQ | Dataset Ownership Verification in Contrastive Pre-trained Models | main | Active | Dataset Ownership Verification;Data Protection;Contrastive Learning;Pre-trained Models;Self-supervised Learning | alignment, fairness, safety, privacy, and societal considerations | 5;5;6;6;8 | 3;4;4;4;3 | 2;2;3;3;3 | 2;2;3;2;2 | 3;1;3;3;3 | 6 | 3.6 | 2.6 | 2.2 | 2.6 | -0.372678 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- How effective is the approach when the suspect model undergoes adaptation, such as fine-tuning on a different dataset or with altered weights? Is the method still robust under such conditions, or are there specific scenarios where fine-tuning could mask the original dataset’s influence on the model?\n- How sensitive is the proposed DOV method to variations in the suspect model’s hyperparameters or pre-training dataset characteristics (e.g., domain-specific data)?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper presents a unique dataset ownership verification (DOV) method specifically tailored for self-supervised contrastive learning models. This is a valuable addition to the field, as existing DOV methods are generally focused on supervised or non-contrastive learning models, leaving a gap that this paper addresses.\n-The authors conduct extensive experiments across multiple datasets (CIFAR10, CIFAR100, SVHN, ImageNet variants) and contrastive learning architectures (SimCLR, BYOL, MoCo, DINO), demonstrating the method's effectiveness and generalizability. This robust experimental setup strengthens the validity of the proposed approach.\n- By requiring only a small subset of the defender’s data for verification, the proposed method is more computationally efficient than baseline methods like D4SSL, which require access to the entire dataset, making it suitable for large-scale applications."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a novel method for verifying dataset ownership in self-supervised contrastive learning models, protecting curated datasets from unauthorized use. The proposed approach leverages the \"contrastive relationship gap,\" capturing distinct similarities in representations for models trained on a defender's dataset versus unrelated data. Through a three-step process, the method identifies unauthorized usage efficiently and with high accuracy. Experimental results on several datasets and models show that this approach outperforms existing methods in accuracy, computational efficiency, and robustness, even under privacy-preserving settings like DP-SGD, making it a promising tool for dataset security in machine learning."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- While the proposed method offers a novel approach to dataset ownership verification, its applicability is limited to contrastive learning models. Many self-supervised learning models use objectives other than contrastive learning, so expanding the method’s scope could enhance its impact. However, this limitation is relatively minor.\n- In line 488, the authors state that \"the private training method does not affect our verification results,\" but this claim is based on experiments using only DP-SGD with a high privacy budget (epsilon=50). To support this claim, it would be beneficial to test the method under stronger privacy settings or with alternative privacy-preserving techniques, such as differentially private generative models."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could early stopping or other techniques be leveraged to reduce the effectiveness of this detection method? A brief section on possible attack approaches in the appendix may be considered.\n\nThe paper is quite extensive very thorough! :)"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. **Innovative Approach:** The method uniquely applies to self-supervised models by leveraging characteristics of contrastive learning, filling a gap in current DOV methods that primarily target supervised learning.\n2. **Black-box Applicability:** The approach is suitable for black-box scenarios, which is practical and aligned with real-world applications where full model access is unavailable. The approach demonstrates robust performance across different datasets (e.g., CIFAR, ImageNet) and architectures, indicating generalizability.\n3. **Effective Performance:** Results show high sensitivity, specificity, and AUROC scores, suggesting that the proposed metric reliably distinguishes between legitimate and unauthorized dataset use.\n4. **Efficiency:** Compared to alternatives, the method is computationally efficient, which enhances its applicability to large datasets like ImageNet.\n5. **Thorough evaluation:** The paper is very comprehensive, they make specific claims and justify them with solid experiments and results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a novel method for dataset ownership verification (DOV) specifically tailored for contrastive pre-trained models in self-supervised learning. The method utilizes two observations about contrastive learning: the unary and binary relationships in the embedding space of models trained on a specific dataset. These observations are exploited through a contrastive relationship gap metric, calculated between the suspected model and a shadow model pre-trained without the dataset in question. Comprehensive experiments across datasets and models demonstrate that the method effectively detects unauthorized dataset usage, outperforming baseline techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Dependency on Feature Representation Access:** The method requires access to feature representations, which might not be feasible in all practical scenarios, as many services limit this access for security reasons.\n\n2. **Limited Application to Non-Contrastive Pre-Trained Models:** The method’s effectiveness is constrained to contrastive learning. Other prevalent pre-training strategies, such as masked image modeling (MIM), are not effectively addressed, potentially limiting applicability. However, the authors make a clear claim and explain this as a limitation, thus I think the weaknesses are ok given this is one of the earlier works."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper addresses an important and novel problem—dataset copyright protection in contrastive learning. The authors provide a comprehensive range of experiments, and the proposed method consistently demonstrates outstanding results across all tested settings."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a method for protecting datasets from infringement in contrastive learning scenarios by introducing two relationships: unary and binary. The unary relationship assesses the clustering ability of representations for augmentations of a known sample, while the binary relationship evaluates the separation between representations of two known samples. Using these properties, the authors define distance metrics to evaluate protected training data (known to both the suspect and defender) and secret data (unseen by the suspect but known to the defender). Since suspect models are overfitted to training data and have not seen the secret data, they exhibit different behavior for these two data types, which the method uses to identify suspect models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have several concerns: \n\n\n1. The proposed unary and binary relationships align with the goals of contrastive learning, which promotes close representations for variants of the same sample and separation for different samples. The authors rely on overfitting to training data for verification, but as contrastive learning improves, this approach may be less effective. Enhanced contrastive learning might eventually generalize representations, clustering representations from single-sample into a single point, and representations obtained from $N$ distinct samples into $N$ separate points, even for unseen data. Though this is an idealized scenario, this aligns with the direction of contrastive learning research, so verification should be robust to and able to coexist with advances in contrastive learning.\n\n\n2. The proposed method is similar to verification approaches in supervised learning that use differences in confidence scores or accuracies between training and test data. As Guo et al. [1] demonstrated, task performance differences between seen and unseen data are well-documented, but they are not commonly used for verification due to two primary reasons:\n1) Future research is expected to reduce overfitting and improve generality, as noted in Comment 1.\n2) An adversary could argue that similar samples exist in their training data by chance, complicating proof that observed low p-values are due to protected data.\n\nTo address these, verification metrics should be designed independently of task performance, ensuring that evidence cannot naturally occur by chance. Given this, the proposed method may lack admissible evidence of dataset infringement.\n\n[1] Chuan Guo et al. On Calibration of Modern Neural Networks\n\n3. Given the challenges noted above, many dataset protection methods in supervised learning use backdoor attacks or data poisoning. There are also backdoor attack studies specific to contrastive learning, such as Zhang et al. 
[2] and Carlini et al. [3]. The authors, however, only compare their method to a model protection technique and a unary-only method (e.g., EncoderMI). I suggest adding comparisons with established backdoor and data poisoning methods for contrastive learning.\n\n[2] Zhang et al. Data Poisoning-based Backdoor Attacks to Contrastive Learning\n\n[3] Carlini et al. Poisoning and Backdooring Contrastive Learning\n\n\n4. Section 4.5.2 is critical, as contrastive learning is often used as a pretraining method, and adversaries are more likely to release fine-tuned models. Thus, verification post-fine-tuning is essential. However, this section only states that experiments were conducted, without presenting results in the main text. It references \"Table 7 in Appendix A.6,\" which are outside the main manuscript. Ideally, essential content should be included in the main text, with the appendix for supplementary details. Additionally, details on the experiments are missing from the appendix, and Table 7 should include downstream performance results, as small learning rates could affect the reported outcomes. Additionally, Figure 3 occupies too much space; it would be better to reduce its size and include more analysis results directly in the main manuscript.\n\n\n5. The authors state that they \"focus on the black-box setting where defenders have no information about other training configurations (e.g., loss function and model architecture) and can only access the model via Encoder as a Service (EaaS)\" and that \"defenders can only retrieve feature vectors via the model API.\" However, Section 4.5.2 notes, \"we can only use the predicted probability vectors of the input samples,\" which seems inconsistent. In a true black-box setting, I would expect only the predicted class ID, not output logits or probability vectors, to be available.\n\n\n\n6. The analysis related to the amount of $D_{alt}$ in Figure 4 is essential but lacks explanation in Section 4.4.2. 
There is no clarification on how the authors control the ratio, whether by increasing $D_{alt}$ or reducing $D_{pub}$, or on what each point in Figure 4 represents. Since the change in log(p) for $D_{pub}$ suggests a controlled amount of $D_{pub}$, it may not be appropriate. With a fixed $D_{pub}$, only the amount of $D_{alt}$ should be adjusted."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Could the authors provide both theoretical and experimental insights into the role of comparative learning within your approach?\n2. Authors only compare the method with two baselines: DI4SSL and EncoderMI. Could the authors compare more recent related works?\n3. Table.5 is missing EncoderMI time cost. Could author afford more information about time cost of other baselines?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper introduces a novel method for dataset ownership verification (DOV) specifically tailored for contrastive pre-trained models, addressing a critical need in data rights protection.\n2. The paper introduces the concept of \"contrastive relationship gap,\" providing a clear technical approach to differentiate the model's performance on training and non-training datasets.\n3. The method has been validated across multiple contrastive pre-trained models, including SimCLR, BYOL, SimSiam, MoCo v3, and DINO, demonstrating its broad applicability.\n4. Experimental results show that the method can significantly outperform previous methodologies with a high probability of rejecting the null hypothesis (p-value well below 0.05)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work proposes the first dataset ownership verification method specifically for self-supervised pre-trained models using contrastive learning. The paper identifies significant variations in unary and binary instance relationships within embedding spaces when models are trained with specific datasets, compared to those trained without them. It introduces the concept of \"contrastive relationship gap,\" a novel technique for verifying dataset ownership in contrastive pre-trained models. Extensive experiments demonstrate the approach's effectiveness, with a p-value significantly below 0.05, surpassing previous methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. As paper illustrated in limitations and conclusion, the method is primarily effective for encoders pre-trained with contrastive learning and may not perform well with other self-supervised learning pre-training methods like Masked Image Modeling (MIM).\n2. The method lacks comparisons with enough baselines in the experimental section to clearly show its superiority.\n3. CONTRASTIVE RELATIONSHIP GAP Part is difficult and too mathematical to understand. The authors could improve the writing to make it easier for the reader to understand."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Does the proposed method work with the CLIP model, which also utilizes contrastive learning for pre-training? \n2. Are the findings influenced by the number of training classes and examples? For instance, if Msus has 10k classes and Msdw has only 10, do the proposed methods work as expected?\n3. How sensitive is our method to the choice of augmentations? Specifically, we consider scenarios where new augmentation techniques are introduced during evaluation but were not present during training of Msus. In such cases, the distances between outputs from Msus may increase, even when it used public data?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The research topic is important.\n2. The authors conduct many experiments.\n3. The performance is strong compared with baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors investigate the challenge of protecting high-quality open-source datasets from misuse by individuals who lack ownership verification. To address this issue, they propose a method that exploits a natural property of contrastive learning: the distinct distances between seen and unseen examples. Specifically, they demonstrate that these distances often appear significantly larger for unseen examples compared to their seen counterparts. The authors conduct an extensive set of experiments with various models and datasets to evaluate the efficacy of their proposed approach, which yields substantial improvements over existing baselines."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. While the authors present distance metrics for dsus and dsdw, I believe it would be beneficial to include some visualizations.\n2. Contrastive learning is currently a hot research area in computer vision, but the proposed methods appear to be limited to it, which may restrict their broader applicability.\n3. The distances between examples are influenced by many factors beyond seen and unseen examples, including generalization capabilities and augmentations. I have concerns that the results presented may not fully support the claims."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "The first dataset ownership verification method specifically designed for contrastive pre-trained models."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024dataset,\ntitle={Dataset Ownership Verification in Contrastive Pre-trained Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zeAOzn80VQ},\nnote={under review}\n}"
},
"abstract": {
"value": "High-quality open-source datasets, which necessitate substantial efforts for curation, has become the primary catalyst for the swift progress of deep learning. Concurrently, protecting these datasets is paramount for the well-being of the data owner. Dataset ownership verification emerges as a crucial method in this domain, but existing approaches are often limited to supervised models and cannot be directly extended to increasingly popular unsupervised pre-trained models. In this work, we propose the first dataset ownership verification method tailored specifically for self-supervised pre-trained models by contrastive learning. Its primary objective is to ascertain whether a suspicious black-box backbone has been pre-trained on a specific unlabeled dataset, aiding dataset owners in upholding their rights. The proposed approach is motivated by our empirical insights that when models are trained with the target dataset, the unary and binary instance relationships within the embedding space exhibit significant variations compared to models trained without the target dataset. We validate the efficacy of this approach across multiple contrastive pre-trained models including SimCLR, BYOL, SimSiam, MOCO v3, and DINO. The results demonstrate that our method rejects the null hypothesis with a $p$-value markedly below $0.05$, surpassing all previous methodologies."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Dataset Ownership Verification",
"Data Protection",
"Contrastive Learning",
"Pre-trained Models",
"Self-supervised Learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/de12e72dec8eb9de73c69645405b3ec8b0c7e9e1.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/09c6a4dfe1f8f25543e9d5d258b76ab3daf3e2e1.zip"
},
"title": {
"value": "Dataset Ownership Verification in Contrastive Pre-trained Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zeBhcfP8tN | Trust but Verify: Programmatic VLM Evaluation in the Wild | main | Active | vision-language models;evaluation;hallucinations | datasets and benchmarks | 3;3;5;5 | 4;4;4;4 | 2;2;3;2 | 2;2;2;3 | 2;2;3;2 | 4 | 4 | 2.25 | 2.25 | 2.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See Weakness"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "I do like how this method could evaluate VLM compositionally with a prepared set of programs, instead of all in a whole with a LLM. \nThe design of helpfulness and truthfulness is interesting. It is interesting to find a way to evaluate hallucination in VLMs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Programmatic VLM Evaluation (PROVE) introduces a novel benchmark for assessing VLMs. Normally, we evaluate image captioning with the generated caption and the gold caption as two whole paragraphs. Building upon DOCCI, which is a new dataset that came out this year, PROVE collects a robust set of 10.5k visually grounded question-answer (QA) pairs by using a detailed scene-graph approach that evaluates image captions compositionally. It provides a programmatic evaluation strategy that measures both the helpfulness and truthfulness of a free-form model responses."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The presentation is poor. It took me a while to finally realize that this paper presents a way to evaluate image-captioning, through breaking the captioning task into VQA tasks, and the answers are evaluated by a program generated by GPT based on gold scene graph, instead of evaluated by GPT based on the gold caption. \n\n2. The results in Table1 and example outputs in Figure 5 are very confusing. Why are the performance of all models look similar? From my personal experience, GPT-4o should be better than other models, especially the open-source models by a lot. But they seem to have same performance as in Table1. From the original DOCCI paper, different models also score very differently. From Figure 5 in the first example's top question, I don't understand why the hscore for GPT-4o and LLaVA are both so high -- none of them gave the correct answer. In that same example's bottom question, I don't understand why the hscore for LLaVA is so low, given that it answers the question perfectly. \n\n3. All the questions are generated by LLM. This could potentially only include easy questions, and might explain why the performance in Table 1 are all similar. \n\n4. The Oracle setting result is way too low -- only 4 % higher than all other models. Isn't the Oracle setting the same setting you applied when generating the QA dataset? Shouldn't this setting then achieve 100% in accuracy? \n\n5. The average cosine similarity score in (1) and (2) is not very convincing. From Figure 5 in the first example's top question, I don't understand why the hscore for GPT-4o and LLaVA are both so high -- none of them gave the correct answer. In that same example's bottom question, I don't understand why the hscore for LLaVA is so low, given that it answers the question perfectly."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Questions on the human study:\n- Are human ratings on helpfulness continuous or discrete?\n- The correlation score with the helpfulness score is 0.54 and quite low – can the authors provide insights into why this is the case? Is there anything humans pay more attention to for helpfulness that the hscore doesn’t capture?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well-motivated and tackles an important research problem in VLMs evaluation. The inclusion of truthfulness in addition to helpfulness is thoughtful and often neglected. \n- The paper is generally well-written with clear definitions of the helpfulness and truthfulness metrics, and helpful illustrations like figure 4. \n- The evaluation covers a broad range of models. \n- The authors perform multiple data filtering steps to ensure the correctness of the programs and high quality of the QA pairs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new benchmark which the authors constructed by turning detailed captions into scene graph representations, and generating QA pairs as well as the corresponding verification programs based on the scene graphs. It also proposes a programmatic evaluation of the VLMs’ responses on both helpfulness and truthfulness by comparing the predicted scene graphs and ground-truth scene graphs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The reviewer is mostly concerned about the use of models in multiple parts of the dataset generation, filtering, and evaluation pipeline, especially in extracting the scene graphs from captions. \n- For example, the scene graphs are not guaranteed to be completely accurate, as they are automatically extracted from the captions in the DOCCI dataset by an LLM without any human verification or filtering. \n- Similarly, as the authors mentioned, the sentence BERT model and visual entailment model OFA are used for metrics calculation, which means the evaluation accuracy is limited by these models’ accuracies, making this a much less rigorous evaluation benchmark. Have the authors analyzed how errors in these models might propagate through the evaluation?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weakness."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The definition of the two metrics, i.e. helpfulness and trustfulness, based on the scene graphs, is interesting.\n2. The writing is clear and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes a new evaluation paradigm, named PROVE, for evaluating the “helpfulness” and “trustfulness” of VLM. The evaluation is based on the newly proposed dataset, where the images are paired with LLM generated dense scene graphs using detailed descriptions and the QA pairs, together with an executable program to derive/verify the answer, are generated using LLMs. Then a “helpfulness” score (measuring the recall of entities) an “trustfulness” score (measuring the precision) are defined based on the program and the scene graph. Two limited human evaluations are provided, verifying the correctness of the eval paradigm. Multiple VLMs are evaluated, showing that these models can hardly balance the two metrics."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Generalizability of the proposed evaluation paradigm is limited. The evaluation requires a dense scene graph and an executable program, which limits its usage to only the proposed dataset. The evaluation can be hardly generalized to images/questions without detailed annotations. Moreover, the evaluation’s effectiveness is bounded by the quality of the dense scene graph/detailed caption. Anything that is not in the scene graph cannot be evaluated. This is not exactly a “in-the-wild” evaluation as the paper claimed.\n2. What is the advantage of the proposed method, over the claim-based evaluation method, where the model’s answer is decomposed into several claims, then LLMs verify each of the claims directly? The advantage of the latter includes that it is more flexible (does not require scene graph/program, thus can be applied to open world images), and more simple (thus is easier to apply).\n3. The human study shows Pearson correlation of 0.81 for helpfulness and 0.45 for trustfulness, which is not super-high (especially for trustfulness). Any analysis on this? Another human verification can be conducted in a side-by-side manner: given 2 model responses for the same question, let human raters annotate which one is better (in terms of helpfulness and trustfulness), then use this side-by-side human rating to check the evaluation scores."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- The citation for OFA seems to be wrong; it should be Wang et al., https://arxiv.org/abs/2202.03052"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- (1) Introduction of new hallucination benchmark for VLM based on programmatic questions generated from scene graph."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose PROVE, a new evaluation benchmark for VLMs’ hallucination. PROVE is built on top of the DSG scene graphs of DOCCI dataset, by generating question-answer pairs from the scene graphs and pragmatic/text-based filtering of wrong question-answer pairs. The question/GT answer/verification programs are generated with GPT4o, and the answers generated by VLMs are evaluated with Sentence-BERT and OFA. PROVE has two metrics — hscore (helpfulness) and tscore (truthfulness), based on recall and precision with respect to scene graphs, respectively. The authors evaluate different VLMs in PROVE, and show that different models have different balance between helpfulness/truthfulness. The authors also provide human evaluation showing that PROVE questions are mostly relevant and correct."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- (1) **Methodological contribution is weak.** The authors generate question-answer pairs from the existing Davidsonian scene graphs (DSG; Cho et al., 2024) already shared by DOCCI (Onoe et al., 2024) authors. DSG paper already provides a question-answering generation pipeline from the scene graphs, and it is not clear how the proposed question-answering generation pipeline is more novel or more helpful.\n\n- (2) **No comparison with existing metrics/benchmarks.** The authors mention many VLM hallucination benchmarks in the related work but do not show qualitatively or quantitatively how PROVE is better than existing benchmarks."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Reliable in-the-wild VLM benchmarking via programmatic verification & evaluation"
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024trust,\ntitle={Trust but Verify: Programmatic {VLM} Evaluation in the Wild},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zeBhcfP8tN},\nnote={under review}\n}"
},
"abstract": {
"value": "Vision-Language Models (VLMs) often generate plausible but incorrect responses to visual queries. However, reliably quantifying the effect of such hallucinations in free-form responses to open-ended queries is challenging as it requires visually verifying each claim within the response. We propose Programmatic VLM Evaluation (PROVE), a new benchmarking paradigm for evaluating VLM responses to open-ended queries. To construct PROVE, we provide a large language model with a high-fidelity scene-graph representation constructed from a hyper-detailed image caption, and prompt it to generate diverse question-answer (QA) pairs, as well as programs that can be executed over the scene graph object to _verify_ each QA pair. We thus construct a benchmark of 10.5k challenging but grounded visual QA pairs. Next, to evaluate free-form model responses to queries in PROVE, we propose a _programmatic_ evaluation strategy that measures both the helpfulness and truthfulness of a response within a unified scene graph-based framework. We benchmark the helpfulness-truthfulness trade-offs of a range of VLMs on PROVE, finding that very few are in-fact able to achieve a good balance between the two."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"vision-language models",
"evaluation",
"hallucinations"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/03bb9d6a0164ce2bdd575b5a6f0bed531ecdd08d.pdf"
},
"presentation": null,
"primary_area": {
"value": "datasets and benchmarks"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Trust but Verify: Programmatic VLM Evaluation in the Wild"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zeeLxGw5pp | Enhancing Robustness of Deep Learning via Unified Latent Representation | main | Active | deep learning robustness;out-of-distribution inputs;adversarial examples;VAE latent representation | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 1;1;1;5;8 | 4;4;4;4;3 | 1;2;1;3;4 | 1;1;1;2;4 | 1;2;2;2;3 | 3.2 | 3.8 | 2.2 | 1.8 | 2 | -0.840168 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- In the discussion, the authors report \"Even though adversarial examples from the discriminative model end up in the latent holes of the VAE, the active defense through HMC cannot return to the regions with high probability.\" Isn't this result in disagreement to the work by Kuzina et al., which suggests that you should be able to do this? Beyond just differences in discriminative and latent models, is there any explanation for this?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The question is important, the method is relatively straightforward, the main results are interesting, and there is good applicability to other work. I like the idea of being able to filter inputs before feeding them into a larger model."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors address the problem of distinguishing between adversarial inputs and out-of-distribution (OOD) inputs to a VAE. They propose a methodology for identifying whether an input is adversarial or OOD that does not rely on Bayesian inference over the model weights. Bayesian inference is taken as the state of the art. They compare their methodology to the state of the art, and demonstrate its effectiveness using pre-defined in-distribution and out-of-distribution datasets, and again using three different kinds of adversarial attacks. The results show that their method is as good as the SOTA, without relying on the same assumptions and approximations. They also report that, from a latent perspective, there is no difference between the adversarial and OOD inputs; considering the input space was helpful in distinguishing between the two."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "In general, I found the paper arguments difficult to follow. More scaffolding/introductory and connecting phrases would have been helpful."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Did you try scaling this method? What are the challenges with applying this to much larger datasets? \n- What is the impact of Lipschitz regularisation and GroupSort activations?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- The Probabilistic modeling seems reasonable"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a VAE-based method to tackle both OOD detection and adversarial robustness. They use a hole indicator obtained from sampling from $q(z|x)$ as the score for distinguishing clean from out-of-distribution and adversarial inputs. The paper further uses distance in the input space to distinguish adversarial from OOD inputs. The encoder part of the VAE is further trained with GroupSort activation and pre-defined Lipschitz constant as is commonly done in the field of provable robustness. Evaluation is done on Mnist, FashionMnist, SVHN and CIFAR10."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Dataset selection is not acceptable for an ICLR2025 paper. MNIST, FashionMNIST, and SVHN are only toy datasets. None of the datasets has a sufficient number of classes. CIFAR100 would be the bare minimum to see how well the method scales with the total number of classes. ImageNet-1K would be ideal since there is no high-resolution dataset (above 32x32) in the current pool.\n- The OOD evaluation is trivial. As many works have shown (e.g. [Breaking Down Out-of-Distribution Detection](https://arxiv.org/abs/2206.09880)), the only challenging benchmarks are OOD benchmarks that require the model to distinguish classes (for example CIFAR10 vs CIFAR100) instead of dataset statistics. CIFAR10 vs LSUN, CIFAR100, and CelebA is the minimum requirement (and similarly for CIFAR100). \nOther works like [PViT: Prior-augmented Vision Transformer for Out-of-distribution Detection](https://arxiv.org/html/2410.20631v1) are doing much more challenging benchmarks like ImageNet vs iNat, NINCO and ImageNet-o.\n- \"Bayesian variational autoencoders for unsupervised out-of-distribution detection\" is not even close to \"state-of-the-art\" in the field. Density-based methods have been shown time and time again to be worse at OOD detection than classifier-based methods. State-of-the-art OOD has been held by some form of pre-trained ViT for years, for example, [PViT](https://arxiv.org/html/2410.20631v1) or [Exploring the Limits of Out-of-Distribution Detection\n](https://arxiv.org/pdf/2106.03004). Just because this method is based on VAEs does not mean that it does not have to compare to actual state-of-the-art methods based on different methods.\n- Cleverhans is severely outdated for the evaluation of adversarial robustness. [AutoAttack](https://github.com/fra31/auto-attack) / [RobustBench](https://robustbench.github.io/) is the standard used by almost every paper in the field. \n- Adversarial robustness results are given in isolation. 
Please compare them to state-of-the-art values in this field from the RobustBench leaderboard. Also, from Section 3.2.3 it seems like the paper can only detect adversarial examples but not actually classify them.\n- The paper does not consider adversarial attacks on OOD data (as was done in [this paper](https://arxiv.org/abs/2003.09461)). This is commonly done to make an OOD sample appear to be from the in-distribution using adversarial attacks. Instead, the paper seems to distinguish only between clean data, OOD data, and adversarial data. \n- There seem to be few methodological contributions in this paper and instead, the paper mostly combines existing ideas in a rather straightforward fashion."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "* Strong justifications are necessary why VAEs are used instead of numerous highly performing generative models.\n\n* Their particular method should be better justified. \n\n* Much larger datasets should be used such as ImangeNet-1k.\n\n* Extensive performance comparisons should be performed with other methods."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "Their solution based on VAEs can potentially detect adversarial inputs and OOD inputs. Their insights adversarial inputs and OOD inputs have some similarities in the latent space are interesting, which can be potentially useful for other researchers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper addresses two challenges including adversarial examples and OOD inputs. The authors propose a solution using VAEs to those issues. The authors use some form of scores detecting if the corresponding latent code is in the hole or not. Their found that adversarial and OOD inputs share similar latent representations in VAEs. Their pre-trained VAE can be potentially used as a filter with any DNN classifier architecture trained on the same data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* The authors state \"...recent research revealed that such estimations are prone to errors, often providing higher likelihood values to both OoD and adversarial examples than to in-distribution data (Nalisnick et al., 2018).\" and \"It has been shown that DGMs do not produce valid estimations of p(x) when it comes to distinguishing between OoD and in-distribution (Nalisnick et al., 2018).\" However, the paper does not summarize the recent research results in the area. Indeed, there have been numerous advanced schemes such as De-biasing (DB), Input Complexity Based Likelihood, Likelihood Ratio (LRat), Likelihood Regret (LReg), Watanabe Akaike Information Criterion (WAIC), which are most based on VAEs. \n\n* I do not understand why \"Bayesian\" generative models must be used. There are lots of other types of generative models that are working very well such as GANs, flow-based models, autoregressive models, diffusion models, energy based models, etc.\n\n* I do not understand why VAE must be particularly considered. As discussed above, as of today, VAEs are fairly weak generative models.\n\n* Developing their specific mechanisms, the authors particularly chose the results form some particular researchers, Glazunov & Zarras, 2022 & 2023. But, I am not sure why those particular results are leveraged here. \n\n* My major concern is about the datasets. Only very small datasets are tested such as MNIST, FashionMNIST, SVHN, and CIFAR-10. These days, in numerous works on OOD, larger datasets are considered such as CIFAR-100, tiny-ImageNet, mini-ImageNet. Furthermore, most recently, in many works, ImangeNet-1k is often considered as in-distribution, while iNaturalist and Texture are considered as OOD. \n\n* The baseline methods are also very concerning. I hardly see any performance comparison with any other schemes. Seems that the authors tested their own schemes without any comparison to others in the literature. 
In addition to the methods that I mentioned above, over the past a couple of years, a number of very effective OOD methods have been developed. For example, please see the following references: \n\nYang et al., \"Generalized Out-of-Distribution Detection: A Survey,\" 23 Jan 2024.\n\nMiyai et al., \"Generalized Out-of-Distribution Detection and Beyond in Vision Language Model Era: A Survey,\" 31 Jul 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See Weaknesses"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The proposed VAE-based unified framework effectively combines adversarial and OOD sample detection, providing innovation and practicality. This approach offers a comprehensive protection solution for the relevant field. By acting as a modular plugin without modifying the original model’s architecture or weights, the method enhances adaptability and compatibility."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a unified framework based on Variational Autoencoders (VAE) to enhance deep learning robustness by simultaneously detecting adversarial and out-of-distribution (OOD) samples. The method provides an innovative solution for modular plug-and-play integration without altering the original model architecture, aiming to improve resilience across various applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. From a readability perspective, the paper lacks detailed derivation processes. I could not find corresponding derivations in the appendix, and extensive literature review is required to understand the method’s contents. This is unreasonable and makes the paper difficult to follow.\n\n2. I have concerns about the method’s practicality, as detecting a single sample requires a computation cost that is significantly higher than the original task, which is generally unacceptable in real-world applications. The paper needs to provide more analysis on the performance and overhead of the primary task to demonstrate the feasibility of the algorithm.\n\n3. Additionally, I am concerned about the lack of detailed VAE training specifics (at least to a reproducible extent), which is crucial for both the algorithm’s foundation and effectiveness.\n\n4. The dataset used in the experiments is relatively simple. If using ImageNet for computation and validation is challenging, at least including CIFAR-100 results as a reference would be beneficial.\n\n5. It is necessary to include experiments comparing with OOD detection and adversarial attack detection algorithms (demonstrating the main point that these methods cannot detect both simultaneously)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "Please see my detailed comments above."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1.\tIn the abstract, the authors claim that a pre-trained VAE can be seamlessly integrated into any architecture of a deep neural network (DNN) image classifier for the detection of adversarial examples and out-of-distribution (OoD) inputs. However, the experimental section lacks a description of the architecture of the image classifier used. Therefore, could the authors provide additional details about the architecture of the protected image classifier? Furthermore, could the authors validate the performance of the same pre-trained VAE for detection across a broader range of DNN image classifiers with diverse architectures?\n2.\tWhy do the experimental results presented in Table 9 of the appendix conflict with those in Table 1? Additionally, why does Table 1 not include the performance of baseline methods, as seen in Table 9? In Table 9, there is a comparative method labeled 'Entropy (ours)', yet this method is never mentioned elsewhere in the paper. Furthermore, while the authors state in the abstract that their results using the Bayesian VAE method are inconsistent with those of prior work, why do the results in Tables 9 and 10 align with the results presented in Tables 3 and 4 of [3]?\n3.\tFor the Bayesian VAE-based out-of-distribution (OoD) detection method, could the authors provide experimental results for the scenario of using FashionMNIST as in-distribution data versus MNIST as OoD data? Furthermore, for the classical VAE-based OoD detection method, could the authors provide experimental results for the scenario of using SVHN as in-distribution data versus CIFAR10 as OoD data?\n4.\tCould the authors provide a detailed explanation of the specific methods used to generate generative adversarial examples?\n5.\tThe paper states that the experimental performance of the Bayesian VAE method is inferior to that presented in [3]. Could the authors provide the specific codebase used for their Bayesian VAE implementation? 
Additionally, could they check for any differences compared to the implementation in [3] and offer some explanations for these discrepancies?\n6.\tGiven that each experiment in the experimental section is conducted 10 times to account for randomness and then the authors use the average as a final result, could the authors provide the corresponding variance for the relevant experimental outcomes?\n7.\tWhat is your motivation for your \"first method\" mentioned in the Abstract? And what is your view to be validated?\n8.\tWhat are \"latent holes\" and why does the \"The suggested approach\" allow it to be a \"gatekeeper\" in the Abstract?\n9.\tCould the authors visualize the dynamics of the attack strength with respect to the learned data latent representation using Variational Deep Embeddings (VADEs), as mentioned in Section 3.4 of the paper? Additionally, could they provide some insights and analysis related to this visualization?\n\n[1] Vacant Holes for Unsupervised Detection of the Outliers in Compact Latent Representation. UAI 2023.\n\n[2] Alleviating Adversarial Attacks on Variational Autoencoders with MCMC. NeurIPS 2022.\n\n[3] Do Bayesian Variational Autoencoders Know What They Don’t Know?UAI 2022."
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "1.\tThe authors propose a VAE-based framework, in which the VAE is trained in an unsupervised manner on the training data of protected classifiers, to simultaneously detect adversarial examples and Out-of-Distribution (OoD) inputs.\n2.\tThey demonstrate that an alternative score based on importance sampling with classical VAE can achieve comparable detection performance for OoD inputs compared to Bayesian methods.\n3.\tThey conduct extensive experiments on several widely used datasets for validation of OoD and adversarial attacks, demonstrating that the proposed method can effectively detect adversarial examples and OoD inputs."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Summary:\nThis paper presents a unified framework utilizing Variational Autoencoders (VAEs) to concurrently detect adversarial examples and Out-of-Distribution (OoD) inputs, without necessitating modifications to or access to the protected classifiers trained on the same dataset. Specifically, through scrutinizing the score based on Bayesian epistemic uncertainty for OoDs detection, the authors demonstrate that comparable detection performance can be achieved using an alternative score based on importance sampling with classical VAE formulations, thus circumventing the need for Bayesian approaches. Furthermore, they confirm that adversarial examples can be identified using the latter scores. After that, they introduce techniques to distinguish adversarial from OoD inputs by analyzing latent space and applying the Multi-Scale Structural Similarity (MSSSIM). Finally, the experiments conducted on several datasets widely used for validation of OoD and adversarial attacks demonstrate the effectiveness of their approach. However, the motivation of the overall method design is not clear, and the writing of the paper needs to be significantly improved."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tIn the introduction, the authors present their contributions with a focus that is not entirely consistent with the emphasis in the abstract. In the abstract, they highlight their discovery that Bayesian VAE methods underperform compared to classic VAE methods. However, in the introduction, they describe their contributions as the application of both Bayesian VAE and classic VAE methods for detecting adversarial and out-of-distribution (OoD) samples. Additionally, they do not mention in the introduction a separate method they propose for automatically differentiating between adversarial examples and OoD inputs.\n2.\tThe abstract does not effectively convey the authors' contributions to the reader. In the abstract, the authors claim to have developed a distinct method to automatically differentiate between adversarial examples and out-of-distribution (OoD) inputs. However, as demonstrated in Tables 7 and 8 in the experimental section, the method proposed in Section 3.2.3 is only capable of distinguishing generative adversarial examples against VAEs from other problematic inputs and does not effectively separate outliers from discriminative adversarial attacks against classifiers.\n3.\tAlthough the authors extend the out-of-distribution (OoD) detection score from [1] to simultaneously detect both adversarial examples and OoD inputs and design a method based on [2] to differentiate generative adversarial examples for VAEs from OoD samples, they do not provide sufficient theoretical insights into the validity of these proposed methods.\n4.\tIn the methods section, the paper devotes considerable space to explaining the rationale behind the Bayesian VAE-based approach and its associated scoring method. However, the authors ultimately favor the classic VAE-based approach, yet provide only a brief and limited introduction to it. 
This imbalance makes it difficult for readers to achieve a consistent and coherent reading experience.\n5.\tThe methods section of the paper is divided into numerous subsections, some of which lack clarity in conveying their intended content. Additionally, the logical connections between these subsections are not well-defined, making it challenging for readers to grasp the core content of the paper.\n6.\tMany false causal claims in the paper, eg., \" However, the thorough theoretical foundation of deep learning is still lacking. It results in a limited understanding of how deep neural networks generalize. Such a situation led to the discovery ……\". The authors should carefully check these.\n7.\tMany typical adversarial and OOD detection methods, such as [1-10], are missing. It would be better for the authors to incorporate these works in Related work.\n\n[1] Characterizing Adversarial Subspaces Using Local Intrinsic Dimensionality, ICLR 2018.\n\n[2] Adversarial Example Detection Using Latent Neighborhood Graph, ICCV 2021.\n\n[3] LiBRe: A Practical Bayesian Approach to Adversarial Detection, CVPR 2021.\n\n[4] Detecting Adversarial Data by Probing Multiple Perturbations Using Expected Perturbation Score, ICML 2023.\n\n[5] IntensPure: Attack Intensity-aware Secondary Domain Adaptive Diffusion for Adversarial Purification, IJCAI 2024.\n\n[6] A Baseline for Detecting Misclassified and Out-of-Distribution Examples in Neural Networks, ICLR 2017.\n\n[7] Out-of-distribution detection using multiple semantic label representations. 
NIPS 2018.\n\n[8] A simple unified framework for detecting out-of-distribution samples and adversarial attacks, NIPS 2018.\n\n[9] On the Importance of Gradients for Detecting Distributional Shifs in the Wild, NIPS 2021.\n\n[10] Unsupervised Out-of-Distribution Detection with Diffusion Inpainting, ICML 2023.\n\n8.\tIn the reference list, some entries are missing the conference or journal in which the papers were published, while others correspond to works that have since been formally published. Therefore, the reference list requires a thorough review. Below are some problematic references:\n\na. Sorting Out Lipschitz Function Approximation. ICML 2019.\n\nb. Towards Evaluating the Robustness of Neural Networks. IEEE Symposium on Security and Privacy 2017.\n\nc. Explaining and Harnessing Adversarial Examples. ICLR 2015.\n\nd. A Baseline for Detecting Misclassified and Out-of-Distribution Examples in Neural Networks. ICLR 2017.\n\ne. Deep Anomaly Detection with Outlier Exposure. ICLR 2019.\n\nf. Alleviating Adversarial Attacks on Variational Autoencoders with MCMC. NeurIPS 2022.\n\ng. PuVAE: A Variational Autoencoder to Purify Adversarial Examples. IEEE Access 2019.\n\nh. Training Confidence-calibrated Classifiers for Detecting Out-of-Distribution Samples. ICLR 2018.\n\ni. Enhancing The Reliability of Out-of-distribution Image Detection in Neural Networks. ICLR 2018.\n\nj. MagNet: A Two-Pronged Defense against Adversarial Examples. CCS 2017.\n\nk. Do Deep Generative Models Know What They Don't Know? ICLR 2019.\n\nl. Practical Black-Box Attacks against Machine Learning. AsiaCCS 2017.\n\nm. Defense-GAN: Protecting Classifiers Against Adversarial Attacks Using Generative Models. ICLR 2018.\n\nn. PixelDefend: Leveraging Generative Models to Understand and Defend against Adversarial Examples. ICLR 2018."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose using VAEs to enhance DNN robustness against both adversarial examples and OoD inputs by leveraging similarities in their latent space representations, allowing for their detection without retraining classifiers."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024enhancing,\ntitle={Enhancing Robustness of Deep Learning via Unified Latent Representation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zeeLxGw5pp},\nnote={under review}\n}"
},
"abstract": {
"value": "Adversarial examples and Out-of-Distribution (OoD) inputs constitute major problematic instances for the image classifiers based on Deep Neural Networks (DNNs). In particular, DNNs tend to be overconfident with their predictions, assigning a different category with a high probability. In this work, we suggest a combined solution to tackle both input types based on the Variational Autoencoder (VAE). First, we scrutinize the recent successful results in detecting OoDs utilizing Bayesian epistemic uncertainty estimation over weights of VAEs. Surprisingly, contrary to the previous claims in the literature, we discover that we can obtain comparable detection performance utilizing a standard procedure of importance sampling with the classical formulation of VAE. Second, we dissect the marginal likelihood approximation, analyzing the primary source of variation responsible for distinguishing inliers versus outliers, and establish a link with the recent promising results in detecting outliers using latent holes. Finally, we identify that adversarial examples and OoD inputs have similar latent representations. This insight allows us to develop separate methods to automatically distinguish between them by considering their non-similarities in the input space. The suggested approach enables pre-training a VAE model on specific input data, allowing it to act as a gatekeeper. This achieves two major goals: defending the DNN classifier against potential attacks and flagging OoDs. Once pre-trained, VAE can be plugged as a filter into any DNN image classifier of arbitrary architecture trained on the same data inputs without the need for its retraining or accessing the layers and weights of the DNN."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"deep learning robustness",
"out-of-distribution inputs",
"adversarial examples",
"VAE latent representation"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/1eb04b5d4e9faea8286aef116d0c8ec31463913f.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Enhancing Robustness of Deep Learning via Unified Latent Representation"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zf53vmj6k4 | Do LLMs Have Political Correctness? Analyzing Ethical Biases and Jailbreak Vulnerabilities in AI Systems | main | Active | LLM;safety;jailbreak | foundation or frontier models, including LLMs | 1;3;3;8 | 5;4;3;4 | 1;2;2;3 | 1;2;2;2 | 1;2;2;2 | 3.75 | 4 | 2 | 1.75 | 1.75 | -0.273434 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "+ What was the temperature for each model (where it is known)?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "+ Exploring how biases might affect jailbreaking is an interesting and important idea."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This work focuses on using potential biases in a model to jailbreak the model, by associating a request with a particular group."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "+ There are some major improvements to be made for the experimental results. First, all experiments are run with sampling (“the default sampling temperature”), yet there are no confidence intervals. It is entirely possible that Table 2 is a function of small perturbations or random noise since the effect sizes are small. Second, the dataset itself is small, so it could be statistically underpowered for such small effect sizes. Note: at this small of a sample size the minimum detectable effect size is rather large for a binomial distribution. Third, there's a crucial baseline missing: what about just replacement with random adjectives to rule out that this isn't just a function of (un)lucky perturbations. To build more confidence in the result, suggest that: (1) increase the size of the dataset; (2) run sampling multiple times and report confidence intervals; (3) compare a baseline with random adjectives. \n+ Typically defenses come at a cost to utility. This defense in particular, could affect normal task performance, but there is no evaluation of utility here. To improve the paper and build more confidence that the defense does not induce side effects, suggest running on a suite of standard benchmark tasks/evals to see how the defense affects performance.\n\nMinor:\n\nTable 1’s headers are backwards?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "--->> I think the paper should provide some context on 'political correctness' for readers, as well as the motivation behind studying it.\n\n--->> “Line 187: Our work builds on the existing body of research by focusing on the paradoxical consequences of intentional biases introduced for safety purposes “\n\t----->>> I am not entirely sure what this sentence refers to. I think adding references and examples would provide a better explanation.\n\n--->> Line 228-229: I believe there is a conflict in stating that the refusal prefix is the target prefix. In line 222, the target responses refer to malicious responses, while in line 232, they point to refusal phrases\n\n--->> Line 166: Missing reference for ‘walkerspider 2022’ \t\n\n--->> line 378: please fix spelling of ‘alernative’ -> ‘‘alternative’\n\n--->> I am quite confused by the subheading '3.1.2 Formulation': what is being formulated here?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "---> The paper presents an interesting aspect of safety training: safety measures intended to make LLMs more ethical can actually create exploitable biases.\n\n\n---> Experiments were conducted across a variety of LLMs, including both open-source and black-box models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors investigate the intentional biases of LLMs and how these biases can be exploited for jailbreak attacks. They created a jailbreak attack using pairs of privileged and marginalized keywords, along with prompts that ask the LLM to write a fictional story and include harmful questions. This technique, called 'PCJailbreak,' exploits these biases by incorporating different demographic keywords in the prompts. Additionally, they propose 'PCDefense,' a prompt template that uses system prompts and prompt suffixes to adjust these biases and prevent attacks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "--->> 1: The number of keyword pairs is relatively small, and the JailbreakBench dataset being used is also limited. Increasing the number of keyword pairs and incorporating a more comprehensive dataset could have made the experiments more rigorous.\n\n--->>> I believe one of the weaknesses of the paper is that it limits the experiments to a few keyword pairs and does not include an ablation study.\n\n--->>>> What will happen if we replace the words in pairs with something entirely different? Will we get the same results?\n\n--->>>> Additionally, the prompt structure used in the experiment does not determine whether the jailbreak occurs solely due to the request to write a fictional story or because of the presence of marginalized words.\n\n--->> 2: In the PC Defense, “IF THE REQUEST INCLUDES HARMFUL CONTENT, DO NOT RESPOND.” is added as the suffix prompt.\n\n---->>> In in-context learning, the model response is dependent on the input label distributions and formatting, and asking the LLM to basically check every prompt is very obvious to make LLMs to scan its input.\n\n--->>> This is likely that this suffix alone will decrease the ASR without the ‘system prompt’. \n\n--->>> I believe an ablation study should have been done to measure the efficacy of the proposed prompt. \n\n\n\n--->> 3: Line 377: \"By using a defense prompt, our defense mechanism elicits the refusal of a wide range of harmful content without requiring significant additional computing resources, which makes it an attractive alernative to Guard Models.” --- I could not find any evidence to support this claim in the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the Weaknesses above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. **Extensive Model Evaluation**: The paper evaluates a wide range of models, including some of the latest LLMs, providing a comprehensive view of jailbreak vulnerabilities across different architectures and alignment techniques.\n\n2. **Community Contribution**: By open-sourcing the code and artifacts of PCJailbreak, the authors facilitate further research on bias and jailbreak vulnerabilities, promoting transparency and enabling the community to explore and develop more robust defense strategies."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces PCJailbreak, a method designed to analyze and exploit politically correct (PC) alignment-induced biases in LLMs, which lead to differing jailbreak success rates across various demographic group keywords (e.g., gender, race). The PCJailbreak framework systematically reveals how biases injected for safety purposes can paradoxically enable effective jailbreaks, with observable disparities between privileged and marginalized groups. Additionally, the paper presents PCDefense, a lightweight defense method that mitigates these vulnerabilities through prompt-based bias adjustments without incurring additional inference overhead. However, here are a few concerns:"
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Motivation**: The paper categorizes jailbreak attacks into manually written prompts and learning-based prompts, stating that learning-based jailbreak prompts rely on gradient information and that these prompts are often nonsensical sequences. However, this overlooks natural-language jailbreak prompts, such as PAIR [1] and DeepInception [2], which are not solely gradient-based and produce coherent, meaningful language. Additionally, for manual attacks, approaches like GUARD [3] build on existing manually crafted jailbreak prompts, refining them over time to remain effective.\n\n2. **Scope of Jailbreak Attacks**: Much of the related work on jailbreak techniques in this paper appears to focus on approaches up to 2024. Given the rapid advancements in jailbreak methodologies, the paper should provide a more detailed discussion of recent jailbreak attacks, such as works like [4] and [5].\n\n3. **Keyword Generation Methodology**: The approach of directly prompting the LLM to generate keywords introduces potential issues. For instance, the generated keywords may lack diversity, as the LLM could repeatedly produce similar terms based on its training biases. Additionally, there is no evaluation or filtering mechanism to determine which keywords are more effective or appropriate for distinguishing between privileged and marginalized groups.\n\n4. **Ambiguity in Baseline Definition and Scope of Comparison**: While Table 2’s caption states it shows “baseline success rates, marginalized success rates, privileged success rates, and the difference between marginalized and privileged success rates,” the paper does not clearly define what constitutes the \"baseline success rate.\" Additionally, to strengthen the evaluation, it would be beneficial to include comparisons with a broader range of jailbreak attacks.\n\n5. **Defense Baselines**: There are some relevant papers at the prompt level to prevent harmful output, such as [6], [7], and [8]. 
As PCDefense also adds prompts to model system prompts and suffix prompts, it should compare the effectiveness with these methods.\n\n**References**:\n\n[1] Chao P, Robey A, Dobriban E, et al. Jailbreaking black box large language models in twenty queries[J]. arXiv preprint arXiv:2310.08419, 2023. \n[2] Li X, Zhou Z, Zhu J, et al. Deepinception: Hypnotize large language model to be jailbreaker[J]. arXiv preprint arXiv:2311.03191, 2023. \n[3] Jin H, Chen R, Zhou A, et al. Guard: Role-playing to generate natural-language jailbreakings to test guideline adherence of large language models[J]. arXiv preprint arXiv:2402.03299, 2024. \n[4] Zheng X, Pang T, Du C, et al. Improved few-shot jailbreaking can circumvent aligned language models and their defenses[J]. arXiv preprint arXiv:2406.01288, 2024. \n[5] Jin H, Zhou A, Menke J D, et al. Jailbreaking Large Language Models Against Moderation Guardrails via Cipher Characters[J]. arXiv preprint arXiv:2405.20413, 2024. \n[6] Wu F, Xie Y, Yi J, et al. Defending chatgpt against jailbreak attack via self-reminder[J]. 2023. \n[7] Zhang Z, Yang J, Ke P, et al. Defending large language models against jailbreaking attacks through goal prioritization[J]. arXiv preprint arXiv:2311.09096, 2023. \n[8] Zhou A, Li B, Wang H. Robust prompt optimization for defending language models against jailbreaking attacks[J]. arXiv preprint arXiv:2401.17263, 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See above"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "S1: Figure 1 is clear and compelling. Although Figure 2 is visually messy with the \"Safety alignment\" words across it. \n\nS2: I am pretty familiar with the jailbreaking lit and jailbreaking methods. As best I can tell, this paper is very novel. In retrospect, it seems almost obvious that this would work. But this jailbrekaing method is never something I had thought of or heard of. \n\nS3: I generally think that the overall contribution is clear and useful. I work with jailbreaking a lot, and I think that this paper is helpful and citable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a jailbreaking method that is based on pitting goals of fairness and helping marginalized groups against goals of behaving harmlessly. They use a system prompt telling the model to treat everyone fairly and deliver harmful queries to the model with statements about the user being from a marginalized group. It can moderately increase model compliance with harmful requests across models they tested."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "W1: I would recommend considering a different title. \"Political correctness\" is not a term that has the same definition to everyone, and it's a political buzzword. \n\nW2: I would recommend that the abstract text be revisited in order to be more specific. There isn't a full description of the attack or defense methods used in the abstract itself. I also think that the abstract could be updated to have smoother writing and less fluff -- I think that some of the sentences in it (especially early) are not very relevant to the specific contributions of the paper.\n\nW3: There is a claim in the paper that political correctness biases are introduced into models from the fine-tuning process. But this seems unjustified. I don't see why they wouldn't also be a result of pretraining data. \n\nW4: I think that section 2.2 is not the most thorough. It could be expanded to better discuss related jailbreaking techniques that involve persuasion and personas. \n\nMinor: A \"Walkerspider\" reference might have a typo and need to be cleaned up. \n\nMinor: I would recommend having a different example in figure 2 and 4 so that readers can see more diverse examples. \n\nMinor: Why were not claude models tested?\n\nMinor: It's principal component analysis, not \"principle\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024do,\ntitle={Do {LLM}s Have Political Correctness? Analyzing Ethical Biases and Jailbreak Vulnerabilities in {AI} Systems},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zf53vmj6k4},\nnote={under review}\n}"
},
"abstract": {
"value": "Although large language models (LLMs) demonstrate impressive proficiency in various tasks, they present potential safety risks, such as \n'jailbreaks', where malicious inputs can coerce LLMs into generating harmful content. To address these issues, many LLM developers have implemented various safety measures to align these models. This alignment involves several techniques, including data filtering during pre-training, supervised fine-tuning, reinforcement learning from human feedback, and red-teaming exercises. These methods often introduce deliberate and intentional biases similar to Political Correctness (PC) to ensure the ethical behavior of LLMs. In this paper, we delve into the intentional biases injected into LLMs for safety purposes and examine methods to circumvent these safety alignment techniques. Notably, these intentional biases result in a jailbreaking success rate in GPT-4o models that differs by 20\\% between non-binary and cisgender keywords and by 16\\% between white and black keywords, even when the other parts of the prompts are identical. We introduce the concept of *PCJailbreak*, highlighting the inherent risks posed by these safety-induced biases. Additionally, we propose an efficient defense method *PCDefense*, which prevents jailbreak attempts by injecting defense prompts prior to generation. *PCDefense* stands as an appealing alternative to Guard Models, such as Llama-Guard, that require additional inference cost after text generation. Our findings emphasize the urgent need for LLM developers to adopt a more responsible approach when designing and implementing safety measures. To enable further research and improvements, we open-source our [code and artifacts](https://anonymous.4open.science/r/PCJailbreak-F2B0) of PCJailbreak, providing the community with tools to better understand and mitigate safety-induced biases in LLMs."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM",
"safety",
"jailbreak"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/21139d7a0d424d86f4de8796c024162850fd8a90.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Do LLMs Have Political Correctness? Analyzing Ethical Biases and Jailbreak Vulnerabilities in AI Systems"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zf777Odl6J | KA-GAT: Kolmogorov–Arnold based Graph Attention Networks | main | Active | Graph Neural Networks;Kolmogorov-Arnold Networks;Graph Attention Networks;Multi-head Attention Mechanism;Model Interpretability | learning on graphs and other geometries & topologies | 3;3;3;3 | 5;4;5;4 | 1;3;1;2 | 2;2;1;2 | 2;2;1;2 | 3 | 4.5 | 1.75 | 1.75 | 1.75 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "This reviewer has no critical ethical concerns."
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. How does KA-GAT perform compared with other GNN baselines on more test datasets?\n2. How does KA-GAT perform when compared with more recent GNN baselines?\n3. The motivations behind the proposed approach should be well discussed and more recent approaches should be investigated.\n4. Is there any theoretical analysis demonstrating the learning capabilities of KA-GAT?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The idea of making use of the Kolmogorov-Arnold layer and graph attention layer is interesting.\n2. The model is flexible to integrate with other GNN layers."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents KA-GAT, a new graph neural network for representation learning. KA-GAT combines the Kolmogorov-Arnold layer and classical graph attention layer to construct the graph neural network. Thus, the Kolmogorov-Arnold layer is claimed to improve the capability of handling complex data, and the multi-head attention layer can improve the flexibility and interpretability of the proposed KA-GAT. Experimental results obtained from classical graph datasets demonstrate that KA-GAT can outperform empirical GNNs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Experimental results are somewhat insufficient. More test datasets or learning tasks should be included in the experiments. I would like to recommend the authors conduct more experiments on well-established datasets, e.g., Pubmed, CoauthorCS, Cora-full, CoauthorPH, Flickr, and ogbn-arxiv, and test KA-GAT with more learning tasks, e.g., graph classification.\n2. Many recent GNNs are not well investigated in the manuscript. Examples include GATv2, APPNP, ADSF GNN, and ARMA GNN. It is also recommended that authors investigate other GNNs recently published in top-tier venues (e.g., NeurIPS, ICLR, ICML, TPAMI, AIJ, and JMLR).\n3. Given 2, More recent GNNs are not compared with the proposed KA-GAT.\n4. The contribution regarding algorithmic and methodological perspectives is limited. The proposed KA-GAT is based on the direct combinations of the Kolmogorov-Arnold layer and graph attention layer. Such a strategy might lack motivation. The authors are suggested to explicitly discuss why combining the Kolmogorov-Arnold layer and graph attention layer is effective in graph representation learning. Moreover, how the Kolmogorov-Arnold layer influences the performance of the proposed KA-GAT should be clearly discussed based on the experimental results. The current version of the manuscript (see Sec. 4.4.1 - 4.4.2) does not provide a meaningful analysis of the presented results. \n5. Theoretical guarantees or analysis of the proposed method (e.g., expressive power) are not provided."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1. Could the authors perform additional experiments on a broader selection of datasets and include more baseline models for a comprehensive performance comparison?\n2. Given the strong theoretical foundation of KAN, could the authors provide theoretical evidence that integrating KAN with GAT enhances model expressiveness?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "The KA-GAT method proposed in the paper, which integrates KAN into GAT, suggests the potential for broader applications of KAN within Graph Neural Networks (GNNs) in the future."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper proposes KA-GAT, a Graph Neural Network (GNN) model combining Kolmogorov-Arnold Networks (KAN) and Graph Attention Networks (GAT) to handle high-dimensional, complex features in graph-structured data. It claims to achieve superior performance on the Cora and Citeseer datasets by using KAN for feature decomposition and a multi-head attention mechanism for dynamic graph component focusing."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Lack of Novelty: Integrating Kolmogorov-Arnold Networks (KAN) into Graph Attention Networks (GAT) does not offer sufficient novelty, as it primarily combines existing techniques without substantial innovation.\n2. Poor Presentation: The overall presentation of the paper is unacceptable.\n3. Insufficient Experimental Support: The experimental setup is limited, with only GCN and GAT used as baselines and tests conducted solely on the Cora and Citeseer datasets."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "see above"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The proposed KA-GAT model outperforms the baselines on both Cora and Citeseer. However, KA-GAT is only compared against GCN and GAT. Thus, the baselines that the authors chose to compare their model against are relatively weak and are not considered state-of-the-art.\n\n- From the results shown in Table 2, it seems that the KAN layer is indeed one of the most important components of the KA-GAT model, and it suggests that those layers could potentially lead to further advancements in the field of graph machine learning."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents KA-GAT, a GNN that consists of different types of layers. First, a Kolmogorov-Arnold Network transforms the initial node features and then the new features are fed to multi-head attention mechanisms along with standard neighborhood aggregation layers such as GAT and GCN layers. The KA-GAT model is evaluated in two node classification datasets. On both datasets, it outperforms the baseline methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The KA-GAT model is only evaluated on two datasets, and those datasets correspond to rather small graphs. Therefore, it is not clear whether similar conclusions could be drawn for other datasets (that correspond to different types of graphs or to larger graphs). In my view, it would strengthen a lot the paper if the proposed model was evaluated on a large number of diverse datasets. \n\n- A lot of details about the experiments are missing from the paper. For instance, it is unclear to me how the two datasets were split into training, validation and test sets. It is also not clear whether the hyperparameters of the models were optimized or whether some fixed values were chosen. In addition, for small datasets such as Cora and Citeseer, it is common practice to repeat each experiment multiple times. Since no standard deviations are provided, I guess that the authors report the performance from a single run.\n\n- Some details are missing from the paper. For example, the Multi-Head Attention GNN Layer is not properly explained in the paper. It is unclear whether this layer is also a neighborhood aggregation layer which computes new node representations. If it is indeed a neighborhood aggregation layer, the authors should discuss how this layer is different from a GAT layer.\n\n- Several architectural choices are not well-motivated. No explanations are provided regarding the KA-GAT architecture. For example, the authors do not explain why did they choose to use a single KAN layer and not more of them. In addition, the proposed model consists of both GAT layers and GCN layers. What is the reason behind that? Typically, GNNs consist of instances of a single layer, and not of many of them.\n\n- In l.95-96, the authors claim that KANs have not been widely applied to graph-structured data. This is not true since GNN models that consist of KANs have already been proposed in [1],[2],[3] and [4]. 
I would suggest the authors update the related work section and discuss the aforementioned works. This would properly demonstrate how this work is positioned with relation to previous works, and also help readers better understand its novelty.\n\n- A large part of the paper is devoted to the discussion of well-known concepts. For instance, the evaluation metrics that are presented in subsection 4.2 are well-known and do not deserve discussion. I would suggest the authors remove the unnecessary content and devote more space to the experimental evaluation of the proposed model.\n\n\n[1] Kiamari, M., Kiamari, M., & Krishnamachari, B. (2024). GKAN: Graph Kolmogorov-Arnold Networks. arXiv preprint arXiv:2406.06470.\\\n[2] Bresson, R., Nikolentzos, G., Panagopoulos, G., Chatzianastasis, M., Pang, J., & Vazirgiannis, M. (2024). Kagnns: Kolmogorov-arnold networks meet graph learning. arXiv preprint arXiv:2406.18380.\\\n[3] De Carlo, G., Mastropietro, A., & Anagnostopoulos, A. (2024). Kolmogorov-arnold graph neural networks. arXiv preprint arXiv:2406.18354.\\\n[4] Zhang, F., & Zhang, X. (2024). GraphKAN: Enhancing Feature Extraction with Graph Kolmogorov Arnold Networks. arXiv preprint arXiv:2406.13597."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to weaknesses above."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- The paper is well-written, clear, and easy to follow, making it accessible to a wide audience.\n- It pioneers the introduction of Kolmogorov–Arnold Networks (KANs) into graph neural networks, which is an interesting and novel approach.\n- Some experiments are conducted to validate the performance of the proposed model."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents KA-GAT, a novel model that integrates Kolmogorov-Arnold Networks (KANs) with Graph Attention Networks (GATs) to address challenges in graph neural networks (GNNs) with high-dimensional, complex features. The KA-GAT model utilizes KANs to decompose and reconstruct features, enhancing its ability to handle nonlinear relationships, while multi-head attention mechanisms improve its interpretability and flexibility. Through extensive experiments on benchmark datasets like Cora and Citeseer, KA-GAT demonstrates superior performance in accuracy, precision, and F1-score compared to traditional models like GCN and GAT."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The main weakness of the paper is that it feels like a straightforward application of Kolmogorov–Arnold Networks to GATs, without providing a strong justification for doing so. The paper lacks deeper insights into why this integration is particularly necessary or impactful.\n- The rationale for combining multiple GNN layers, such as GCN and GAT, in a single framework is unclear. It appears as if they were stacked together without a clear logical chain, raising concerns about whether sufficient tuning was done to optimize this architecture. It may limit the generalizability of the model and questions the necessity of introducing KAN. It would be interesting to see how the model performs using simpler configurations of GCN or GAT without these combinations.\n- Related to the above, the authors themselves acknowledge that the model is complex due to the many components used. While KAN is introduced with the motivation of reducing the computational complexity of MLPs, the resulting model’s complexity seems to contradict this goal, casting doubt on the motivation behind the paper.\n- In the limitations section, the suggestion that future work could explore techniques like model compression or pruning feels generic and lacks depth. The authors need to critically rethink the necessity and motivation for introducing KAN into GNNs, as the current reasoning is not well substantiated.\n- The experiments are insufficient, as they are only conducted on small, simple datasets like Cora and Citeseer. These datasets may not be representative enough to support the claim that \"traditional GNNs often fall short when dealing with high-dimensional features,” as Cora’s and Citeseer’s feature dimensions are not particularly high, which weakens the argument that KAN is essential for handling high-dimensional data.\n- Also, the claim that \"traditional GNNs often fall short when dealing with high-dimensional features\" lacks sufficient evidence. 
More justification and empirical support are needed for this assertion.\n- In Section 4.4, the authors simply re-express the results in table form in the text form, but do not provide enough detailed analysis or interpretation of these results. A more thorough discussion of the findings would strengthen the paper."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "KA-GAT is a graph neural network combining KAN and GAT, optimised for high-dimensional data processing and model interpretability."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024kagat,\ntitle={{KA}-{GAT}: Kolmogorov{\\textendash}Arnold based Graph Attention Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zf777Odl6J},\nnote={under review}\n}"
},
"abstract": {
"value": "Graph Neural Networks (GNNs) excel at processing graph-structured data but often struggle with complex, high-dimensional features and nonlinear relationships. To address these limitations, we propose KA-GAT, a novel model that integrates Kolmogorov-Arnold Networks (KANs) with Graph Attention Networks (GATs). KA-GAT leverages KANs to decompose and reconstruct features, enhancing the model's ability to handle complex data. The multi-head attention mechanism further improves flexibility and interpretability by dynamically focusing on different graph components. Experimental results on benchmark datasets, including Cora and Citeseer, demonstrate that KA-GAT outperforms traditional GNN models such as GCN and GAT in accuracy, precision, and F1-score. These findings underscore KA-GAT's suitability for tasks involving complex graph structures and high-dimensional features, contributing a novel architecture, enhanced interpretability, and robust experimental validation to the field."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Graph Neural Networks",
"Kolmogorov-Arnold Networks",
"Graph Attention Networks",
"Multi-head Attention Mechanism",
"Model Interpretability"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/2bcc41e589803cb4fd0e8f8c7f88b793c959c999.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning on graphs and other geometries & topologies"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/11545fbbd7482f3c6eb777235deeeffd28222dc3.zip"
},
"title": {
"value": "KA-GAT: Kolmogorov–Arnold based Graph Attention Networks"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zfIxlvKq4u | Exploring the Design Space of Autoregressive Models for Efficient and Scalable Image Generation | main | Withdraw | Image Generation;Autoregressive Model | generative models | Yi Xin;Le Zhuo;Qin Qi;Binglei Li;Xu Guo;Siqi Luo;Chang Xu;Xiaohong Liu;Peng Gao | ~Yi_Xin1;~Le_Zhuo2;~Qin_Qi1;~Binglei_Li1;~Xu_Guo6;~Siqi_Luo2;~Chang_Xu4;~Xiaohong_Liu2;~Peng_Gao3 | 3;3;5;5 | 4;4;4;4 | 2;2;2;3 | 2;1;2;2 | 2;2;3;3 | 4 | 4 | 2.25 | 1.75 | 2.5 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. In Table 1, there are other factors besides codebook size of different tokenizers, e.g., LFQ. How does these affect the performance of reconstruction and generation? In particular, the authors may conduct ablation study of LFQ with codebook size 8192 to see how it compares to Chameleon-VQ.\n2. In Table 3, there are other autoregressive models like VAR that are not included. Since VAR represents another design choice in image tokenizer, it would help understand the performance of proposed method. The authors may consider adding these results to the table.\n3. For unified framework in inference phase, does it need two separate AR and MAR? If that's the case, it significantly increases the computational overhead in training. The authors are suggested to report more comprehensive computational cost of AR/MAR models in both training and inference. \n4. Different tokenizers are evaluated on ImageNet. How does scaling to large dataset affect the conclusion? Will that benefit from larger codebook? It would be valuable to compare tokenizers on large scaled dataset. If limited by computational resources, having results on smaller dataset (e.g., subset of ImageNet) and inferring the performance at larger scale would also be beneficial. \n\nReference:\n\n[1] https://arxiv.org/abs/2404.02905"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The work investigates a valuable problem: how to design efficient and powerful autoregressive image generation model. \n2. The work includes benchmark on standard ImageNet 256. \n3. The work includes experiments with model at different scales from ~100M to 1.4B."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this work, the authors investigate the design space of autoregressive image generation (i.e., AR and MAR). On ImageNet experiments, the authors report that larger codebook size doesn't always lead to better performance. It applies bidirectional LlaMA for masked AR and achieves better performance than using vanilla Transformer. Also, the authors propose to combine MAR and AR in inference to improve the performance of MAR."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The major concern is that though the work tries to investigate a valuable problem, the analysis is not comprehensive enough. From my perspective, I find it a bit overclaimed by entitling as \"exploring the design space of autoregressive models\". For example, the paper demonstrates better performance of LlaMA over vanilla Transformer in MAR modeling. However, there are multiple modifications in LlaMA compared to Transformer yet the authors haven't further explore to figure out what is the key factors in design space. For instance, LlaMA applies rotatory positional embedding and SwiGLU FFN instead of absolute positional embedding and ReLU FFN. It would be valuable to further isolate the effect of key components in LlaMA."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "## Justification For Recommendation And Suggestions For Rebuttal:\n- Justification For Recommendation:Reference to Paper Strengths.\n- Suggestions For Rebuttal:\n 1. The analysis of the Impact of different model architectures needs to be provided.\n 2. The analysis of experimental results needs to be more detailed.\n 3. Present more experimental data to support the motivations behind the paper.\n\n## Additional Comments For Authors:\nTo enhance clarity and persuasiveness, the authors should rectify vague descriptions and inaccuracies in the details. For example, at line 278, \"Based on the results\" should specify which table or figure is being referred to. It is recommended that the authors review the entire paper to identify similar instances and make corrections."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper investigates the impact of codebook size on generation quality within discrete image tokenizers and determines the optimal size for the codebook.\n2. This paper proposes the application of QK-Norm and Post-Norm to achieve stable training of a large-scale 1.4B model, facilitating the scaling up of the model.\n3. The writing style of this paper is clear, making it easy to understand."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores a comprehensive recipe for AR models, including the selection of image tokenizers, model architectures, and AR paradigms. This paper conducts a detailed evaluation of four image tokenizers in both AR and MAR settings, investigating the impact of codebook size (varying from 1,024 to 262,144) on generation quality and identifying the most effective tokenizer for image generation. Subsequently, they propose an enhanced MAR model architecture, dubbed MaskGIL. Experiments on ImageNet demonstrate the effectiveness and superiority of this approach."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The innovation is limited. Although the exploration of discrete Image tokenizers is meaningful to me, the rest of the content, whether it's about scaling up or the Unified Framework at the Inference Phase, seems to be a direct concatenation of existing analyses and methods. I did not observe more meaningful or deeper insights. For example: (1) The combination of QK-Norm and Post-Norm mentioned in the text can make the model training more stable. In fact, QK-Norm and Post-Norm are often used in the training of large models. Therefore, I would like to see more analysis on \"the standard LLaMA architecture for visual modeling shows complex divergences due to slow norm growth in the mid-to-late stages of training.\" Whether this is only the case with the LLaMA framework, or whether there are other solutions, or whether this complex divergence is related to data? (2) Why the simple priority of AR before MAR can improve the quality of generation? Does MAR require a more accurate prior? What is the impact of the cumulative error of AR inference?\n2. The comparative experiments for LLMs are limited, involving only Llama and Transformer. The recently strong QWen series and internLM series have not been included in the comparison. The motivation of this paper is to find the optimal combination of MLLMs, which means that it is necessary to compare the combination effects of different LLMs as much as possible. The current QWen2 and IntenrLM2 are both relatively advanced open-source models, which are worth comparing and analyzing.\n3. The evaluation method is overly simplistic, with assessments conducted solely on ImageNet. It would be beneficial to see metrics such as FID on datasets like MSCOCO 30K. I would like the authors to explain why you have chosen to focus on ImageNet, and whether there are any other constraints that were considered?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Please refer to the Weakness."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper investigate the relationship between the performance and codebook size.\n2. QK-Norm and Post-Norm methods are used to enhance the training stability.\n3. It conduct experiments on text-to-image task and support generation at arbitrary resolutions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explore the design space of image AR models. It first evaluate the codebook size of image tokenizers. Based on this, this work proposes MaskGIL, which uses MAR method to ensure few steps, and conduct study on its scaling property. The authors also propose a sampling framework that combines AR and MAR sampling."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The main concern is about the limited novelty and contribution. Most of the contents in this paper have already been studied\nin previous works, as discussed below:\n - **Study of codebook size**. A similar study has already been conducted in Sec 3.1 of LlamaGen [1]. It seems that the biggest difference in this paper’s study is the attempt to use an ultra-large codebook size and show that it doesn't work. However, this conclusion is not surprising since such a large size makes it challenging for the model to learn effectively.\n - **Combining MAR (and the bidirectional architecture) with LlamaGen**. The authors mention in line 315 that the MAR method remains unexplored, but as far as I know, there has already been systematic research [2] on this, which is not discussed by the authors. Compared to [2], this paper’s method seems to simply replace its novel diffusion loss with the traditional cross-entropy loss. However, the usage of discrete tokens and cross-entropy loss is what [2] explicitly criticizes and tries to address.\n - **Effect of Scaling Up and CFG**. Popular previous works like MAR [2], VAR [3], and LlamaGen [1] all provide results for scaled-up models (e.g., Table 4 in [2], Table 6 in [1], Table 1 in [3]). Additionally, they all fully explore the effects of CFG (Tables 1, 2, and 3 in [2]; Table 3 in [3]; and Fig. 3(a) in [1]). Therefore, improvements gained by increasing the parameter size or using CFG are expected. This paper fails to present new knowledge or insights.\n - **Unified sampling framework**. Since MAR naturally supports predicting any number of tokens [2,4], adjusting the number of tokens generated per step is straightforward. [2] has already explored a cosine schedule for the number of tokens generated at each step. 
However, in this paper, the authors seems to simply adopt a basic approach of generating 1 token per step first and then turn to multiple tokens per step, without discussing the advantages of this method or comparing it to previously explored schedules.\nGiven these reasons, I recommend that the authors reconsider their claims of novelty.\n2. Some statements need further clarification:\n - What differences do the tokenizers studied in this paper have besides codebook size? If there are other significant differences, it is better for the authors to list them and the comparison on codebook size alone might not be valid.\n - How is MaskGIL trained? Does it follow the traditional MAR approach of masking a part of the image each time and predicting this part based on the rest part? The training process does not seem to be mentioned in the paper (please correct me if I missed it).\n\n[1] Sun et al., Autoregressive Model Beats Diffusion: Llama for Scalable Image Generation\n\n[2] Li et al., Autoregressive Image Generation without Vector Quantization\n\n[3] Tian et al., Visual Autoregressive Modeling: Scalable Image Generation via Next-Scale Prediction\n\n[4] Chang et al., MaskGIT: Masked Generative Image Transformer"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Generation. Unclear relation to MAGVIT-v2 (Yu et al - note that this reference lacks the year number in your references!): they showed that up to a vocabulary of 2^16=65k, generation performance improves monotonically. Are you claiming that this scaling behavior brakes down above 2^16? If so, there must be evidence - i.e. the plot from Magvit2 (Fig1 in their paper) should ideally be reproduced, or at least discussed in the paper.\n- Why is section 4.4. proposing to use Chameleon-VQ - the paper before seems to lay out that LlamaGen-VQ is the better choice. Please discuss and justify in the paper, or make the corresponding experiments with LLamaGen-VQ.\n- Figure 9: \"Our framework achieves an optimal trade-off by generating 128 tokens with the AR.\". Why is this the case? This remains elusive to the reader, as for an optimum one would expect a maximum/minimum in the plot (which is not at 128). This hence needs elaboration in the caption of the Figure."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "+ the paper attempts to investigate different setups to scale for best quality in AR and MAR - a much needed investigation\n+ good overview of existing methods, i.e. Figure 2 and corresponding text nicely elaborates on the existing context.\n+ the paper is understandable very well"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper \"Exploring the Design Space of Autoregressive Models for Efficient and Scalable Image Generation\" targets the investigation of a scalable setup for both autoregressive (AR) and masked autoregressive (MAR) models. To this end the paper starts out with an analysis of reconstruction accuracy of 4 Tokenizers across both paradigms, AR and MAR. Based on this the best model is scaled (by also including added normalization to the architecture), and performance to 4 methods from the literature analyzed. Finally, a unified AR/MAR geneartion framework is proposed."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The study on codebook size is confounded by the architectures of the tokenizers. To make a study on codebook size, a tokenizer must be selected, and then codebook size varied (all other things equal). If such an ablation study is not possible, an alternative would be to carefully rewrite the paper to make very clear that it analyzes from the set of 4 methods (and not the codebook size). E.g. statements like \"when the codebook size reaches a certain level, the improvement in the reconstructed image quality is limited.\" cannot be claimed in generality from the given analysis. Please rephrase throughout the whole paper.\n- Figure 4 is redundant to Figure 3. The 200 Epoch slice corresponds to shown bar plots. This is confusing to readers, in particular if skimming the paper figures. Please remove, or add it with explicit caption a subfigure to the same Figure.\n- The proposed AR/MAR framework is too briefly analyzed. E.g.the proposed method suggests FID scores might be different in the AR part vs. the MAR part of the image. This should be analzyed. Also, if this turns out to be true, the AR part could be spaced out to cover the whole image (as 'seed' generation), which will likely improve quality. A plot that shows FID vs. generation time is missing (Figure 8 gives some intuition, but quantitative evidence is needed).\n- Some formulations must be changed in the paper. Abstract: \"we introduce modifications like query-key normalization and postnormalization\" sounds like the paper invents these methods. Please make clear that this is not the case, e.g. \"we adapt recently proposed methods like query-key normalization, ...\". Also, please check some typos, e.g. \"casual transoformer\".\n- Overall, it seems to me that too much is packed into this paper. E.g. the AR/MAR combination could be its own paper if pushed more, and investigated properly (some with other topics)."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We explore the design space of mask autoregressive models to achieve efficient and scalable image generation."
},
"_bibtex": {
"value": "@misc{\nxin2024exploring,\ntitle={Exploring the Design Space of Autoregressive Models for Efficient and Scalable Image Generation},\nauthor={Yi Xin and Le Zhuo and Qin Qi and Binglei Li and Xu Guo and Siqi Luo and Chang Xu and Xiaohong Liu and Peng Gao},\nyear={2024},\nurl={https://openreview.net/forum?id=zfIxlvKq4u}\n}"
},
"abstract": {
"value": "Autoregressive (AR) models and their variants are re-revolutionizing visual generation with improved frameworks. However, unlike the well-established practices for building diffusion models, there lacks a comprehensive recipe for AR models, e.g., selecting image tokenizers, model architectures, and AR paradigms. In this work, we delve into the design space of general AR models, including Mask Autoregressive (MAR) models, to identify optimal configurations for efficient and scalable image generation. We first conduct a detailed evaluation of four prevalent image tokenizers across both AR and MAR settings, examining the impact of codebook size (ranging from 1,024 to 262,144) on generation quality, and identify the most effective tokenizer for image generation. Building on these insights, we propose an enhanced MAR model architecture, named Masked Generative Image LLaMA (MaskGIL), comprising of LlamaGen-VQ and Bidirectional LLaMA. To ensure stable scaling, we introduce modifications such as query-key normalization and post-normalization, resulting in a series of class-conditional MaskGIL models, ranging from 111M to 1.4B parameters. MaskGIL significantly improves the MAR baseline, achieving an 3.71 FID comparable to state-of-the-art AR models on the ImageNet 256$\\times$256 benchmark, with only 8 inference steps, far fewer than the 256 steps needed for AR models. Additionally, we introduce a text-conditional MaskGIL model with 775M parameters, capable of flexibly generating images at any resolution with high aesthetics. To bridge AR and MAR image generation, we investigate their combination during the inference phase. We release all models and code to foster further research."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Yi_Xin1",
"~Le_Zhuo2",
"~Qin_Qi1",
"~Binglei_Li1",
"~Xu_Guo6",
"~Siqi_Luo2",
"~Chang_Xu4",
"~Xiaohong_Liu2",
"~Peng_Gao3"
]
},
"authors": {
"value": [
"Yi Xin",
"Le Zhuo",
"Qin Qi",
"Binglei Li",
"Xu Guo",
"Siqi Luo",
"Chang Xu",
"Xiaohong Liu",
"Peng Gao"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Image Generation",
"Autoregressive Model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "xin|exploring_the_design_space_of_autoregressive_models_for_efficient_and_scalable_image_generation"
},
"pdf": {
"value": "/pdf/6579f2899d00a6782baa9e2be369bb1751d93b64.pdf"
},
"presentation": null,
"primary_area": {
"value": "generative models"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Exploring the Design Space of Autoregressive Models for Efficient and Scalable Image Generation"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||
zfQA8y3n2o | Divide-Verify-Refine: Aligning LLM Responses with Complex Instructions | main | Active | Large Language Model;Instruction Following;Constraints Following | foundation or frontier models, including LLMs | 3;3;5;6;8 | 3;3;3;4;4 | 2;3;3;3;4 | 2;1;2;2;3 | 3;3;2;2;4 | 5 | 3.4 | 3 | 2 | 2.8 | 0.860663 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Are there any examples of external tools (e.g., Python code) being used in the experiment?\n- What is the overhead of DVR compared with all the baselines?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The paper proposes a new benchmark to better evaluate complex instruction-following capabilities of LLMs.\n- The paper includes a comprehensive evaluation with a detailed analysis on the effectiveness of DVR."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Divide-Verify-Refine (DVR), which is a framework that helps LLM generate responses that can meet complex instructions. Specifically, DVR is divided into three steps: (1) Divide, where LLMs is prompted to divide instructions into multiple constraints and to prepare approximate tools for each constraint; (2) Verify, where tools are used to check whether the response meets corresponding constraints and, if not, provide detailed feedback; (3) Refine, where a refinement repository is proposed to collect successful refinement processes which will be used as few-shot examples when LLMs use the detailed tool feedbacks to refine their previous responses. Evaluation on CoDI and ComplexInstruct (a newly proposed benchmark) across different LLMs demonstrates the effectiveness of DVR."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The primary concern with this work is its novelty. While authors claim that DVR differs from CRITIC by (1) incorporating a refinement repository module to provide few-shot examples and (2) using multiple tools rather than a single tool to provide a more detailed feedback, such differences are quite minimal and don't seem to be pass the bar for ICLR.\n- The assumption of external tools being available for all existing constraints is not realistic. While authors claim that code generation models can be used to generate Python scripts (i.e., new tools) to check new constraints when there are no existing tools, the correctness of the newly generated tools cannot be guaranteed, and thus their detailed feedback will not be reliable."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. Is this system modular? I.e. what would it take for us to incorporate additional kinds of constraints, tools, etc.?\n2. Are there any other datasets you can include? Maybe datasets like code generation datasets where constraints can be more implicit but can be programmed into the prompt from the evaluator's side (like syntax correctness, correct return types, etc.)? This is dependent on the amount of effort required to get this working with such datasets.\n3. Is there a reason you don't evaluate GPT models? I understand that such errors are more prone to occur in open source models but it would be nice to see GPT-4 as a baseline"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "Firstly, I really liked how the paper was written. It was clear and easy to understand. The problem being addressed is a well documented issue of response alignment, and the technique seems to use a divide and conquer approach to solve it, which seems quite novel. The paper also presents a thorough evaluation of the technique over multiple models compared with several baselines."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a technique called DVR to improve the alignment of LLM responses specifically with the constraints given within the responses. The technique is a three stage iterative process, where the first stage gathers the constraints, the second gets the response from the LLM, and the third iteratively refines the response based on the constraints satisfied. The technique improves the performance of major LLM models by a significant amount."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I don't see too many weaknesses in the paper. One thing I would like to see addressed is a limitations or future work section to specify the shortcomings of the tool. It would also be nice to have a discussion on what it would take to expand the constraints from being purely conjunctive (which I think it is right now) to including disjunctions and negations at the very least. I also think it is important to better showcase that in each iteration of the feedback generation, an LLM is called to get the refined response. Right now when I look at the diagram, unless I am paying attention to the arrow shapes, I would think that the LLM is only called when getting the initial response."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Take a look at the weakness section"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The DVR framework introduces a structured methodology to process instructions, decomposing, verifying, and refining which can lead to better results.\nThe method is on the fly and not need for fine-tunning\nDVR improves the performance across multiple datasets"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Paper introduces Divide-Verify-Refine framework to overcome the struggele of LLMs for following instruction. In this framework first break down complex instructions into individual constraints and prepare suitable tools (Divide). Then it uses these tools to check responses and ensure quality feedback (Verify) and then it create a refinement repository to gather successful processes, using them as examples for future cases to improve the model (Refine). Also, authors develop new dataset of instructions, each containing 1-6\nconstraint."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The effectiveness of the system is on the quality and accuracy of the external tools used for verification and feedback. Poorly performing tools can degrade the overall performance of the LLM.\n\nIt would be beneficial to compare DVR method with other methods, such as ReAct, in other tasks such as reasoning tasks."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "NA"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Could the authors elaborate on how the DVR framework ensures that the refinement repository remains free from erroneous examples that could degrade LLM performance?\n\nHow might DVR handle instructions with unstructured or conflicting constraints, and are there plans to address such limitations in future work?\n\nCould further details on how tools were evaluated for their accuracy and reliability in feedback generation be provided?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The framework’s approach to decomposing complex instructions and using external tools to verify constraint adherence is novel in the context of LLM alignment without model retraining.\n\nEvaluation results demonstrate DVR's effectiveness across varying constraint complexities.\n\nThe paper is clear, with well-defined modules for dividing, verifying, and refining stages, making the strategy understandable and reproducible."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose *Divide-Verify-Refine (DVR)*, a framework designed to improve the adherence of large language models (LLMs) to complex, multi-constraint instructions, which divides instructions into single constraints, employs external tools for reliable verification and refines responses using a repository of successful adjustments. The authors conduct experiments on a new dataset to demonstrate the DVR's ability to enhance constraint adherence across various instruction types without requiring retraining."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The criteria for selecting external tools lack specificity, making it unclear how LLMs autonomously match tools to specific constraints.\n\nThe reliance on specific tools (e.g., Python-based scripts) could limit DVR’s generalizability across broader application domains or languages.\n\nWhile ComplexInstruct is valuable, further validation on industry-standard benchmarks could better position DVR’s real-world applicability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "No questions."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The DVR framework significantly enhances LLMs' ability to adhere to complex instructions containing multiple constraints, which is crucial for mission-critical applications.\n\n- Unlike fine-tuning approaches, DVR improves LLM performance without the need for extensive retraining, making it more accessible and less computationally expensive.\n\n- By leveraging external tools for verification, DVR provides a more reliable feedback mechanism than LLMs' self-reflection, which tends to be unreliable."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents the Divide-Verify-Refine (DVR) framework, which enhances the ability of Large Language Models (LLMs) to follow complex instructions with multiple constraints. The DVR framework consists of three steps: dividing complex instructions into single constraints, verifying responses with external tools for reliable feedback, and refining responses using a repository of successful refinement examples. The framework is evaluated on two datasets, showing significant improvements in constraint adherence without the need for retraining. It addresses the challenges of feedback quality and constraint diversity by integrating tools and leveraging past experiences, respectively. The paper concludes that DVR offers a scalable solution to enhance the practical usability of LLMs in real-world applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- When preparing appropriate tools for verification, how accurate are the tools? Are there many problems with the tools themselves? If there is a problem with the tools, it can cause problems in all subsequent modules.\n- For completely new constraints, there may not be examples where a tool is readily available. Imagine a scenario where we need to develop a large language model (LLM) to generate text in a specific style, say \"Gothic\" style, but currently there is no tool to directly verify if the text is in this specific style.\n- DVR has more flow modules, which results in the experimental setup not being very solid."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Our framework enhances LLMs' constraint-following ability by leveraging external tool feedback and a refinement repository without retraining."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024divideverifyrefine,\ntitle={Divide-Verify-Refine: Aligning {LLM} Responses with Complex Instructions},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zfQA8y3n2o},\nnote={under review}\n}"
},
"abstract": {
"value": "Recent studies show that LLMs, particularly open-source models, struggle to follow complex instructions with multiple constraints, hindering their adoption in mission-critical applications. Despite the importance, methods to improve LLMs' adherence to such constraints remain largely unexplored, and current research focuses primarily on evaluating this ability rather than developing solutions. While a few studies enhance constraint adherence through model tuning, this approach is computationally expensive and heavily reliant on training data quality. An alternative is to leverage LLMs' self-correction capabilities, allowing them to adjust responses to better meet specified constraints. However, this self-correction ability of LLMs is limited by the feedback quality, as LLMs cannot autonomously generate reliable feedback or detect errors. Moreover, the self-refinement process heavily depends on few-shot examples that illustrate how to modify responses to meet constraints. As constraints in complex instructions are diverse and vary widely (e.g., text length, number of bullet points, or inclusion of specific keywords), manually crafting few-shot examples for each constraint type can be labor-intensive and sub-optimal. To deal with these two challenges, we propose the Divide-Verify-Refine (DVR) framework with three steps: (1) Divide complex instructions into single constraints and prepare appropriate tools; (2) Verify: To address the feedback quality problem, these tools will rigorously verify responses and provide reliable feedback (e.g., Python scripts for format checking or pre-trained classifiers for content analysis); (3) Refine: To address the constraint diversity challenge, we design a refinement repository that collects successful refinement processes and uses them as few-shot demonstrations for future cases, allowing LLMs to learn from the past experience during inference. 
Additionally, recognizing that existing datasets lack complexity and have internal conflict, we develop a new dataset of complex instructions, each containing 1-6 constraints. Experiments show that the framework significantly improves performance, doubling LLama3.1-8B's constraint adherence and tripling Mistral-7B's performance on instructions with 6 constraints. The code and dataset are available at https://anonymous.4open.science/r/CODE_ICLR2025-52CE/README.md"
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Large Language Model",
"Instruction Following",
"Constraints Following"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/b1ad1ca608683276811d18103f3af4aeb7975101.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Divide-Verify-Refine: Aligning LLM Responses with Complex Instructions"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zfeso8ceqr | Deconstructing What Makes a Good Optimizer for Autoregressive Language Models | main | Active | optimization;LLMs;language models;Adam | foundation or frontier models, including LLMs | 3;5;5;8 | 3;3;3;4 | 2;3;2;3 | 1;2;2;3 | 3;3;3;4 | 5.25 | 3.25 | 2.5 | 2 | 3.25 | 0.889297 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Despite the issues mentioned in the weakness, I have the following additional questions:\n\n1. why are the loss curves for signum different in Figure 4, $\\beta_1$ and Figure 5, $\\beta_2$? Signum is defined as lion with $\\beta_1=\\beta_2$, if I remember correctly.\n\n2. It would be interesting to add AdaSGD (Wang et al., 2020) into comparison, which only uses one more register than normal SGD, but has much better convergence, stability and tolerance to larger learning rate. AdaSGD can be viewed as a global version of adalayer, i.e., by viewing all the parameters as one layer. A very recent work by [Xie et al.,2024] shows that AdaSGD closes a large portion of the gap between SGD and Adam on GPT2. Replacing the SGD part in adalayer by AdaSGD might help us better understand the importance of adaptivity at different levels of granularity, under the unified framework of AdaLayer, or more generally Adam for arbitrary partition of parameter blocks.\n\n3. typo. \"in perplexity\" in line 317 should be \"in validation loss\"?\n\nI am willing to improve my score if the authors could address my concerns."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. This paper is well-written and easy to understand. The experimental setups, discussion and interpretations of the results, and limitations of the methods are spelled out clearly. Though it might not have great novelty, I think the extensive study on hyperparameters on different optimizers is a valuable contribution to the LLM optimization community. I enjoyed reading this paper.\n2. Soundness of the result: As the authors mentioned in the paper, due to limits of computational resources, they only do 1D hyperparameter sweep, instead of 2D or even higher-order sweeps. This is fine for the major finding of the paper, which is a positive result, that the non-SGD mainstream optimizers are robust to choice of wide range of hyperparameters. Tuning remaining hyperparameters will definitely make the best performance of the current hyperparameter better. \n3. The finding that last layer and layernorm parameters need more fine-grained (than layerwise) adaptivity is interesting. It sheds light on future research towards how Adam works and memory-efficient variants of Adam."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The study compares optimization algorithms, including SGD, Adafactor, Adam, Lion, and Sophia, for training language models of different sizes and architectures. Results show that, apart from SGD, all algorithms achieve similar performance and robustness to hyperparameter variations, suggesting practical considerations like memory and simplicity may guide optimizer choice. The researchers further dissect Signum and Adalayer to explore Adam’s adaptivity effects. They find that adaptivity on the last layer and LayerNorm parameters is essential for maintaining performance and stability, highlighting these elements as crucial for reliable training."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Though I appreciate the extensive abalation on the LLM training experiments with different hyperparameters, the interpretation and the takeways of the experiments are vague and imprecise. I will elaborate it below.\n\n1. The authors often use \"Comparable\" to describe the performance of models trained with different hyperparameters, which I could not find a precise definition in the paper. A validation loss of 3.07 may look close to 3.10 in terms of the relative ratio, but because the minimum of loss is not zero, the suboptimality between 3.07 and 3.10 maybe very significant, say if the minimum of population loss is 3.0 for 150M models. \n\n As a result of imprecise definition, it is not clear what is the consequence of having comparable validation loss. Does it imply such gap between comparable losses are negligible so in practice if people use any hyper-parameters which lead to losses comparable to optimal validation loss in the first attempt, they are satisfied and do not need to spend more compute to rerun the experiments for the optimal optimal validation loss? If they would still like to rerun the experiment, no matter how small the gap in validation loss is, having a wide range of hyperparameters lead to comparable performance does not imply ease of hyperparameter tuning.\n\n2. The authors write in line 327 \"Takeaway: generally algorithms are more stable with respect to other hyperparameters and the possible gains in performance are relatively small compared to learning rate and momentum.\"\n\n I do not fully agree with this claim take-home message, given the experiments in the current draft. It seems that the authors get this conclusion just from the shape of the loss curves in those 1D hyperparameter sweep plots, instead of based on some quantative metrics. 
This could be problematic, because the range of some hyperparameter sweep in section 3.5 for hyperparameters other than learning rate and momentum seems to be smaller than that of learning rate and momentum. I am convinced that for sufficiently small WD and epsilon, the final validation losses are nearly the same. But for warmup, batch size, and $beta_2$, the range are smaller and thus I am not convinced. For example, the ratio between maximal and minimal batch sizes tried in the experiments are just 8, while the ratio is more than a thousand for learning rate. But given that the authors are training in the \"chinchilla optimal\" regime, which means the flops are fixed for different batch sizes, we should expect changing batch size can have similar effect to changing learning rates given the linear scaling rule for SGD [Goyal et al., 2017, Li er al.,2021] or square-root scaling rules for Adam [Malladi et al., 2022]. Therefore I suspect if the authors also vary batch size in a range of 1000 times, the impact of batch size would be much larger. (The authors also mention the 2D effect between batch size and learning rate in line 166)\n\n For pretraining percentage and $beta_2$, I encourage the authors to include more extreme values to support the claim that these hyperparameters do not matter. Instead showing them they do not matter in the current small range, it is more informative to show to the readers at what extreme values the loss starts to increase significantly. It is possible for warmup that we can completely get rid of it, and in this case it is useful to include that in the plot, just like weight decay.\n\n\n**Soundness of finding in Section 3.6**: First the citation of Lemma 1 from [Balles\\&Henni, 2018] seems to be quite different from what that is in the cited paper. 
Second, given the assumption in the Lemma 1, $\\delta_{adam}$ seems just defined as $\\frac{\\mathbb{E}[m]}{\\sqrt{\\mathbb{E}[v^2]}}$, and the lemma implies that $\\delta_{signum}$ must be $1$, which is not necessarily true, when sign of $m$ is negative. Maybe the authors are missing an absolute sign on $\\mathbb{E}[m]$.\n\nI also do not understand the argument between line 357-line 360 when $\\beta_1=\\beta_2$. The authors are essentially claiming for all coordinates and all time steps, the first and square root of second moment have the same ratio, which I do not see why it should be true. This prediction could be easily checked by just plotting the update magnitude per coordinate for Adam and see if they are roughly the same. If the authors allow a different scaling factor for every coordinate, then this claim becomes trivial because all first-order optimization algorithms using diagonal preconditioning have the same sign for each coordinate in its update and only differ by scaling."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "* It is intuitive to me that adaptivity can help with optimization of LayerNorm as the affine parameters are applied in a coordinate-wise way, but I can't understand why the last layer (which I think is the embedding layer in the case of these models?) needs adaptivity. Can the authors please elaborate on this?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "* The thorough empirical evaluation fulfills a missing piece in the current literature of optimizers used in training language models, as most works in this literature stick to Adam.\n* The experimental grids used for different experiments seem sufficient to draw robust conclusions. The authors incorporate models of different scales and explore 1 dimensional grid searches which is reasonable given the intense compute needs of such studies.\n* This work uncovers an interesting phenomenon which attributes most of the adaptivity gains from Adam and similar algorithms to normalization and last layer's parameters.\n* Considering stability of the training algorithms wrt hyperaparameters as opposed to the final performance is an important aspect since eventually a practitioner wouldn't be able to find the best possible hyperparameters but the hope would be to be able to easily food \"good\" set of hyperparameters."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper conducts a thorough experimental evaluation of the performance and stability of common optimizers used in pre-training language models and uncovers interesting and previously unknown parameters in the transformer architecture that seemingly benefit more from the adaptivity of some of these optimizers."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "* One major drawback of this study could be the lack of a search over min learning rate at the end of cosine scheduling. As the authors have mentioned, learning rate plays the most important role among all these parameters. Recent studies (https://arxiv.org/abs/2410.05192) have shown that the final stage of training where the learning rate is decreased down to the minimum learning rate plays a crucial role in determining the final performance. Based on this, I'd be curious to know if certain algorithms would outperform others if the min learning rates were chosen carefully, since in that case individual momentums and how the history of gradients is dealt with could significantly differ among different algorithms. I would predict that using some optimizers, some larger learning rates would result in drastically better performances given that the minimum learning rate is small enough.\n\n* I think implementing AdaFactor and AdaLayer would be more fruitful if parameters corresponding to different attention heads would be disentangled. Since the authors don't mention anything in this regard, I assume that they haven't implemented these algorithms to do so, but the nature of computations in attention heads would maybe benefit from adaptivity isolated to the attention head over all of the attention parameters. \n\n* Recently shampoo has gained significant popularity in training language models. It would be nice if the authors could incorporate shampoo among these optimizers to have a more complete story.\n\n* It would be nice if the authors could elaborate on how optimality of different hyperparameters changes as the scale increases."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- line 165, are there only two hyperparameters in the problem? Because if there are more, then the optimal solution is only achievable by solving the N-D problem, with N being the number of hyperparameters.\n\n- While I agree with the authors that cross evaluating all combinations of hyperparameters is intractable, why not use Bayesian optimization to find the best set of hyperparameters for each experiment?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The Authors show that most optimizers, other than SGD, can match Adam in stability and performance across varied model sizes and hyperparameter settings. Furthermore, it reveals that, to recover stability concerning learning rate and to maintain strong performance, adaptivity is essential, particularly in the last layer and LayerNorm parameters. This insight challenges the notion that Adam is uniquely suited for language model training."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors argue that the Adam optimizer is the default choice for most practitioners training language models, and they seek to evaluate this belief by comparing Adam against other popular optimizers. Their findings show that, except for SGD, other optimizers perform on par with Adam. Following this, they explore what components contribute to Adam's success by evaluating two simplified versions of it: Signed Momentum and Adalayer."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- Limited contribution: The main contribution of the paper is the finding that adaptivity on both the last layer and LayerNorm provides the necessary conditions for retaining performance and stability with respect to the learning rate.\n\n- The paper could serve as a guide for those looking to choose an optimizer by comparing performance versus computational cost. However, after reading it, I didn’t feel I gained a clear insight to help decide on a better optimizer. In this sense, the paper has potential but feels incomplete."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Many follow-up works aim to improve Adam's performance or to develop memory- or compute-efficient versions of it. What is the rationale behind selecting these specific optimizers for comparison? Was the choice based on practical popularity, conceptual connections to Adam, or some other criteria?\n\n- The plots comparing final validation loss (e.g., Figure 1) are presented so that each optimizer’s optimal learning rate aligns, with the x-axis showing multiples of this optimal learning rate. However, why should different optimizers be compared over the same scale of learning rate values? For example, SGD appears the most sensitive to changes in learning rate, but could this be due to it requiring a finer-grained learning rate grid for a fair comparison?\n\n- A concurrent work, [Zhang et al. 2024: Adam-mini: Use Fewer Learning Rates To Gain More], also explores the concept of a coarser, \"parameter-group\" -wise learning rate for Adam, proposing it as the minimal level of adaptivity required. While it is of course not required to cite or evaluate concurrent work, a comparison with AdaLayer would be interesting given the similarity in approach. It would also be useful to see whether *Adam-mini* aligns with this paper’s findings on layer-specific adaptivity. This paper shows that adaptivity in the last layer and LayerNorm is necessary, yet limiting adaptivity to these layers alone still underperforms compared to Adam. So, what degree of adaptivity is \"sufficient\" to achieve Adam's full performance?\n\n- Minor changes are needed on some of the later plots, as they are difficult to read due to small font sizes in the legends and axes, and they lose resolution when zoomed in. Please adjust these for clarity."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The evaluation of hyperparameters is thorough, and the documentation of experimental details is complete. Although it is limited to one-dimensional changes and does not capture the interplay between hyperparameters, the authors clearly state these limitations.\n\n- The observation that adaptivity in the LayerNorm and final layers is necessary for LLMs is interesting."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper evaluates several optimizers, including SGD, Adam, Adafactor, Lion, and Sophia, in the context of large language models (LLMs). The results suggest that all optimizers, except for SGD, perform similarly regarding optimal performance and sensitivity to hyperparameters. The authors then further investigated two simplified versions of Adam: Signed Momentum and AdaLayer. They found that adaptivity in the LayerNorm and final layers is essential for achieving stable performance and learning rates."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- My main concern with this paper is the significance of its contribution. The paper does not introduce new algorithms or provide insights into why some evaluated algorithms work or fail; it is limited to an empirical comparison. While this can still be valuable, I’m unsure if the paper adequately addresses the question posed in its title, “What Makes a Good Optimizer for Autoregressive Language Models.” It also does not seem to explain how the characteristics of “autoregressive” or “language” models interact with optimization or why the results might differ in other tasks and architectures. Additionally, the organization is somewhat confusing, and it’s unclear how the two parts of the paper relate.\n\n- There are quite a few observations from each experiment, but it’s unclear what the main message or takeaway of the paper is. For example, it doesn’t clearly outline what practitioners should do, what future researchers in algorithm design should focus on, or provide any conceptual or theoretical insights that explain these observations. The paper could be significantly improved by clarifying the main questions it actually addresses and having a better discussion paragraph on what others could do with this information. \n\n- Citations of other optimizer-comparison papers could be more comprehensive. For instance, [Schmidt et al., 2020: *Descending through a Crowded Valley - Benchmarking Deep Learning Optimizers*] is an example that could be included."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024deconstructing,\ntitle={Deconstructing What Makes a Good Optimizer for Autoregressive Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zfeso8ceqr},\nnote={under review}\n}"
},
"abstract": {
"value": "Training language models becomes increasingly expensive with scale, prompting numerous attempts to improve optimization efficiency. Despite these efforts, the Adam optimizer remains the most widely used, due to a prevailing view that it is the most effective approach. We aim to compare several optimization algorithms, including SGD, Adafactor, Adam, Lion, and Sophia in the context of autoregressive language modeling across a range of model sizes, hyperparameters, and architecture variants. Our findings indicate that, except for SGD, these algorithms all perform comparably both in their optimal performance and also in terms of how they fare across a wide range of hyperparameter choices. Our results suggest to practitioners that the choice of optimizer can be guided by practical considerations like memory constraints and ease of implementation, as no single algorithm emerged as a clear winner in terms of performance or stability to hyperparameter misspecification. Given our findings, we further dissect these approaches, examining two simplified versions of Adam: a) signed momentum (Signum) which we see recovers both the performance and hyperparameter stability of Adam and b) Adalayer, a layerwise variant of Adam which we introduce to study the impact on Adam's preconditioning for different layers of the network. Examining Adalayer leads us to the conclusion that, perhaps surprisingly, adaptivity on *both* the last layer and LayerNorm parameters in particular are necessary for retaining performance and stability to learning rate."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"optimization",
"LLMs",
"language models",
"Adam"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/11c3566129e6a21ba0d22a0f98131d6e92e46cbe.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Deconstructing What Makes a Good Optimizer for Autoregressive Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zfgYC3sDt6 | Understanding and Mitigating Miscalibration in Prompt Tuning for Vision-Language Models | main | Active | Vision-Language Models;Confidence calibration;Outlier Regularization;Prompt Tuning | alignment, fairness, safety, privacy, and societal considerations | 3;5;5;5 | 5;4;3;3 | 2;3;2;2 | 1;3;2;2 | 2;3;3;3 | 4.5 | 3.75 | 2.25 | 2 | 2.75 | -0.870388 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please refer to the weakness part."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. A new blind point was identified (CoOp and KgCoOp becomes over/under confident)\n2. Experimental results show performance improvement when the proposed regularizer is added."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper, through CoOp and KgCoOp, observes that when a model undergoes adaptation via prompt tuning, CoOp tends to be overconfident in novel classes, while KgCoOp, on the contrary, becomes underconfident. To address this tendency towards overconfidence/underconfidence, this paper proposes a Dynamic Outlier Regularizer (DOR) term, which demonstrates performance improvements when the proposed regularizer is added to the algorithms for prompt learning of VLMs."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The motivation was inferred from CoOp and KgCoOp, but in fact, these two algorithms lack adequate consideration for novel classes. For an effective analysis of this phenomenon, algorithms that directly account for novel classes should be utilized, such as CoCoOp, MaPLe, PromptSRC, DEPT, and TCP. If similar tendencies are observed in these algorithms, it would strongly support the authors' claim.\n\n2. It appears that further explanation is needed regarding the concept of \"Texture divergence.\" From my understanding, this divergence is due to the diversity of textual representation arising from CoOp's prompt learning method. However, finding concrete evidence to confirm this explanation is challenging. Additionally, a more detailed explanation of the notation, particularly the keywords emphasized in the paper, would enhance the reader's understanding of the study.\n\n3. More comparisons with other algorithms are necessary. Given the large number of prompt learning algorithms, further experimental comparisons are needed to confirm whether the effect of this regularizer is generalizable. (Similar to the first weakness statement)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See the weakness section."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper identifies a relevant issue in the domain of prompt tuning and confidence calibration in VLMs, an area of growing importance.\n- The authors introduce a novel idea of using dynamically sampled textual outliers to address calibration inconsistencies, and the approach shows effectiveness across various datasets.\n- DOR’s flexibility in working with multiple prompt-tuning methods is a potential advantage."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes Dynamic Outlier Regularization (DOR) to improve confidence calibration in prompt-tuned vision-language models (VLMs), particularly in CLIP. The authors argue that current prompt-tuning methods lead to miscalibration by creating a trade-off between base and new classes, with CoOp overconfident on new classes and KgCoOp underconfident on base classes. DOR aims to resolve this trade-off by introducing textual outliers to regularize model predictions, promoting consistent calibration across both base and new classes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper does not include comparisons with several recent and closely related methods, such as CoPrompt and PromptSRC, which also address calibration issues and trade-offs between base and new classes. Without these comparisons, it is unclear whether DOR provides any substantial advantage over the state of the art, especially since these methods were specifically designed to tackle the same calibration challenges.\n\n- The primary claim—that prompt-tuning methods like CoOp and KgCoOp introduce calibration trade-offs between base and new classes—has already been extensively studied in prior works. For instance, CoPrompt effectively handles these issues and includes mechanisms specifically designed to manage calibration across both class types. As such, the problem statement lacks novelty, and the paper provides insufficient rationale for why DOR would be preferable to these existing methods.\n\n\n- While the paper offers some empirical evidence for DOR’s effectiveness, it lacks analysis that explains why the use of textual outliers should systematically address calibration trade-offs. \n\n- The proposed solution, while conceptually interesting, lacks practical guidelines on how to effectively select and implement outliers in a real-world setting. Given that the efficacy of DOR relies on appropriate textual outlier selection, more detailed criteria or algorithms for selecting these outliers would be necessary for practitioners to adopt this method.\n\n- Interestingly, some of the latest methods show less improvement with the proposed solution compared to some of the earlier methods like CoOp. This indicates that the latest methods are already capable of handling the problem and don't require such a solution proposed in this paper. Again, the paper lacks a comparison to the latest method, making it difficult to understand if it has any usage."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. When you calculate the semantic similarity between textual labels in WordNet and the base classes, do you use cosine similarity? Please Clarify. Would using a different metric (e.g. Euclidean distance) impact results?\n2. The outliers are selected based on the top-K, but there’s no mention of a specific similarity threshold. Would setting a threshold affect performance?\n3. How frequently are the outliers updated during training? Does the frequency affect DOR’s calibration performance?\n4. In page 8, line 407-408, is it a typo (should be \"CoCoOp\" ranther than \"CoOp\") or a wrong statement (the number is wrong if you campred to the zero-shot CLIP)? Please correct it.\n5. What criteria were used to select visual outliers from ImageNet-1K? How to ensure these outliers are sufficiently distinct from base classes without introducing irrelevant noise?\n6. How does DOR influence the feature space of base classes when incorporating visual outliers?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. DOR integrates smoothly with popular prompt-tuning methods (e.g., CoOp, KgCoOp) without requiring major architectural changes, making it easy to adopt in existing pipelines. \n2. The authors provided insightful analysis of how current prompt-tuning methods impact confidence calibration, with clear explanations for why overconfidence or underconfidence arises in certain settings. The motivation is clear.\n3. The manuscript is well-written and in a good logic."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces Dynamic Outlier Regularization (DOR), a method to improve confidence calibration in fine-tuned VLMs by controlling textual feature divergence (also can be extended to visual tuning) through the use of selected outliers, thereby enhancing model reliability on both base and novel classes."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The ablation study is insufficient, e.g. how sensitive DOR is to the choice of outliers and whether different selection strategies could yield better results?\n2. The experiments are largely limited to standard benchmarks without applying the method to domain-specific tasks (e.g., medical imaging or autonomous systems as the authors mentioned in the Introduction), where calibration is especially critical."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See above."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The author conducted robust experiments that demonstrate how prompt fine-tuning prior to happiness can lead to a decline in the model's calibration performance.\n\n2. The paper introduces an efficient normalization method designed to enhance the calibration performance of both base and novel classes.\n\n3. The paper provides performance results across multiple calibration evaluation metrics and a range of experimental settings.\n\n4. The writing is clear and well-structured, making it easy to read."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the calibration performance of CLIP following fine-tuning. The author observes a trade-off in calibration between base and new classes and proposes a method called Dynamic Outlier Regularization (DOR). DOR samples categories unrelated to the base class from a large vocabulary to minimize the feature deviation of novel textual labels. Empirical results demonstrate that this approach outperforms the standard fine-tuning method across various settings."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Previous works [1,2] have examined the calibration performance of pre-trained CLIP after fine-tuning. However, your paper lacks experimental results comparing your method with these studies. We recommend that you include such comparisons in your work.\n2. Given that your method is based on experimental observations from CoOp and KgCoOp, we have concerns about its generalizability. For example, in Table 1, your method underperforms compared to Vanilla TCP in half of the settings.\n3. This article primarily selects outliers from WordNet. We are curious whether using different lexical databases significantly affects the results.\n\nIf you can include relevant experiments and address my questions, I will consider increasing the score.\n\n[1] Enabling Calibration In The Zero-Shot Inference of Large Vision-Language Models.\n\n[2] Towards Calibrated Robust Fine-Tuning of Vision-Language Models."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose Dynamic Outlier Regularization to improve CLIP’s calibration under various evaluations without compromising vanilla fine-tuning."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024understanding,\ntitle={Understanding and Mitigating Miscalibration in Prompt Tuning for Vision-Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zfgYC3sDt6},\nnote={under review}\n}"
},
"abstract": {
"value": "Confidence calibration is critical for the safe deployment of machine learning models in the real world.\nHowever, such issue in vision-language models like CLIP, particularly after fine-tuning, has not been fully addressed.\nIn this work, we demonstrate that existing prompt tuning methods usually lead to a trade-off of calibration between base and new classes:\nthe cross-entropy loss in CoOp causes overconfidence in new classes by increasing textual label divergence, whereas the regularization of KgCoOp maintains the confidence level but results in underconfidence in base classes due to the improved accuracy.\nInspired by the observations, we introduce Dynamic Outlier Regularization (DOR) to ensure the confidence calibration on both base and new classes after fine-tuning. \nIn particular, we propose to minimize the feature deviation of novel textual labels (instead of base classes) sampled from a large vocabulary.\nIn effect, DOR prevents the increase in textual divergence for new labels while easing restrictions on base classes.\nExtensive experiments demonstrate that DOR can enhance the calibration performance of current fine-tuning methods on base and new classes."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Vision-Language Models",
"Confidence calibration",
"Outlier Regularization",
"Prompt Tuning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/e20720e7772dd040ccee669f8595a5f75f57a994.pdf"
},
"presentation": null,
"primary_area": {
"value": "alignment, fairness, safety, privacy, and societal considerations"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/ceb29f1dedadcc9e230fcc7657823221e83d0750.zip"
},
"title": {
"value": "Understanding and Mitigating Miscalibration in Prompt Tuning for Vision-Language Models"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zg3ec1TdAP | Context Clues: Evaluating Long Context Models for Clinical Prediction Tasks on EHR Data | main | Active | ehr;foundation model;long context;clinical prediction making;healthcare | applications to computer vision, audio, language, and other modalities | 3;5;5;6;8 | 3;4;4;4;4 | 2;2;3;3;4 | 2;2;2;3;3 | 3;3;4;3;3 | 5.4 | 3.8 | 2.8 | 2.4 | 3.2 | 0.738549 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Since the author's testing only showed good results with small Mamba models, it's uncertain whether larger Mamba models would perform better than traditional GPT."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. Introduce non-Transformer architectures (such as Mamba) to process medical data.\n\n2. The paper demonstrates the potential of the Mamba model’s long-context capabilities for future clinical applications.\n\n3. The advantage of this paper is that Mamba can perform well with linear complexity."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores enhancing EHR data processing models by assessing the impact of context length and model architecture on clinical prediction tasks. Four architectures—GPT, Llama, Hyena, and Mamba—were tested with various context lengths, with Mamba (16k token context) achieving state-of-the-art results in 9 of 14 EHRSHOT benchmark tasks. The study identifies three EHR-specific factors influencing model performance: copy-forwarding (repeated diagnoses), irregular time intervals, and disease progression. Findings show longer contexts generally improve performance, with robustness in EHR data handling, though results vary across architectures, as Hyena showed performance drops beyond 4k tokens. This research advances medical data prediction and offers insights on processing long-sequence data, despite limitations like transformer model computational demands and single-institution data."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
            "value": "1. Both GPT and LLaMA have a maximum context length of only 4096 tokens, so it's not appropriate for the authors to conduct tests with 16K length as a benchmark.\n\n2. The authors should provide additional details about what the 14 tasks in EHRSHOT include to facilitate better comparison.\n\n3. The paper mentions using the standard deviation of time intervals to divide patients into four groups (Q1-Q4). Regarding the groups with the most regular time intervals and the most irregular time intervals, the standards vary across different diseases, and testing of time intervals should be conducted according to specific diseases.\n\n4. The paper mentions achieving an AUROC of 0.807, but it's confusing that the specific content of the 14 tasks is not listed."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "I included my questions in the Weakness section, but I'll summarize the actionables below:\n\n1. **The most critical improvement to this paper**: Include a few-shot/transfer-learning results, the use of mean pooling the last L tokens for all evaluations is not a well supported embedding strategy. Alternatively, you have a zero-shot capable model, why not evaluate it in the zeroshot setting as past works do [1,2,3]? At the moment the limited embedding strategy greatly diminishes the generalizability of your results. If this were resolved, I would significantly increase my rating for the paper.\n2. Add compute time results, and analysis of the diminishing or increasing predictive performance returns as you increase compute times (via increasing sequence lengths) across methods.\n3. A more complete Analysis of the erratic GPT-model Perplexity behavior\n4. Add a comparison with linear attention models\n\n\n[1] Renc, Pawel, et al. \"Zero shot health trajectory prediction using transformer.\" NPJ Digital Medicine 7.1 (2024): 256.\n\n[2] Kraljevic, Zeljko, et al. \"Foresight—a generative pretrained transformer for modelling of patient timelines using electronic health records: a retrospective modelling study.\" The Lancet Digital Health 6.4 (2024): e281-e290.\n\n[3] McDermott, Matthew, et al. \"Event Stream GPT: a data pre-processing and modeling library for generative, pre-trained transformers over continuous-time sequences of complex events.\" Advances in Neural Information Processing Systems 36 (2023): 24322-24334."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
            "value": "1. The authors define metrics for evaluating the severity of these properties on any dataset.\n\n2. They demonstrate that as the RR metric and irregularity metric increase, model performance decreases (via patient stratification experiments).\n\n3. The demonstration that perplexity does not generally decrease with sequence length is a major deviation from text data where it is well known to decrease with sequence length. This is because future EHR data is less predictable from previous tokens. The assumption that perplexity reduces with context length therefore does not hold for EHR data.\n\n4. The authors demonstrate that for all three properties, increasing sequence length helped improve performance (either via improved brier scores or perplexity)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper analyzes qualities unique to EHR data that are challenging when scaling up sequence lengths. They identify the key properties of copy-forwarding (EHR data is repetitive), irregular time intervals (time intervals vary greatly between tokens), disease complexity (EHR data actually has generally increasing perplexity overtime--unlike text data--as far out future trajectories are hard to predict).\n\nThe authors share metrics for quantifying the severity of these three properties on EHR datasets, and demonstrate that severity of these properties correlates with worse performance. They additionally show that longer sequence lengths can help."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The authors provide in Figure 4 plots of perplexity over tokens for different context-length models. The GPT model has wildly varying perplexity over token positions which is described by the authors as being caused by \"training instability\". I think a more thoughtful analysis of the issue here is required, because it would otherwise look like the cause is a bug.\n\nWhy isn't transfer-learning or few-shot included. A major problem in the evaluation is that representations are obtained by averaging the last L token representations from the transformer (This is a one-liner in the appendix and really should be added to the main paper and be clearly communicated in the limitations section). It would be great to see these results in the few-shot setting. I imagine that the performance improvements as you increase sequence length would be even more extreme.\n\nThis paper should include comparisons to linear attention models that practitioners are interested in.\n\nThis paper does not communicate compute budgets, such as wall-times and the hardware used for these sequence lengths. Could a plot communicating performance vs the compute-load be provided to help justify whether these improvements are worth the added compute time.\n\nSince you trained an autoregressive sequence model, you could perform a zero-shot evaluation where given the past tokens, you autoregressively generate the future tokens and analyze this generated future trajectory for the binary classification task. This paper does not demonstrate whether these results generalize to the zero-shot. I think that analysis is out of scope for this work but should be mentioned in the limitations."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- Statistics of code, event, and token are confusing. \n\n \n- Diagnosis codes for chronic diseases can frequently appear in a patient’s EHR, often for billing purposes rather than indicating an active health issue. For instance, if a patient with a chronic condition (e.g., COPD or obesity) visits the hospital for an unrelated condition, the chronic disease code may not appear in that visit. In contrast, for acute diseases, the presence of a code in the record typically indicates an active case during that visit. How can token repetition be modeled effectively for these two types of diseases?\n\n\t\t\n- In line 261, the authors reported that the vocabulary has 39818 tokens. Is this the vocabulary of EHRSHOT, or does EHR-OMOP also share it? \n\t\t\n- Meanwhile, in line 950 of Appendix C, it is reported that 39811 codes are selected. Is \"code\" here equivalent to \"token\"? \n\t\t\n- In Table 3 in Appendix A, The number of unique codes in EHR-OMOP is much more than the token vocabulary mentioned earlier. Is this the original vocabulary without removing infrequent codes?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The authors benchmark both transformer-based and subquadratic architectures on EHR data\n2. The authors identified and quantified three challenges present in EHR data.\n3. The authors conduct experiments to show the effectiveness of long-context models on EHR data."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The manuscript benchmarks four foundation model architectures on Electronic Health Records (EHRs) to investigate the impact of context length on the model performance of downstream tasks. Moreover, the authors identified and quantified three challenges present in EHR data and showed that long-context models are better at mitigating the challenges."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
            "value": "1. The design of token repetition measurement is less convincing. \n\t- Are the proposed models applicable to ICU patients? If yes, routine vital signs and regular lab tests can repeat a lot, but they can continuously show patients' health status. It is tricky to determine whether they are informative. \n\n\n2. The comprehensiveness of experimental design is limited\n\t- The investigated methods are limited. They are general architectures for foundation models. However, foundation models designed for EHR, such as [1] and [2], are not included.\n\t- The authors claimed that irregular time gaps hinder the performance of the models. This is reasonable because the time gap is not encoded during tokenization. It could be interesting to see whether encoding time information would be helpful for some stratified groups although the gain may be minimal overall.\n\n3. The experiment result reported is limited for a comprehensive understanding\n\t- Prior SOTA mentioned in the manuscript (CLMBR-t-base) is also a transformer-based model. However, the context length of this model is not discussed. Additionally, it is not trained with variable context lengths.\n\t- Table 2 provides stratified results of the same experiment as Figure 1 (b) and (d). However, it is confusing that CLMBR-t-base and Hyena don't appear in this table. \n\t- The author hypothesizes that some degree of copy-forwarding can be helpful by emphasizing important diagnoses. This is observed from the CLMBR-t-base but cannot be validated by other models. Moreover, the Brier score of the CLMBR-t-base seems smaller than other models.\n\t- Standard deviation is not provided when comparing different models. The authors only conduct statistical tests between short- and long- context counterparts. However, neither standard deviation nor statistical testing is reported when comparing different methods.\t\n\t- (Minor) The impact of the long-context and proposed properties on pretraining is not discussed. \nThe downstream tasks are enough to show the conclusion but it will be better to see if these factors affect training process.\n\n4. There are some typos in the manuscript\n\t- In line 139, there's a corrupted citation\n\t- 3.3.3 title: Diseae -> Disease\n\t- Reference [Yang et al., 2023a] and [Yang et al., 2023b] are the same\nReferences:\n[1] Guo, L. L., Steinberg, E., Fleming, S. L., Posada, J., Lemmon, J., Pfohl, S. R., ... & Sung, L. (2023). EHR foundation models improve robustness in the presence of temporal distribution shift. Scientific Reports, 13(1), 3767.\n[2] Fallahpour, A., Alinoori, M., Afkanpour, A., & Krishnan, A. (2024). EHRMamba: Towards Generalizable and Scalable Foundation Models for Electronic Health Records. arXiv preprint arXiv:2405.14567."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Some technical details seem to be missing in the paper. For example, it is unclear what the 14 tasks in EHRSHOT are, whether some tasks have highly imbalanced label distributions, and if the sample sizes for these 14 tasks are the same. Since Table 2 reports the mean performance across all 14 EHRSHOT tasks, the lack of such information makes it challenging to assess the actual performance.\n\nWhen finetuning models, the author wrote \"To be consistent with the original EHRSHOT benchmark, we do not finetune our base models – instead, we train a logistic regression head on top of frozen representations generated for each patient by our base models.\" However, there is no evaluation of CLMBR-T-Base, which is the foundation model released together with EHRSHOT. Why was CLMBR-T-Base not included in the experiment?"
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "The identified properties of EHR data are convincing. The evaluation of the effects of these properties provides valuable insights into using long-context models to model EHR data. The observations and conclusions in this paper will be helpful for future work to build better foundation models for EHR. \n\nThe authors have released the code and plan to release the model checkpoints later. The release of pre-trained and fine-tuned foundational models will benefit the community, considering the small number of such models currently publicly available."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the challenges of using language models to model EHR data. By comparing EHR data with natural language, this paper identifies three unique and significant properties of EHR data (copy-forwarding, irregular time intervals, and disease progression) that make modeling EHR sequences more complex than natural language. To provide evidence for these properties, this paper evaluated the performance and robustness of three language models by varying the repetitiveness, irregularity, and context length of EHR data. The models are pre-trained using a private dataset and evaluated with a publicly available dataset, EHRSHOT."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Although the authors test the performance of different language models, the tokenization strategies of these models remain the same as the one used in EHRSHOT. It would be helpful to see if using other tokenization strategies could improve performance. For example, Section 4.3 indicates that irregular inter-token time intervals are harder to model. This conclusion is based on EHRSHOT’s tokenization, which doesn’t encode time intervals. However, there are other tokenization strategies, such as those used by ExBEHRT and EHRMamba, that do encode time intervals.\n\nThis paper uses only one EHR dataset to evaluate language models, which somewhat limits its conclusions to the EHRSHOT dataset. While I understand that unifying different EHR datasets is highly intensive work, it would be valuable to see whether similar observations consistently appear in other EHR datasets, such as MIMIC."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Could you elaborate on the paper's novelty?\nIs the dataset utilized in the study publicly available?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The analysis is solid in its technical execution and experimental design\n2. Generally, a rich technical/experimental paper"
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper investigates the advantages of long context models in the healthcare domain by evaluating the impact of context length on clinical prediction tasks using four models—Mamba, Llama, GPT, and Hyena—trained on millions of longitudinal EHR records. Additionally, the study assesses model robustness against three properties of EHR data: (1) \"copy-forwarded\" diagnoses that lead to artificial token repetition, (2) irregular time intervals between EHR events causing variable timespans within context windows, and (3) the increasing complexity of diseases over time, which complicates the prediction of later tokens compared to earlier ones. These factors highlight challenges associated with EHR data in clinical prediction tasks. The results indicate that a higher prevalence of each property negatively impacts model performance, while models with longer context inputs tend to be more robust (although not consistently) to these issues."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1.\tThe analysis is solid in its technical execution and experimental design; however, it does not introduce any new methods, models, or techniques, which limits its novelty.\n2.\tThe importance of long context is questionable given the low frequency of extremely long contexts in EHRs. It seems somewhat expected that providing more information for each sample would improve results.\n3.\tWhile the paper states that disease progression increases token complexity over time. I am not very convinced how this property can be problematic with shorter input context compared to longer input context models. \n4.\tThe paper suggests that model performance improves with longer contexts; however, this is not consistently reflected in the figures. If this is indeed the case, it stands to reason that having more information about a patient (i.e., longer context) should facilitate easier predictions."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "An analysis of the impact of context length on foundation models trained on structured electronic health record (EHR) data for clinical prediction tasks."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024context,\ntitle={Context Clues: Evaluating Long Context Models for Clinical Prediction Tasks on {EHR} Data},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zg3ec1TdAP},\nnote={under review}\n}"
},
"abstract": {
"value": "Foundation Models (FMs) trained on Electronic Health Records (EHRs) have achieved state-of-the-art results on numerous clinical prediction tasks. However, these EHR FMs typically have limited context windows of $<$1k tokens due to computational constraints, which prevents them from modeling full patient EHRs which can easily span 10k's of events. For making clinical predictions, both model performance and robustness to the unique properties of EHR data are crucial. Recent advancements in subquadratic long-context architectures offer a promising solution. However, the application of this long-context paradigm to EHR data has not been well-studied. We address this gap by presenting the first systematic evaluation of the effect of context length on modeling EHR data across four state-of-the-art transformer and non-transformer architectures. We find that longer context models indeed improve predictive performance -- our Mamba-based model surpasses the prior state-of-the-art on 9/14 tasks on the EHRSHOT prediction benchmark. Additionally, we measure robustness to three unique, previously underexplored properties of EHR data: (1) the prevalence of \"copy-forwarded\" diagnoses which create artificial token repetition in EHR sequences; (2) the irregular time intervals between EHR events which can lead to a wide range of timespans within a context window; and (3) the natural increase in disease complexity over time which makes later tokens in the EHR harder to predict than earlier ones. Stratifying our EHRSHOT results, we find that while higher levels of each property correlate negatively with model performance (e.g., a 50% higher Brier loss between the least and most irregular patients), longer context models are more robust to patients exhibiting extreme degrees of each property. 
Our work highlights the potential for using long-context architectures to model EHR data, and offers a case study on identifying and quantifying new challenges in modeling sequential data that are motivated by domains outside of natural language. We release our model checkpoints, data preprocessing pipelines, and evaluation code."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"ehr",
"foundation model",
"long context",
"clinical prediction making",
"healthcare"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/00c32cc0359f1705224672a6e6e17562a1e6b524.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/183d5815b814ecbd0c106bf494d148d2ef62e91f.zip"
},
"title": {
"value": "Context Clues: Evaluating Long Context Models for Clinical Prediction Tasks on EHR Data"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zgM66fu0wv | IRIS: An Iterative and Integrated Framework for Real-Time Causal Discovery | main | Active | causal discovery;real-time;large language model | causal reasoning | 1;3;3;3 | 4;4;4;3 | 2;1;2;3 | 1;2;2;2 | 3;3;3;2 | 2.5 | 3.75 | 2 | 1.75 | 2.75 | -0.333333 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "What evidence is provided in the paper about the specific mechanism by which information extraction improves the match between the CGMs constructed by human experts and those constructed with the help of IRIS?\n\nWhat evidence is provided in the paper about the match between the judgments of human experts and actual ground truth causal effects?\n\nWhat sort of structured data does IRIS collect and under what circumstances? Can you give some concrete examples of how data sets were improved by the IRIS process of finding documents and extracting numeric data?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper poses an interesting challenge in terms of using LLMs and large corpora of unstructured text to aid the process of constructing causal graphical models.\n\nThe authors attempt a quantitative evaluation."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper reports the design and evaluation of a system intended to assist human analysts with the task of constructing a causal model for a given domain by using text documents about the domain. The system (IRIS) takes a set of variables as input, retrieves relevant documents, proposes missing variables, extracts variable values, organizes data for structure learning algorithms, and learns the structure of those models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The evaluation of IRIS focuses more on replicating human expectations than on accurately estimating causal effects. The “ground truth” for the causal graphical models constructed by IRIS are causal graphical models constructed by a set of human judges. This has several problems. First, this form of evaluation eliminates any possible mismatch between the formalism (graphical models) and actual causal effects in the world. Second, it merely checks (at best) whether the human judges reproduce what an LLM can extract from the expressions of causal judgments in textual training data. This avoids a key question: Whether the training data of LLMs expresses accurate causal knowledge. One of the key goals of causal inference is to provide a source of information about interventional effects that is separate from expert judgment. IRIS appears to reinforce those judgments, rather than check them.\n\nThe authors appear to show *that* the method improves accuracy, but not *why* it improves accuracy. Until a mechanism is clearly described and proven by detailed experiments, the results are unconvincing. On its face, it seems unlikely that sufficiently accurate quantitative data could be extracted from large text corpora and that such data would improve the construction of accurate causal graphical models.\n\nThe authors describe the construction of cyclic models, but they do not clearly define the semantics of those models. The introduction says: “Specifically, this hybrid approach allows cycles in causal graphs, thereby discarding the *acyclicity* assumption.” Accurate construction and use of cyclic models requires much more than simply *allowing* cycles. Instead, any use of cyclic models requires that the authors clearly define a semantics for cyclic graphs, as well as a corresponding semantics of inference in such graphs. They don’t do this. 
Thus, it is unclear what their cyclic models mean, in a formal sense.\n\nA related problem is that the authors do not define a semantics of *temporal* causal models. Such models pose additional challenges. One major challenge of doing this is to define a consistent time scale among the variables among which feedback occurs. In many real-world systems, some cycles of feedback occur in seconds and other cycles of feedback occur over days or months. These varying timescales have vexed many researchers who attempt to construct a useful semantics for cyclic causal models. The authors don’t even mention this or other similar challenges (e.g., hierarchical dependence, aggregation, etc.).\n\nThe authors incorrectly claim to relax the causal sufficiency assumption because of the effects of the IRIS component that proposes new variables. In reality, IRIS's ability to propose additional variables doesn’t relax the assumption. Instead, it makes it more likely that the assumption is met, provided that the added variables really are latent confounders. The authors should state this more clearly, and they should provide evidence in the paper that the added variables frequently correspond to latent confounders.\n\nThe authors do not provide evidence that the discovered variables satisfy necessary assumptions of causal graphical models. Variable definition is a major challenge of constructing such models. Specifically, variables in valid causal graphical model must meet a variety of assumptions, including the (quite challenging) assumption of modularity, also referred to as “autonomy” or “independence of causal mechanism”. Any process that purports to automatically discover useful variables would need to show that the discovered variables are modular (at a minimum). There is no evidence provided that the discovered variables meet this (or any other) standard.\n\nThe authors do not appear to effectively address key challenges of extracting structured data from text. 
Automatic extraction of structured data from large text corpora poses a variety of potential pitfalls, including variation in the definition of units and variables, heterogeneity in the conditions under which data are gathered (resulting in more, rather than less, latent confounding), and biased sampling (corresponding to conditioning on a collider and thus posing a new threat to internal validity). The authors don’t mention these threats to validity or how they address them.\n\nIt is unclear how the collection of data from documents is even possible. Essentially all traditional observational studies gather data from structured sources (e.g., national census data, surveys, etc.) rather than from text documents. For the example cited in the paper (“smoking” AND \"cancer” AND “pollution”), it would seem unlikely that most text documents would contain specific instances from which useful data could be extracted (e.g., “John Smith smoked 15 cigarettes a day for 25 years and lived for most of that time in Pittsburgh which had 15 parts per million of pollution during that period”). The authors should make much clearer what sort of numeric data is appearing in documents and how it can be effectively extracted.\n\nOne writing issue interferes with effective reading of the paper. The authors provide citations without parentheses or other notation, making it difficult to separate citations from ordinary text on first reading. The authors should revise their citations to use parentheses (e.g., \\citep)."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "Are extracted values of variables always binary? If not, how were precision and recall calculated for multiclass variables?\nHow does the prompt in A.5 relate to value extraction in the intuitive sense?\nGiven how the values of variables in IRIS are extracted, is there a justification of the use of causal discovery algorithms to structured data produced by IRIS?\nWhere did the list of variables presented to the human experts in A.7 come from?\nAren't the kind of documents in AppleGastrome (description of a single kind of apple and its attributes) very different from what one would expect in e.g. an epidemiological document?\nYou state \"This selection is based on GES achieving the highest average F1 score and Normalized Hamming Distance (NHD) ratio across all five datasets, as demonstrated in Section 6.\" I don't see any test of GES by itself in section 6, and GES does not have the highest F1 and Normalized Hamming Distance when it is part of a hybrid with IRIS. What did you mean here?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The authors put a good deal of work into testing their algorithm. They took sets of variables from the epidemiology literature, and recruited 3 experts to compare gold standard causal graphs to compare their output to. Much of the exposition about how the algorithm worked was admirably clear, with a couple of glaring exceptions. They put effort into showing that each part of the hybrid algorithm contributed to the overall result. The results that they reported are limited, but given the cost of creating gold standard graphs, understandable. The results showed that their hybrid algorithm did outperform competitors."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "IRIS is an algorithm for using LLMs to construct causal graphs. IRIS starts with an initial set of variables Z, and then uses LLMs to (i) collect relevant documents, (ii) extract structured data about the original set of variables, (iii) apply causal discovery algorithms to the structured data to create a causal graph G, (iv) extract more variables Znew related to the original set of variables Z, (v) extract causal relations between Z and Znew, and (vi) merge the original graph G with the new causal relations. IRIS is tested on 5 different sets of initial variables taken from the epidemiology literature by comparing the graphs output by IRIS to causal graphs constructed by epidemiological experts. IRIS is found to outperform previous LLM algorithms."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The authors gave evidence that IRIS improved the construction of causal graphs as compared to previous LLM algorithms, but it is unclear from the paper why that is.\n\nThe paper is confusing about how value extraction works, so I may have misunderstood it. If so, some of the following objections may be incorrect. The structured data that comes out of value extraction is placed into a d x n table, call it Table 1, where the number of variables is n, and d is the number of documents. For a given variable and a given document, the LLM is asked:\n\n We have a variable named ’{var}’. The value of variable ’{var}’ is True or False.\n True indicates that the existence of ’{var}’ can be inferred from the document, whereas False suggests that\n the existence of ’{var}’ cannot be inferred from this document.\n Based on the document provided, what is the most appropriate value for ’{var}’ that can be inferred?\n\nIt is unclear whether the value extraction always forces the output to be binary. The prompt certainly makes it sound that way. However, the article applies value extraction to documents in which variables have 3 values, and makes it sound like it is supposed to extract the values of the variables. It is not clear why asking whether the existence of a variable can be inferred from a document would extract the value of a variable in that document. Or if a document said \"Flouride does not cause cancer\", whether it is possible to infer the existence of fluoride (or cancer) from that document. They report precision and recall for the case where the variables in the document had 3 values, so more information about what they meant by precision for this multiclass case would be helpful.\n\nThe value extraction was applied to the AppleGastrome and Neuropathic data sets. 
The AppleGastrome data set was created by using an LLM to create documents which were reviews of apples, where each document contained a description of the values of several features of an individual kind of apple, and whether the kind of apple was considered good. While IRIS outperformed another algoritm in extracting values of features in this case, this dataset seems very different from the kind of documents one would typically expect in epidemiology; those documents would not be descriptions of a single person and their attributes, so what the value extracted from such a paper should be is less clear. One thing that would help is to explain the AppleGastrome data looks like in more detail - I had to go to the original paper to get a sense of what the documents actually looked like.)\n\nThis raises the question of the use of the causal discovery algorithms on structured data. The structured data that is input into causal discovery algorithms in IRIS is very different from the usual kind of data the algorithms are designed for. In the usual setting, given 5 variables for example, a sample of units with values for those 5 variables would be measured, and the values of each of the variables for each of the units would be put into a table, call it Table 2. \n\nTable 1 and Table 2 are very different. Table 2 can have arbitrary values for the values of its variables, while Table 1 can only have True or False (if I understand correctly). I don't see how recording a 10 for how many years someone has been smoking relates to a True of False answer to the question of whether it is possible to infer from some document that smoking exists (or precisely what it would mean to infer the existence of smoking from a documentt.) Causal discovery algorithms are intended to work on tables of the kind described in Table 2. 
I don't see how they could be expected to work on tables of the kind described in Table 1, if only because a great deal of information is being thrown away when variables that can have many different values are being forced to be binary. They also depend on data being i.i.d., whereas the documents may well be dependent on each other, if for example one documents cites another. Also the documents are being selected on the basis of the terms they contain, so that could introduce selection bias. If the values extracted are always binary, much information is being thrown away, which could also affect the causal discovery algorithms.\n\nIn the examples of applications of IRIS, while IRIS did find other variables to add to the original set of variables, there were actually very few confounders among the new variables. That implies that the causal discovery algorithms could be relatively accurate in these examples, but one would not expect that to be true in general. \n\nThe way that information about new variables is integrated into the causal graph produced by the causal search algorithm is naive. For example, a statistical algorithm might orient edges as A -> B <- C because A and C are independent. But if background knowledge indicates that A and C are causally related, there is now a conflict between the background knowledge and the current orientations (because if A and C are dependent, the evidence that the orientations are A -> B <- C is gone.) One should not simply add the edge A -> C, but also remove the orientations of A ->B and B <- C. Also if A -> B is in the DAG, and a new variable X is added by IRIS along with the informaton A -> X -> B, it is not clear whether the edge A -> B should also be removed. (A is a direct cause of B relative to the set of variables without X, but may or may not be a direct cause of B relative to the set of variables with X.) How should this be handled?"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See Weaknesses."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "•\tThe paper addresses an important problem, showing potential by integrating various existing techniques.\n\n•\tIt includes extensive experimental evaluations of causal discovery using publicly available data, showcasing how the potential of the proposed framework."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a framework for causal discovery that combines several methods, including Google searches for retrieving relevant documents, large language models (LLMs) for identifying known relationships, causal discovery techniques for uncovering causal links, and variable abstraction for detecting unobserved causes of initial variables."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "I have concerns regarding the use of causal discovery methods, such as PC, GES, and NOTEARS, on the value-extraction dataset:\n\n1.\tApplicability of Causal Discovery Methods: Causal discovery methods are typically designed for observational data derived from naturally occurring or “real-world” events, often involving measured variables over time or across conditions, such as in longitudinal studies or surveys. However, the value extraction data here is aggregated from documents and lacks the context of natural experiments. The relationships identified among terms represent co-occurrence rather than causation. It seems unlikely that true causal relationships can be reliably uncovered from such summarized data.\n\n2.\tAssumptions of Causal Discovery Methods: Causal discovery methods require assumptions, such as acyclicity and causal sufficiency. When these assumptions are not met, strong justification is necessary to validate that the results remain causal. This paper appears to overlook these assumptions and does not provide evidence that the results are indeed causal.\n\n3.\tExperimental Results and Effectiveness: The experimental results indicate that causal discovery methods alone do not work effectively here; the highest-performing methods include Verified Causal Relations (VCR), suggesting that pre-verified causal knowledge is critical to success.\n\nThe experimental comparisons and conclusions raise further issues:\n\n•\tIn Tables 3, 4, 5, 6, and 7, the methods reported vary inconsistently. For instance, some tables include IRIS (Llama)-PC + VCR and IRIS-PC+VCR, while others list IRIS (Llama)-GES + VCR and IRIS-GES+VCR, and yet others have IRIS (Llama)-NOTEARS + VCR and IRIS-NOTEARS + VCR. It is scientifically unsound to selectively report results across datasets in this manner.\n\n•\tThere are a total of 10 IRIS variations and 3 comparison methods. 
Concluding that IRIS outperforms other methods based on one variation performing better than the comparison methods in a dataset introduces a potential for bias due to multiple comparisons."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- It is unclear to me why acyclicity is a limitation. What does acyclicity mean in terms of causal-effect relationships? What exactly is limited by making this assumption? Is this a limitation in general or in specific applied domains?\n- Authors state that: \"Many causal processes are known to contain feedback loops\". This is true only if we consider \"processes\", that is, time. This should be made explicit. Is time modeled implicitly? Are the authors disregarding the difference between static and dynamic causal models?\n- The existence of an equivalence class is given by specific parametric assumptions: there are probability distributions that are known to produce identifiable causal graphs, i.e. DAGs. Furthermore, there are algorithms that produce DAGs, not CPDAGs, by design. Why are equivalence classes a problem if they are an essential part in describing the cause-effect relationships?\n- Why the notation differs from the references (e.g. Pearl)? For instance: the usual notation is \" -> \", not \" <- \"; D, X, G = (V, E) is found in the references; X bold not defined; G hat is not defined; NHD should be better defined separately.\n- How can we assure that the retrieved documents are relevant to our specific context? How can we assure that those are not fake? scholar.google.com, arxiv.org contains also non-peer reviewed papers, those are far from being \"reputable academic repositories\".\n- Are we implicitly assuming that variables are categorical only?\n- Can the authors provide a formal definition of \"veracity\" in the context of causal graphs?\n- LLMs are trained on the very same papers that describe the datasets authors used during evaluation. How can we test a model that has been trained on the train set?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
"value": "- An iterative approach for causal discovery allows to stop whenever a sufficiently stable solution is found, lowering the computational complexity.\n- Automated data collection can be useful to bootstrap the causal discovery procedure.\n- Missing variables proposal is an interesting approach to causal insufficiency that should be investigated more."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "Authors propose a novel causal discovery technique called IRIS, an Iterative Retrieval and Integrated System for Real-Time Causal Discovery. This algorithm starts from a set of variables and searches the web to retrieve documents relevant to these keywords. It then proceeds to extract variable values (namely, the levels of a categorical variable) and extract keyword frequencies across documents. These quantities are used to construct a causal graph. From this initial graph, potential missing variables are identified and the process is repeated and the graph expanded. This approach relies on LLMs for document retrieval, value extraction, and missing variable proposal. These steps are evaluated w.r.t. different benchmarks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The data collection and value extraction is performed by querying large databases (search engines) with some coarse queries based on iterative keywords reduction.\n- In this paper, the \"statistical causal discovery\" is used to compute relationships based on terms-documents frequency. It has nothing to do with the literature cited by the authors that computes statistical properties based on actual data about the variables of interest.\n- LLMs are trained on a large number of documents, we do not have any guarantees that they are not trained on the very same papers used to publish the datasets on which this method is evaluated. We cannot conclude anything about generalization and validity of the proposed approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We propose a novel framework designed to automatically collect observational data, leverage existing knowledge to enhance causal discovery results, and propose missing variables."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024iris,\ntitle={{IRIS}: An Iterative and Integrated Framework for Real-Time Causal Discovery},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zgM66fu0wv},\nnote={under review}\n}"
},
"abstract": {
"value": "Causal discovery is fundamental to scientific research, yet traditional statistical algorithms face significant challenges, including expensive data collection, redundant examination of known relations, and unrealistic assumptions. Additionally, while recent LLM-based methods excel at identifying commonly known causal relations, they fall short in uncovering novel relations. We introduce IRIS (Iterative Retrieval and Integrated System for Real-Time Causal Discovery), a novel framework that addresses these limitations. Starting with a set of initial variables, IRIS automatically retrieves relevant documents, extracts variable values, and organizes data for statistical algorithms in real-time. Our hybrid causal discovery method combines statistical algorithms and LLM-based methods to discover existing and novel causal relations. The missing variable proposal component identifies missing variables, and subsequently, IRIS expands the causal graphs by including both the initial and the newly suggested variables. Our approach offers a scalable and adaptable solution for causal discovery, enabling the exploration of causal relations from a set of initial variables without requiring pre-existing datasets."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"causal discovery",
"real-time",
"large language model"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/41b4426dc9f2f8290a2768a62947979215287130.pdf"
},
"presentation": null,
"primary_area": {
"value": "causal reasoning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/2d85e67d74b156c9c83bd9875d1e7cddd833a3fa.zip"
},
"title": {
"value": "IRIS: An Iterative and Integrated Framework for Real-Time Causal Discovery"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zgXGNXkC0F | Mitigating Multimodal Hallucinations via Gradient-based Self-Reflection | main | Active | Hallucinations;MLLMs;Gradient-based Analysis | applications to computer vision, audio, language, and other modalities | 3;3;5;8 | 4;5;4;3 | 3;2;2;3 | 3;3;2;4 | 3;2;3;3 | 4.75 | 4 | 2.5 | 3 | 2.75 | -0.863868 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": null,
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": null,
"primary_area": null,
"questions": null,
"rating": null,
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": null,
"summary": null,
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": null,
"withdrawal_confirmation": {
"value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors."
}
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "- Could you provide more details on why the Manhattan (L1) norm was specifically chosen for token influence analysis? Have other norms been tested, and if so, how did they compare?\n- The choice of early stopping thresholds (e.g., 7% for LLaVA, 25% for InstructBLIP) lacks detailed justification. Could you provide insights into how these values were determined and if they are consistent across different models?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper introduces the \"token influence\" method, which uses gradient norms to measure the sensitivity between input and output tokens. This analysis reveals the model's biases and sheds light on its internal behavior, especially regarding spurious correlations.\n- The authors provide a CHAIR metric component analysis, which can be interpreted as evidence of mitigation for each bias—text-visual, co-occurrence, and long-term. This approach offers an indirect yet insightful perspective on the effectiveness of bias mitigation, supporting the method's unique contribution by showcasing its targeted impact on these specific biases."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of hallucination in Multimodal Large Language Models (MLLMs), proposing a novel method called Gradient-based Influence-Aware Contrastive Decoding (GACD). The authors identify three biases that contribute to hallucinations: text-visual bias, co-occurrence bias, and long-term bias. The GACD method seeks to balance these biases by measuring \"token influence\" through gradient analysis. This approach allows the model to amplify relevant visual tokens while mitigating distractor tokens, particularly effective against co-occurrence hallucinations. Through extensive experiments, the paper demonstrates that GACD improves hallucination mitigation across multiple datasets and model types, outperforming state-of-the-art methods."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- In Figure 1, the cause of hallucination appears ambiguous. The example of \"food\" doesn't strongly exhibit a text-visual bias, making it unclear why this specific example is used to demonstrate hallucination.\n- The writing style is challenging to follow, particularly in the introduction and methodology sections. It has complex sentence structures, making it difficult to immediately grasp the author’s primary argument. Clearer, more straightforward language and improved structure would enhance readability.\n- The paper does provide step-by-step explanations of individual components within the GACD method, such as token influence measurement and contrastive decoding adjustments. However, it lacks a clear, overarching view of how these steps fit together in the overall process. This makes it difficult to grasp how the entire method operates as a cohesive framework. \n- In the experiment tables, particularly for the POPE dataset, the use of terms like \"original\" is ambiguous without clear definitions. Readers might not understand if this term refers to baseline results or some other metric. More specific labels or definitions would help clarify these results.\n- For the LLava-QA90 and CHAIR experiments, the baselines and comparative methods are poorly defined. Each approach seems to have different parameters, which reduces the effectiveness of comparisons in showing efficiency or the superiority of the proposed method. Clearly stating consistent baselines and providing more context for each comparison would improve interpretability."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. Is there any measurement of the computation like decoding time for GACD and other approaches such as Woodpecker[1], VCD[2], and AVISC[3]?\n\n2. It is better to show the component analysis on other datasets such as POPE and AMBER to show the validness of each component.\n\n[1] Yin, Shukang, et al. \"Woodpecker: Hallucination correction for multimodal large language models.\" arXiv preprint arXiv:2310.16045 (2023).\n\n[2] Leng, Sicong, et al. \"Mitigating object hallucinations in large vision-language models through visual contrastive decoding.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024.\n\n[3] Woo, Sangmin, et al. \"Don't Miss the Forest for the Trees: Attentional Vision Calibration for Large Vision Language Models.\" arXiv preprint arXiv:2405.17820 (2024)."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The concept of employing gradient estimation to mitigate hallucination is innovative.\n\n2. Empirical studies on token influence, encompassing text-visual bias, co-occurrence bias, and long-term bias, are solid and insightful.\n\n3. Extensive experiments across various hallucination-related benchmarks underscore the effectiveness of the proposed approach on hallucination reduction."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a novel decoding strategy, Gradient-based Influence-Aware Contrastive Decoding (GACD), designed to mitigate hallucinations in multi-modal large language models without the requirement for additional training. The authors identify three primary sources of hallucination: text-visual bias, co-occurrence bias, and long-term bias. To address these issues, they introduce an innovative technique that balances these biases by utilizing token influence through self-reflective gradient calculations. Notably, their approach tackles co-occurrence bias without necessitating further fine-tuning. Comprehensive experiments reveal that GACD not only effectively reduces hallucination but also achieves superior performance across various multi-modal benchmarks, outperforming existing decoding strategies."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "The experimental results perform the performance of GACD on hallucination-related datasets which is not sufficient to show the generalization of proposed method. The authors need to show the model's performance with GACD on comprehensive benchmarks such as MMVet [1], MMBench [2], or MMMU [3].\n\n[1] Yu, Weihao, et al. \"Mm-vet: Evaluating large multimodal models for integrated capabilities.\" arXiv preprint arXiv:2308.02490 (2023).\n\n[2] Liu, Yuan, et al. \"Mmbench: Is your multi-modal model an all-around player?.\" European Conference on Computer Vision. Springer, Cham, 2025.\n\n[3] Yue, Xiang, et al. \"Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "None"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper introduces a unique method, GACD, which uses gradient norms to quantify 'token influence' and self-reflection to balance biases, offering a new perspective on hallucination mitigation in MLLMs.\n- GACD is the first approach that can fully address co-occurrence bias, which is a significant contribution to the field.\n- The token influence analysis allows for a detailed understanding of biases at the individual sample level, which is a step forward from previous methods that relied on overall statistics."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper titled \"Mitigating Multimodal Hallucinations via Gradient-Based Self-Reflection\" presents a novel approach to address the issue of hallucination in MLLMs. The authors identify three primary biases causing hallucinations: text-visual bias, co-occurrence bias, and long-term bias. They propose a method called Gradient-based Influence-Aware Contrastive Decoding (GACD), which leverages gradient-based self-reflection to balance these biases and mitigate hallucinations without additional resources or tuning. The paper claims that GACD is the first method capable of fully addressing co-occurrence bias and provides extensive experimental results demonstrating its effectiveness in improving MLLM performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The paper's validation is based on relatively older versions of MLLMs, which may not represent the current state-of-the-art in the field. The effectiveness of GACD should be tested on the latest MLLMs, such as InternVL2 [1] and Qwen2-VL [2], to ensure that the findings are relevant and applicable to current research and industry standards.\n- While the authors claim that their method is effective against all three types of hallucinations, the experimental section lacks a focused validation on each specific type of hallucination. A more detailed analysis targeting each bias individually would strengthen the paper's claims.\n- The paper relies on benchmarks like COCO for evaluating hallucinations, which may not be comprehensive or up-to-date. The use of more diverse evaluation benchmarks such as HallusionBench [3] could provide a more rigorous test of GACD's capabilities.\n- The effectiveness of GACD may depend on the clarity and importance of visual information relative to the prompt, which could be a limitation in scenarios with complex or ambiguous visual inputs, such as documents and table images.\n\n[1] https://internvl.github.io/blog/2024-07-02-InternVL-2.0/\n\n[2] “Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution.” ArXiv abs/2409.12191 (2024).\n\n[3] “Hallusionbench: An Advanced Diagnostic Suite for Entangled Language Hallucination and Visual Illusion in Large Vision-Language Models.” 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "### Q1. Is there a specific reason why the POPE Adversarial setting in Table 1 performs significantly better?\n\n### Q2. Why were experiments conducted only on the 7B model? How would the proposed method scale with models larger than 13B or smaller models? It would be helpful to understand performance variation across different model scales.\n\n----\n\n### Minor comment: Please use citep instead of cite, and reserve cite for the beginning of a sentence."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "### S1. The effort to categorize hallucination into text-visual bias, co-occurrence bias, and long-term bias is commendable. The classification is convincing, particularly as illustrated in Figure 1.\n\n### S2. The idea of quantifying the influence of each bias is interesting and seems useful. Visualizing the impact of these biases in Figure 2 and Appendix Figure 1 was clear and helpful for understanding the distinctions between them."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper explores the issue of object hallucination in large vision-language models (VLMs) and attributes the phenomenon to three key biases: text-visual bias, co-occurrence bias, and long-term bias. The authors propose a method to quantify the influence of each of these biases by calculating the gradient-based impact of input tokens on the output. Furthermore, they introduce the GACD approach, which balances the influence of hallucination-inducing tokens or applies early stopping to mitigate hallucinations."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "### W1. The experimental validation is somewhat weak. The POPE results in Table 4 of the Appendix show only minor improvements in the Random and Popular settings, and there is no comparison with other methods such as VCD, AvisC, etc., which makes it difficult to assess the effectiveness of the proposed method comprehensively.\n\n### W2. The paper lacks sufficient details regarding the GPT-4V prompt used in the open-ended QA evaluation. Providing a more detailed explanation would improve clarity.\n\n### W3. Regarding hyperparameters, the choice of alpha values (set to less than 5 for discriminative tasks and 3 for generative tasks) lacks justification. A rationale for these values should be provided.\n\n### W4. Early stopping criteria are determined differently for each model, but there is no explanation or ablation study to justify these choices. The lack of grounding for these decisions weakens the methodological rigor, and this applies to both points 3 and 4.\n\n### W5. The paper claims that GACD is the first approach to fully address co-occurrence bias without extra resources or tuning. However, this claim seems questionable. Since co-occurrence bias and language prior often share the same underlying mechanisms, existing contrastive decoding methods like VCD, M3ID, and AvisC seem to address this issue inherently as well. Clarification is needed here."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Gradient-based Influence-Aware Contrastive Decoding is a method that measures input token influence to uncover biases and mitigate hallucinations in MLLMs."
},
"_bibtex": {
"value": "@misc{\nwang2024mitigating,\ntitle={Mitigating Multimodal Hallucinations via Gradient-based Self-Reflection},\nauthor={Shan Wang and Maying Shen and Nadine Chang and Chuong Nguyen and Hongdong Li and Jose M. Alvarez},\nyear={2024},\nurl={https://openreview.net/forum?id=zgXGNXkC0F}\n}"
},
"abstract": {
"value": "Hallucination in Multimodal Large Language Models (MLLMs) occurs when inaccurate text-visual alignments are generated, posing a major challenge for reliable model output. Previous studies have identified three primary biases as major causes of hallucinations: text-visual bias (over-reliance on text over visual details), co-occurrence bias (misleading object correlations), and long-term bias (increased hallucinations in later stages of long sequences). Existing hallucination mitigation methods often rely on visual grounding, which requires additional resources such as scoring systems using another MLLM, and still fail to fully address all biases, particularly co-occurrence bias in visual inputs. We propose Gradient-based Influence-Aware Contrastive Decoding (GACD) to explicitly and jointly balance these biases, thereby mitigating hallucinations. To quantify these biases at the individual sample level, we introduce `token influence'. Since biases are rooted in the training data and become embedded in pre-trained MLLMs, we derive token influence through self-reflection by calculating the gradients from output predictions to input tokens. Notably, GACD is the first approach capable of fully addressing co-occurrence bias without relying on extra resources or any form of tuning. Extensive experiments demonstrate GACD's effectiveness in reducing hallucinations and improving MLLM performance, achieving new state-of-the-art results while providing insights into the visual perception capabilities of these models."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": {
"value": [
"~Shan_Wang2",
"~Maying_Shen1",
"~Nadine_Chang1",
"~Chuong_Nguyen1",
"~Hongdong_Li1",
"~Jose_M._Alvarez2"
]
},
"authors": {
"value": [
"Shan Wang",
"Maying Shen",
"Nadine Chang",
"Chuong Nguyen",
"Hongdong Li",
"Jose M. Alvarez"
]
},
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Hallucinations",
"MLLMs",
"Gradient-based Analysis"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": {
"value": "wang|mitigating_multimodal_hallucinations_via_gradientbased_selfreflection"
},
"pdf": {
"value": "/pdf/bfa7ba4db1c1b7a33f7d10c909dde820e15c47f4.pdf"
},
"presentation": null,
"primary_area": {
"value": "applications to computer vision, audio, language, and other modalities"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/9c3f884e11ce49d537bf3ae93938302567468009.pdf"
},
"title": {
"value": "Mitigating Multimodal Hallucinations via Gradient-based Self-Reflection"
},
"venue": {
"value": "ICLR 2025 Conference Withdrawn Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Withdrawn_Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zgs450VzkU | Test-Time RAG: Enhancing Long Context Understanding in LLMs with Retrieval-Augmented Mechanisms | main | Active | Retreival Augmented Generation (RAG);Personalization;LLM | foundation or frontier models, including LLMs | 1;3;3;5 | 4;4;4;4 | 1;2;2;2 | 1;1;1;2 | 1;1;1;2 | 3 | 4 | 1.75 | 1.25 | 1.25 | 0 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "/"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The authors propose to iteratively rewrite the query to improve search results for extensive personalized context, which is a timely problem of interest, with a significant body of literature."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors propose an iterative query rewriting approach that sequentially and conditionally embed the query with retrieved documents to improve long context understanding."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Overall confused by the claims of the objective of the work and the actual paper body, it is not clear how the work relates to “long context understanding”, “personalised context”, or “specialised task-oriented systems”. The authors proposed a rewriting query scheme with iterative embedding, with substantial details missing (and therefore, conclusiveness), see below. \n\n- There is a substantial amount of work on query rewriting and agentic RAG which is not mentioned at all. There is no comprehensive literature review, with the “motivation” section being almost unrelated. Not able to compare to other related papers like “Query Rewriting for Retrieval-Augmented Large Language Models”, among many others. \n- “Test-time” term is misleading. Similarly, the authors didn’t clearly show benefits of the approach specifically for “long context”, as claimed. The examples do not illustrate long context, only short context.\n- Benchmarks do not seem well suited for the specific problem(s) claimed by the authors. Also, BM25 is not an embedding model.\n- Motivation for the conditional compute not clear, context with long context understanding missing. Seems ad-hoc and not reproducible.\n- A significant number of question arise, which is caused partially by the lack of reference to state of the art methodologies: how does this work compare to reranking methodologies? What is the latency of the pipeline? When does it stop the iteration loop (what are the stop criterion)? What is the effect of these stop criterion? How significant are the small increments in performance claimed by the authors? \n- Lack of overall reproducibility for different datasets"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. What is the architecture used in the bidirectional encoder? How do the authors arrive at this architecture?\n2. Please explain why average of unmasked tokens is better than a weighted sum of the unmasked tokens where the weights can be learned? either a theoretical explanation or an empirical study is preferred.\n3. Clearly differentiate between CE vs CE Filtering only\n4. What are the performance implications of Test-Time RAG? On the infrastructure which you are using (which is not specified?) how much time and GPU memory does CE introduce? Given the gains in Tables 1 and 2 are relatively less, is it worth the compute?\n5. Can statistical tests (even a means test) be done on each of the groups of 3 in tables 1 and 3 highlighting which numbers are statistically better than the baseline and where is CE better than CE Filtering?\n6. What are the time impacts on iterative query rewriting ? What is the distribution of the number of rewrites as per the algorithm? how long does the response take with rewrites vs. without on average?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The paper handles an important problem around long-context RAG. Their approach of using a conditional compute is important and shows the versatility of tasks in a question answering system and how a LLM prompt may not be the best way to do all queries. Query rewriting based on iterative search is an interesting idea too. The problem has practical implications in both research and industry."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper looks at the problem of long-context RAG and provides 3 steps to solve it. Their overall approach, Test-Time RAG considers a custom embedding approach called Conditional Embeddings, uses query rewriting and then use conditional compute. They evaluate their results on a few different datasets showing consistent improvement in results over baseline."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
            "value": "The paper presents an interesting new perspective on long context retrieval. However the work has some limitations worth considering\n\n1. Section 3.1 introduces conditional embeddings however the authors do not state they are using pretrained embeddings for the same in this section. It reads like this is a custom embedding. Much later, in Table 1 we understand that the authors have used GPT3.5 embeddings and llama embeddings for the same. This makes reading section 3.1 hard without forward referencing\n2. It is not clear why an average across the unmasked tokens in CE embeddings is better than taking a weighted average (with learned weights)\n3. Scalability of solution - how does this solution scale with respect to large datasets of documents ?\n4. I do not understand on how you prevent Algorithm 1 to create irrelevant questions if the retrieval is of poor quality. The approach runs the risk of run away poor results on questions which do not have good answers in the database\n5. Lack of clarity in what is CE and what is \"CE (Filtering only)\" in Table 1.\n6. Lack of clarify that the model is a LLM model used only for question answering\n7. How is F1 score computed? Over how many documents i.e. what is your retrieved k?\n8. Whilst I appreciate the principle behind the conditional compute, it's relevance to the rest of the paper is relatively weak. Also how is the LLM leverage For example for count-based queries the map-reduce paradigm that has been recommended, does it mean the LLM generates the code for map-reduce? if not how will this be implemented?\n9. The example in Figure 6 is a good example of my earlier comment on Conditional Rewrite. If the \"Call me Manana\" is not present in the document repository the query rewrite may tag it to some random group (based on what is retrieved) in the way the algorithm is designed/presented. it is not clear how this run-away is prevented to attach it to something random in what has been retrieved.\nSimilar comment on fig 7/8\n\n\nOverall, while interesting the paper has many unanswered questions. First, it is not clear on the cost vs. benefit analysis for the CE approach. Second, the conditional rewrite has a risk of runaway poor results which need to be addressed. Finally conditional compute is explained but how are some of it implemented is vague. In view of this, this paper in the current form leaves many questions unanswered on its applicability. More clarity on writing is preferable as there is a need to read further to understand the methodology."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "1.\tWhat is the specific connection between the initial focus on personalized and trustworthy AI and the later focus on long-context benchmarks? Could the authors clarify the relevance of these two aspects?\n\t2.\tWhy was the decision made to process context at test time when many changing contexts (e.g., user history) could be indexed incrementally? What benefits does test-time processing offer in these cases?\n\t3.\tHow are the three components (Conditional Embeddings, Iterative Query Rewriting, Conditional Compute) related to each other? Could the authors provide a clearer description of how these components work together within the overall system?\n\t4.\tWhy were most experiments focused on Conditional Embeddings rather than evaluating the full method? What impact do Iterative Query Rewriting and Conditional Compute have when used together with Conditional Embeddings?"
},
"rating": {
"value": 1
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 1
},
"strengths": {
            "value": "The starting idea is interesting: dealing with streaming and changing long context with tailored RAG."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces Test-time RAG, a framework that integrates Retrieval-Augmented Generation (RAG) with dynamic query handling at test time. It aims to improve the long-context understanding of Large Language Models (LLMs) by addressing the limitations of static retrieval and pre-indexed document embeddings. The framework consists of three main components: Conditional Embeddings, Iterative Query Rewriting, and Conditional Compute, which work together to enhance context processing dynamically. The paper evaluates Test-time RAG on several benchmarks, showing improved performance in handling complex, long-context retrieval tasks such as knowledge-intensive question answering (QA)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. **Poor Writing and Organization**: The presentation of the paper is confusing and difficult to follow due to ambiguous and hard-to-read sentences, poor organization, and unclear structuring of key arguments.\n\n2. **Inconsistent Focus**: The introduction starts by discussing personalized and trustworthy AI, but the subsequent sections focus on long-context benchmarks without sufficiently connecting to the original arguments.\n\n3. **Strange Motivation for Test-time Processing**: The rationale for processing context at test time appears weak, especially given that user history or dynamically changing contexts could be indexed incrementally.\n\n4. **Lack of Component Integration**: The paper presents three distinct components (Conditional Embeddings, Iterative Query Rewriting, Conditional Compute) but does not adequately explain their interrelationship. The experiments are also conducted individually rather than for the overall method, which leaves the effectiveness of the integrated system unclear.\n\n5. **Inefficient Indexing**: The experiments on Conditional Embedding involve incorporating the query into the document indexing process, which makes it impractical because the indexed embeddings cannot be reused efficiently.\n\n6. **Weak Experimental Validation**: The experiments provided are not comprehensive or robust enough to fully validate the effectiveness of the proposed techniques.\n\n7. **Lack of Discussion or Justification**: There is minimal discussion on why or how the proposed techniques improve the results, leaving the reader with questions about the validity and significance of the approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 1
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Can you test the time consumption of test-time RAG, compared with standard RAG methods, on these general problems?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The author conducts experiments on 3 RAG datasets with 2 different LLMs and 2 different embedding models to demonstrate the advantage of test-time RAG."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper propose test-time RAG, aiming at constructing more contextual text embeddings to improve the retrieval accuracy. This method consists of 3 technologies: conditional embedding, query rewriting and conditional computing. It conducts experiments on 3 RAG datasets with 2 different LLMs and 2 different embedding models, to demonstrate this method can improve models’ performance."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Poor writing\n\nThe author’s presentation is hard to understand. I cannot understand the description of the methods even after reading 10 times, such as the sentence “utilize encoder-decoder variants which jointly condition context on text appearing before and after a given token in a sequence” (line 164).\n\n2. Impracticable time consumption\n\n“Conditional embedding” needs to encode each document in test-time, which will certainly take extremely much more time. Even though the author proposes “conditional computing” to reduce the latency, it can only function on a narrow range of task types such as counting or KV retrieval. But the author did not compare the time latency with that of standard RAG methods on more general problems. This casts doubt on the practical feasibility of this approach in real-world scenarios.\n\n\n3. Query rewriting may introduce more issues\n\nThe author claims that query rewriting can leverage knowledge in LLM’s pretraining data. However, it can be affected by LLM’s hallucination, which is exactly what RAG technology wants to solve. Therefore, in most RAG scenarios, LLMs are allowed to only use knowledge in the retrieved documents instead of their inner knowledge.\n\n4. The resolution of the figures is low. And the sizes and positions of them are also inapt."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024testtime,\ntitle={Test-Time {RAG}: Enhancing Long Context Understanding in {LLM}s with Retrieval-Augmented Mechanisms},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zgs450VzkU},\nnote={under review}\n}"
},
"abstract": {
"value": "Large Language Models (LLMs) are becoming increasingly pivotal in applications that depend on extensive personalized context, such as conversational agents and specialized task-oriented systems. In these scenarios, effective long-context handling is essential to support agentic tasks and enhance in-context learning capabilities. To address this challenge, we propose a novel integration of Retrieval-Augmented Generation (RAG) techniques with LLMs, designed to enhance their ability to effectively manage and utilize large contextual information. Our methodology, Test-time RAG, enriches LLMs by dynamically generating contextual embeddings and utilizing semantic search to retrieve the most relevant document chunks at test time. This process preserves the context's meaning and enhances the model’s responsiveness and accuracy in knowledge-intensive Question Answering tasks. We evaluate our approach using three benchmarks that capture our system's ability synthesize and retrieve information across extensive texts: HotpotQA (+9.87%), QASPER (+3.15%), and Natural Questions (+7.29%). The results indicate a substantial improvement in handling complex queries: demonstrating the effectiveness of Test-time RAG in maintaining high performance across varied document lengths and complexities."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Retreival Augmented Generation (RAG)",
"Personalization",
"LLM"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/68c724b865f71630d02ef6fe3e4af40f0e13029a.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Test-Time RAG: Enhancing Long Context Understanding in LLMs with Retrieval-Augmented Mechanisms"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zhFyKgqxlz | Exact Community Recovery under Side Information: Optimality of Spectral Algorithms | main | Active | Community Detection;Spectral Algorithms;Side Information | learning theory | 3;6;6;8 | 4;4;3;3 | 2;3;3;4 | 2;3;3;4 | 3;3;3;2 | 5.75 | 3.5 | 3 | 3 | 2.75 | -0.70014 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 4
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "My only concern with this paper has to do with the formatting issue raised in the weaknesses section. If this is a typical approach that is commonplace and accepted in ML conferences, then I am willing to cede that point."
},
"rating": {
"value": 8
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 4
},
"strengths": {
"value": "This is a well-written paper -- there are quite a few moving parts\ninvolved in this work, but the explanation was clear and easy to\nfollow. The authors ought to be commended for clear technical\ncommunication. I do have onemajor misgiving about this, but that \nis saved for the weaknesses section.\n\nI am admittedly not too familiar with the literature on exact recovery\nof SBMs beyond the paper of Abbe (2018), but the results here seem\nquite good, and fit into the surveyed prior results nicely. I\nappreciate how the recovery guarantees are accompanied by specific\nalgorithms, rather than proving detectability in merely an abstract\nway."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper considers the problem of recovery of two-community block\nmodels. It sits in the context of a broad literature on detectability\nlimits, exact algorithms, etc. The particular setting considered by\nthe authors is one of a general sort of block-model random matrix\nbased on a given partition/distribution model, along with a vector of\n\"side-information\" that is aligned with the block structure in some\nway. The authors study the information theoretic limit of this\ndetection problem via simple spectral methods, and by studying\n\"genie-aided estimators.\""
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My critique of this paper is not one aimed at the strength of the\nresults -- indeed, I found the results to be nice, and the exposition\nto be quite good. However, I have the sense that the main body of the\npaper was merely a rough overview of the \"real paper\" hidden in the\nappendix. I understand that for space reasons, lots of ML papers have\nto put substantial content in the supplementary material. I don't have\na deep argument for why this is the case, but I get the feeling that\nthis work is better-suited for a journal format. For instance, one of\nthe main constructed algorithms that attains exact recovery\n(Algorithm 2) is only given in the supplementary material. It would be\nmore appropriate to structure the paper so that the algorithm is given\nand explained in the body of the paper, with other variants possibly left\nto the supplement.\n\nI should note that although I am a reader of theory papers of this\nsort, I have not reviewed many of them in the context of ML\nconferences, so I am ultimately willing to defer to the other\nreviewers on this point.\n\nOther notes:\n\n(I am not holding the following against you in any way -- deadlines\ncan be tough!)\n\nPlease check the paper for spleling mistakes -- I spotted a few such\nas \"Gaussain Features\" and \"shifing the eigenvector combination.\""
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. On Page 6, $L(t)$ and $I^*$ are introduced to characterize the information theoretic limit. Are there any general sufficient conditions that ensure $L(t)$, and consequently $I^*$, are well-defined? \n\n2. In Section 5.1, degree-profiling algorithm is mentioned as having certain caveat. However, it is unclear to me what this algorithm refers to. Perhaps adding a brief summary of the key points of this algorithm would improve clarity.\n\n3. On page 9, it is stated that an approximate linear combination coefficients $c_i$ such that $w\\approx \\sum_{i=1}^K c_i/\\lambda_i^* u_i^*$. Is there any intuitive explanation for why $w$ is approximately in the span of $u_i^*$? Is this result purely from the computations of the specific model (Gaussian and Bernoulli), or is there a broader rationale suggesting that this holds across more robust models? \n\n4. Regarding the algorithm, do the parameters $c_i$ and $\\gamma$ need be explicitly computed from the model parameters, or is there a possibility to estimate them using only the adjacency matrix $A$ and the side information?\n\n5. This final question may not be directly related to the problem considered in the paper. Dreveton et al (2024) provides the information theoretic limit of exact recovery in terms of $I^*$. I am curious whether the regime $I^*<1$ can be further split. Specifically, could we identify a sub-regime where exact recovery is achievable even without the side information, and another sub-regime where side information is essential with some specific conditions?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "The paper is well-structured, with clearly stated objectives and supported arguments. It provides background definitions and results on related models, which greatly aids in understanding the topic. The novel approach of connecting the eigenvectors to the genie-aided estimator is particularly intriguing, contributing to the design of an efficient spectral algorithm. Additionally, the rigorous mathematical derivations are well-organized in the appendix, offering solid theoretical support."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of exact community recovery for general two-community block models with node-attributed side information. The authors consider both Gaussian and Bernoulli matrix models for the edge observations, as well as a general side information channel, including Gaussian Features and Binary Erasure Channel. Based on the recent work of Dreveton et al. (2024) on the information-theoretic limit on this problem, the authors demonstrate the algorithmic achievability using a spectral algorithm that incorporates side information with the eigenvectors of the observed edge matrix. The main technical novelty lies in establishing a rigorous connection between the spectral estimator and the genie-aided estimator by the results on the first-order approximation of the eigenvectors in Abbe et al. (2020)."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "Due to the page limit, I feel that some important details have been deferred to the appendix. For instance, I would like to see a more rigorous discussion in the main context on the special form of the genie score vector, $z^* \\approx Aw + \\gamma I_n$; how $w$ can be derived from the model, any regularity conditions needed for this approximation, and whether this form extends to models beyond Gaussian and Bernoulli. I suggest moving part of the theoretical discussion into the main text and possibly relocating some of the preliminaries to the appendix.\n\n Additionally, I am uncertain about the robustness of this algorithm, as the coefficients $c_i$ relating $w$ to eigenvectors must be computed from model parameters. However, in practice, we typically observe only the adjacency matrix and side information, without access to underlying parameters like $a_1, a_2, b$ in ${\\rm SBM}_n(\\rho, a_1,a_2, b)$. A discussion or clarification on this would be beneficial.\n \n Another limitation of this paper is the lack of a numerical study. I suggest including simulation examples that compare the proposed algorithm’s performance (in terms of accuracy and computation cost) with the existing two-stage algorithms. This would strengthen the paper by demonstrating the practical advantages of this approach."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "- I understand this is a theoretical work, but have the authors verified their algorithm on any dataset? \n- What is the computational complexity in terms of n?\n- Is it possible to generalize this work to the case of multiple communities? If not, what are the technical difficulties?"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper is well organized and easy to follow. The notations and definitions are mostly self contained.\n- The authors provide the statistical bounds for exact recovery in the case of the Rank One Spike (ROS) model and the Stochastic Blockmodel (SBM)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper studies block models with two communities, where each community has a general form of edge distribution. Side information on nodes is also considered. The authors specifically focuses on the case of Bernoulli and Gaussian distributions. The paper then proposes a variant of spectral algorithm to recover the community structure in the model, and provides statistical bounds for exact recovery of the signal."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "My biggest concern is that some of the language in the paper seems overclaiming. For example, the authors state that the paper proposes a \"unified model\" and \"unified proof framework\". The amount of novelty and contribution is unclear to me. Can the authors specify, preferably in bullet points, their new results, algorithm, bounds, proof techniques, or experiments?\n\nBeyond that, I have some more detailed questions:\n- The paper aims to provide a unified framework of analysis for at-most-two-community block models, but the results in Theorem 1 and 2 only target two specific and arguably \"simpler\" classic models. Why is that? Is it possible to give a bound based on the definition of P+, P-, and Q, for instance? \n- Section 3.1: Does the information-theoretic bounds in Prop 3.1 contain anything novel, or just cited as is from [Dreveton et al., 2024]? If it is the latter, I strongly believe it should be moved to the preliminary section and not put in the \"Main Results\".\n- Theorem 1 and 2: The exact recovery condition depends on I* > 1. Is there any way to calculate this efficiently? From a reader's perspective, I'd love to see many examples of two community models and know how their parameters are related to this I*. For example, can the authors relate I* to p and q in the SBM?\n- The proofs like the concentration inequalities and the spectral analysis in the paper seem standard. In fact the union bound strategy feels similar to the ones in E. Abbe's and Dreveton's work. What are the technical contribution of this paper? Is there any proof technique that can be used in the future?\n\nMinor:\n- Bern() used but not defined on the first page."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "See weaknesses above. Also, the paper could benefit from a careful proofreading. For example:\n\n1. Gaussain -> Gaussian on page 3.\n2. In Algorithm 7, $\\alpha_n$ is not defined. Should it be $\\alpha$?\n3. In Algorithm 6, the input section ends with two periods."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper is well-organized, and the arguments flow logically.\n\n2. The claims are solid, and the proposed framework covers a broad range of interesting models.\n\n3. The results provide theoretical insights into community detection with side information and demonstrate the optimality of spectral methods."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper addresses the problem of exact community recovery in two-community block models with node-attributed side information. The primary contribution is the design of an optimal spectral algorithm that achieves the information-theoretic limits in various settings. The general block model in this paper encompass rank-one spike Gaussian model and SBM, and the side information incorporates GMM and two types of \"noisy'' labels."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Some related literature is missing, e.g., [1]. Specifically, the SBM model with $\\rho = 1/2$ and Gaussian Features aligns with Section 4 in [1] and should be cited and compared. Additionally, for the SBM with GF where $\\rho = 1/2$ and $a_1 = a_2 = a$, the expression for $I^*$ takes a simple form in Equation (4.4) of [1]. Providing explicit forms of $I^*$ for other models would enhance interpretability of the results.\n\n2. For SBM with BEC/BSC, the algorithms assume prior knowledge of parameters $(\\rho,\\varepsilon, \\alpha)$. The paper should mention this clearly and discuss why it's reasonable to assume this prior knowledge, or suggest how these parameters could be estimated. From a theory standpoint, achieving optimality might require paying a cost for adapting to these parameters, especially in regimes $\\varepsilon = n^{-\\beta}$ and $\\alpha = n^{-\\beta}$ for $\\beta>0$. \n\n3. The two-community assumption limits the algorithm’s practical use. While I understand that the optimality of the spectral algorithm depends on this assumption, the statement in Appendix A, \"The entire framework of genie-aided estimation naturally generalizes to the multi-community case,\" is confusing. Can the authors elaborate on how the spectral algorithm could be modified, for example, to handle $K=3$ communities?\n\n4. There are no experiments. It would be helpful to include numerical experiments demonstrating the algorithm’s performance on the specific models considered.\n\n[1] Abbe, E., Fan, J., & Wang, K. (2022). An $\\ell_p$ theory of PCA and spectral clustering. The Annals of Statistics, 50(4), 2359-2385."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "We present optimal spectral algorithms for exact recovery problems that can incorporate node-attributed side information, for popular Gaussian and Bernoulli matrix models including SBM, submatrix localization, and Z_2 synchronization."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024exact,\ntitle={Exact Community Recovery under Side Information: Optimality of Spectral Algorithms},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zhFyKgqxlz},\nnote={under review}\n}"
},
"abstract": {
"value": "We study the problem of exact community recovery in general, two-community block models, in the presence of node-attributed *side information*. We allow for a very general side information channel for node attributes, and for pairwise (edge) observations, consider both Bernoulli and Gaussian matrix models, capturing the Stochastic Block Model, Submatrix Localization, and $\\mathbb{Z}_2$-Synchronization as special cases. A recent work of Dreveton et al. 2024 characterized the information-theoretic limit of a very general exact recovery problem with side information. In this paper, we show algorithmic achievability in the above important cases by designing a simple but optimal spectral algorithm that incorporates side information (when present) along with the eigenvectors of the pairwise observation matrix. Using the powerful tool of entrywise eigenvector analysis [Abbe et al. 2020], we show that our spectral algorithm can mimic the so called *genie-aided estimators*, where the $i^{\\mathrm{th}}$ genie-aided estimator optimally computes the estimate of the $i^{\\mathrm{th}}$ label, when all remaining labels are revealed by a genie. This perspective provides a unified understanding of the optimality of spectral algorithms for various exact recovery problems in a recent line of work."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"Community Detection",
"Spectral Algorithms",
"Side Information"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/3d21772225d991e0a7149c156224cbe9e5db29b4.pdf"
},
"presentation": null,
"primary_area": {
"value": "learning theory"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": {
"value": "/attachment/9151a49b03cee04d7fc0aafc5b891d03c76fdb9d.pdf"
},
"title": {
"value": "Exact Community Recovery under Side Information: Optimality of Spectral Algorithms"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zhxATDLAmJ | Loss2Net: Loss Meta-Learning for Regression with A-priori Unknown Metrics | main | Active | loss meta-learning;loss-metric mismatch;system management;unknown metric;transfer learning | transfer learning, meta learning, and lifelong learning | 3;3;5;5;6 | 5;5;4;3;3 | 2;2;2;3;3 | 2;2;2;2;2 | 2;2;2;3;3 | 4.4 | 4 | 2.4 | 2 | 2.4 | -0.931695 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "1. The proposed method relies on high-quality system response data, how does it perform when the system response data contain noise?"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The paper studies an important problem to use meta learning in real-world regression problems with unknown loss functions.\n\n2. The paper proposes a method to learn non-differentiable, multi-dimensional losses without prior assumptions."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents Loss2Net, a meta-learning framework for regression tasks with unknown performance metrics, where traditional loss functions (like MSE) may be ineffective. Loss2Net jointly trains a predictor and a loss-shaper network, enabling it to learn both optimal predictions and the corresponding loss function directly from system feedback. This approach accommodates complex, non-differentiable, and time-correlated loss functions. Through experiments in the power grid and telecom resource management, Loss2Net shows improved adaptability over standard methods, demonstrating its potential for real-world engineering applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed method relies on high-quality system response data, which may limit robustness with noisy data.\n\n2. The dual-component architecture and the joint optimization may result in higher computational requirements and complexity, making it less suitable for lightweight or real-time applications."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "-How can the real performance metric M be derived from y ̂ and v to calculate equation (2) in your experiments? \n-The definition of the metric is not clearly explained, and the variables in the datasets are not sufficiently stated to match the notation in the paper.\n-The paper lacks several key visualizations that would better illustrate the motivations behind the work, making it harder for researchers outside the engineering field to fully grasp its advantages. To enhance readability, please consider using more symbols and visual aids where appropriate."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- This paper proposes the Loss2Net model, in which a predictor and a loss shaper are designed separately. The predictor consists of a set of individual regressors, and the loss shaper is designed with an INR structure, which can learn geometric multidimensional representations based on coordinates. \n- The Loss2Net model proposed in this paper does not require assumptions about the specific structure of the loss function, making it capable of handling real-world applications where the relationship between predictive variables and performance metrics is unknown in advance. \n- Experiments were conducted on multiple real-world applications to demonstrate the advantages of Loss2Net."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper leverages loss meta-learning to address the issue of loss-metric mismatch by proposing a Loss2Net model designed to handle common regression tasks in real-world systems. The model is capable of jointly learning both the actual regressor and the loss function without any assumptions about the structure of the loss function. Experiments were conducted on several real-world cases, demonstrating that the model can learn previously unidentified loss functions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- The motivation of this paper is not clearly explained. In the introduction, the issues with loss functions like MSE in real-world applications are described only in text, without any visual representation to provide a clearer illustration. This makes it unfriendly for researchers outside the engineering field and does not effectively convey the motivation of the paper.\n- In Section 3.1, the paper alternately or iteratively optimizes the loss shaper and regressor through (2) and (3), but these two formulas do not form a bi-level optimization problem because the inner-layer regressor parameter θ^pdoes not depend on the outer-layer parameter $θ^l$. Therefore, it cannot be considered as meta-learning.\n- In Section 3.2 of the paper, the authors only explain in text how the regressor and loss shaper are designed, and their contributions are mixed with existing research, failing to clearly highlight their own contributions.\n- In the experimental section of the paper, the authors only describe the definitions of the two real-world application metrics in text but do not provide the mathematical symbols and meanings for each variable, especially how the metric M is calculated during execution. This is crucial for determining whether the optimization model's equation (2) can be computed, but the paper does not address how $M=f_M (y ̂,v)$ is derived in the experiments."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "The procedure of the hyperparameter tuning of the experiment was not provided in the papar. Please provide this part of details."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "This paper proposed an innovative method which handle the regression problem when the loss function is unknow, and this problem has not been studied previously. Provide an interested method that modeling the loss with a model (Loss2Net)."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper introduces a novel framework, Loss2Net, designed for regression tasks where the optimal loss function is unknown a priori. Traditional loss functions like MSE may not align with task-specific performance metrics, leading to suboptimal predictions. Loss2Net addresses this by jointly training a predictor network and a \"loss shaper\" network, which learns the unknown loss function directly from system feedback, without prior assumptions."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**Limited Theoretical Insight**: While the paper demonstrates empirical success, it lacks a rigorous theoretical foundation explaining why the proposed method performs well. Providing theoretical analysis on convergence, stability, and generalization properties would enhance the scientific rigor and credibility of Loss2Net. Such insights could help validate the model's robustness and its applicability across different problem settings, offering a deeper understanding of when and why this approach succeeds or encounters limitations.\n\n**Lack of Synthetic Experiments on Benchmark Datasets**: Although Loss2Net shows superior performance over standard losses like MSE and MSLE, the reasons behind this improvement remain unclear due to limited experimental analysis. Including synthetic experiments on widely-used benchmark regression datasets, would better illustrate the conditions under which Loss2Net outperforms conventional methods. Or even the synthetic dataset would provide a better understanding on the method. An experiment on synthetic dataset can help better understanding how the loss shaper works. Instead of a enxperiment on the real-world datasets. For example generate different target metrics (loss functions) and see how your Loss2Net fit those target loss functions better than the simple loss function, and how those simple loss function lose efficacy. \n \nBesides, using a benchmark experiments would also facilitate easier comparison for future research, enabling a clearer demonstration of Loss2Net’s advantages and limitations in standardized settings."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. The loss landscape is highly discontinuous and illustrates multiple stages each each has a very flat curvature. Can the author explain more about how the model can be trained in this scenario? And I suppose that once the loss function landed in the discontinuous points some numerical problem such as Nan may happen.\n\n2. Since the loss function is learned jointly with the main task model, the supervision of the loss function could be unstable while training. How does the learning curve of the model behave? \n\n3. How the loss function is initialised?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The motivation is clear and practical. \n2. The paper is clear and easy to follow."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The authors of this submission aim to improve the supervision provided by the task-agnostic loss function for regression tasks by learning loss functions. Instead of hand-designing loss function for specific regression tasks, a loss function parameterised by a neural network approximating the target loss function to enable differentiable feedback signal is designed and learned during the task model training. The method is empirically tested on two datasets and demonstrates improvements compared to baseline losses."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The proposed algorithm is closer to learning a loss function approximator when the target signal is not differentiable than the meta-learning loss function which is commonly instantiated by learning a reusable loss function or an auxiliary loss to improve the model’s performance. In addition, the algorithm framework is not close to any existing meta-learning algorithm framework. \n\n2. The experiments are very limited, using only shallow neural networks on two datasets. More applications and tasks can be found in [1]. The majority of the regression tasks in [1] are differentiable, so this can provide some task cases for whether the proposed algorithm can learn beyond the teacher. \n\n3. The writing of the paper is not well-structured. Discussion and compassion with other methods are presented too much in the method section. \n\n4. In remark 2, the prediction \\hat{y} is fed into predictor DNN with random vectors and is claimed as novel by the authors. However, it has been commonly applied to improve the robustness of the model. \n\n5. In the nondifferentiable cases, some gradient estimators, such as Reinforce, REBAR [2] and other advanced methods, could be the baselines for comparison. \n\n[1] Lathuilière S, Mesejo P, Alameda-Pineda X, Horaud R. A comprehensive analysis of deep regression. IEEE transactions on pattern analysis and machine intelligence. 2019 Apr 11;42(9):2065-81.\n\n[2] Tucker G, Mnih A, Maddison CJ, Lawson J, Sohl-Dickstein J. Rebar: Low-variance, unbiased gradient estimates for discrete latent variable models. Advances in Neural Information Processing Systems. 2017;30."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": {
"value": "N/A"
},
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See weaknesses"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "• The proposed method does not require prior knowledge of the (base) loss function structure as it is represented as a learnable meta-network, making it highly flexible for various application domains (despite the authors only really showing results in the domain of regression/time series).\n\n• The loss function is leaned jointly with the regressor in the same backpropagation process by minimizing the L2 loss between the meta-learned loss and the target performance metric, which can help improve alignment with the target performance metric. The method is capable of approximating non-differentiable performance metrics, which is a desirable characteristic."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "In this paper, the authors propose a method called Loss2Net, which aims to address the problem where the unknown prediction metric relationship is time-correlated, non-differentiable, or depends on intertwined predictions. The proposed method introduces a novel architecture and optimization approach that simultaneously learns the appropriate regressor and the loss function directly from system responses. Experiments on power grid and telecommunications infrastructure optimization demonstrate its effectiveness in real-world applications."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "•\tThe proposed method optimizes the loss network by minimizing the L2 loss between the meta-learned loss and the target performance metric, both on the training set. The authors don’t discuss in detail how this is different from prior methods such as ML^3 which used unrolled differentiation, on the validation loss. In my opinion, it is not at all clear why the Loss2Net objective can improve testing performance. It would be beneficial to outline theoretically why this is the case and empirically compare performance against unrolled differentiation. \n\n• In the background, the authors discuss multiple existing loss function learning methods, e.g., TaylorGLO, ML^3, EvoMAL. Despite this, there are no experiments directly comparing their proposed method to them, with only a modest number of experiments comparing to simple handcrafted loss functions such as MSE, MSLE, and $\\alpha$-OMC\n\n• The overall communication of the writing could be improved in my opinion, especially given that the proposed algorithm is relatively straightforward (Algorithm 1, page 6).\n\n• The background discussion, lines 138 – 143, is not correct. You mentioned that methods such as GLO can’t be applied to regression, this is not correct. Furthermore, EvoMAL’s experiments are predominantly on single-task learning, not multi-task learning. Furthermore, given that these methods can also optimize for non-differentiable performance metrics, it is of high importance that the authors further highlight the novelty of their proposed method.\n\n• I think for a conference such as ICLR which is less application-orientated relative to conferences such as KDD etc., I would have preferred more standard benchmarks that way readers could better gauge the performance of the proposed algorithm. 
For example, given the generality of the proposed method I don’t see why experiments couldn’t have been performed on CIFAR-10 or ImageNet, as opposed to power grid and telecommunications infrastructure optimization problems."
},
"withdrawal_confirmation": null
},
{
"TLDR": {
"value": "Loss2Net allows us to learn regression tasks without prior knowledge of the loss function, and can learn complex entangled non-differentiable losses."
},
"_bibtex": {
"value": "@inproceedings{\nanonymous2024lossnet,\ntitle={Loss2Net: Loss Meta-Learning for Regression with A-priori Unknown Metrics},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zhxATDLAmJ},\nnote={under review}\n}"
},
"abstract": {
"value": "There exist many practical applications where regression tasks must cope with a generally overseen problem: the output variable to be computed, which is often a decision variable, impacts the performance metric to optimize in a manner that is not known a priori. This challenge translates into a loss-metric mismatch, which makes standard loss functions such as Mean Square Error (MSE) not suitable because they significantly hinder the final performance. While this problem is of crucial importance in, e.g., many engineering and economic applications, the literature in meta-learning of loss functions has focused on other problems, such as classification or few-shot learning tasks. In this work, we aim at closing this research gap by proposing a model that can handle common situations in real systems where the unknown prediction-metric relationship is time-correlated, non-differentiable, or depends on multiple intertwined predictions. We present a novel loss meta-learning architecture for regression, named Loss2Net, which is able to (i) jointly learn the actual regressor and the loss function that it should minimize, directly from system responses; (ii) it does so without any assumption on the loss function structure; (iii) it provides a manner to learn non-differentiable and multi-dimensional loss functions from entangled performance metrics. Detailed experiments for power grid and telecommunications infrastructure optimization, grounded on real-world measurement data, demonstrate how Loss2Net can effectively learn unidentified loss functions."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"loss meta-learning",
"loss-metric mismatch",
"system management",
"unknown metric",
"transfer learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/8b729dd8d3093ea738d59d2f635e0f9bab1db5b3.pdf"
},
"presentation": null,
"primary_area": {
"value": "transfer learning, meta learning, and lifelong learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Loss2Net: Loss Meta-Learning for Regression with A-priori Unknown Metrics"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zi0XgnZlcl | MixLLM: Mixed-precision LLM Quantization with Algorithm-system Co-design | main | Active | LLM;Quantization;Mixed-precision | foundation or frontier models, including LLMs | 5;5;5;6 | 4;4;4;3 | 2;2;2;3 | 3;2;3;3 | 2;2;4;2 | 5.25 | 3.75 | 2.25 | 2.75 | 2.5 | -1 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 4
},
"primary_area": null,
"questions": {
"value": "1. In Eq 4, two design decisions that differentiate the proposed salience measure from previous work is to (1) use non-diagonal Fisher information matrix and (2) not ignoring first-order information. Can you provide ablation results show how important they are to the final results?\n\n2. MixLLM is evaluated with one fixed setting of 20% 8-bit + 80% 4-bit. It will be nice to see how the accuracy / latency / throughput change when this ratio is changed.\n\n3. The author makes the argument that the proposed one-pass method is faster / competitive to iterative methods. It will be nice to show how much speed-up / accuracy difference does this make."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "* The paper does a good job summarizing and providing useful critiques to different PTQ approaches in Section 2, including weight-only quantization, weight-activation quantization, outlier separation and mixed-precision approaches. Many of the assessment maybe subjective but accurate according to the reviewer's own experience.\n\n* This proposed method which uses salience information to perform packing of output channels into different bit-rate bucket is interesting and novel to the knowledge of the reviewer. This process is intuitive, and requires just a simple sorting instead of using iterative procedure, which as the authors pointed out, \"saves a lot of computation\" (Ln269). The results looks good on 7B-sized models."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents (1) a survey of existing post-training-quantization (PTQ) approaches; (2) a new approach for mixed precision based on global salience. \n\nThe proposed method defines a global salience measure of each output channel, similar to that of Kwon et al., 2022 and Kim et al., 2024 but with modifications. Different from previous works that use such salience information to perform sparse-and-dense decomposition, this paper packs output channels into two bit-rate buckets (8-bit symmetric and 4-bit asymmetric). The mixed-precision matmul is then carried out with a fused kernel which scatters output (of matmuls from different bitrate buckets) back to the corresponding indices."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "**The \"co-design\" analysis**\n\nWhile the paper claims \"algorithm-system co-design\", it doesn't contain much principled quantitative analysis. For a good \"co-design\" system paper, the reviewer would expect the author to use some kind of performance models, such as the roofline model to justify why the chosen design is a \"sweet spot\". Or at least use performance counters such as cache hit-rate, communication latency, memory bandwidth to substantiate the choice. In the current state, the paper relies too heavily on empirical observations and many claims are not very well explained. For example,\n* Ln46: \"Besides, the weight-only method can lead to system performance drop for large-batched workloads.\" \\\nIn what model and what's the batch size when this happens?\n\n* Ln75: \"MatMul execution tends to be bound more on the larger weight tensor rather than the smaller activation tensor, which weakens the need to push the activation smaller\" \\\n\"Weakens the need\" is a quite vague expression and it will be nice to draw your conclusion in a data-driven manner.\n\n* Ln695: \"Hard to achieve the peak performance due to the inefficiency of the sparse tensor computation on the GPU\"\\\nWhat is the utilization of the proposed method and how bad is the utilization on approaches that separate outliers in contrast?\n\n**Reproducibility**\n\nThe paper currently does not show throughput / latency numbers (or did I miss it?) nor was the inference kernel implementation provided. Because MixLLM uses different bit-rate (W4.8) and custom inference kernel (packing and scattering), it is important to carefully benchmark the latency and throughput against other methods. 
Currently the paper only says \"this function with the fused epilogue of MatMul to scatter the output to the corresponding indices, which is basically costless\" (Ln288) and \"MixLLM is on-pair for the system efficiency\nwhen compared to the state-of-the-art weight-activation quantization\" (Ln 466)\n\nWhile Github implementation is not generally required, the reviewer notes that most quantization papers do have OSS implementation (GPTQ, AWQ, LLM.int8, SmoothQuant, QoQ). Since the actual system performance of the scattering kernel is critical to the usefulness of this paper, it would be difficult to verify the results and seriously limit the usefulness of the paper without an implementation of the fused kernel."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1.\tCould you show the inference time of MixLLM in comparison to other methods under the same experimental setup?\n2.\tCould you provide a comparison of the performance and inference time of this method on larger models like LLaMA3 70B and Qwen2 72B against other methods?\n3.\tCould you provide the ablation experiments on performance and inference time for the 'two-step dequantization' proposed in this paper?"
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "This paper achieves better results on various LLMs, including Llama 3 8B, Llama 2 7B, Mistral 7B v0.3, Qwen 2 1.5B, and Qwen 2 7B. It also demonstrates improved performance across various benchmarks compared to QoQ, while maintaining similar memory consumption."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper proposes a new optimization approach for mixed precision quantization of output features, recognizing that different features have varying importance in the model. By allocating larger bit widths to the most critical features, MixLLM enhances accuracy while minimizing memory usage. To improve system efficiency, this paper also implements a two-step dequantization process, allowing the use of int8 Tensor Core computations and fast integer-float conversion to reduce dequantization overhead. The results demonstrate that MixLLM outperforms QoQ while maintaining similar memory consumption and system efficiency."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. In Table 1, QoQ fails to optimize the Qwen 2 1.5B and Qwen 2 7B models, leading to an unfair comparison.\n2. There is a lack of comparison of MixLLM's inference time with other methods.\n3. Figure 1 fails to clearly illustrate the complete process of MixLLM, and this paper includes only one figure."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "1. Experiments on Larger Models: The models tested in your work are relatively small in scale (up to 8B LLM). Conduct experiments on larger models, such as those with 30B or 70B is expected to demonstrate the scalability and effectiveness.\n\n2. Quantization Time Efficiency: This method involves calculating global loss and obtaining gradients to identify the output channels requiring higher bit-widths. Could you provide more details on the time efficiency of this quantization process? Specifically, how does the computational overhead compare to other quantization techniques, and does it significantly impact the overall efficiency when scaling to larger models?\n\n3. Latency Testing on Different GPUs: Given that your optimizations are designed to leverage GPU capabilities, have you conducted latency tests on different GPU architectures? Providing performance benchmarks across various GPUs would offer valuable support into the general applicability and potential limitations of your method in diverse hardware environments.\n\n4. Release of CUDA Kernel Code: To facilitate community validation and further research, are you planning to release the CUDA kernel code for your custom w8a8 computation? Sharing the code would enable others to replicate your results, integrate your optimizations into their own work, and contribute to advancements in this area."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The paper introduces a novel approach to pinpoint output channels that are difficult to quantize by utilizing global loss estimation and second-order Taylor expansion. This method allows for effective mixed-precision quantization of weights, specifically applying int4 and int8 to the output channels, which significantly reduces quantization loss without adding extra computational overhead.\n\n2. The design of a fast integer-to-float (i2f) conversion method is a noteworthy innovation. By leveraging GPU int8 computational capabilities and minimizing the latency typically associated with i2f conversions, the authors enhance computational efficiency, which is critical for high-performance applications.\n\n3. The paper is well-written, with clear descriptions and thorough explanations of the methods used. The authors provide comprehensive experiments that demonstrate their approach's effectiveness, showing that using int8 quantization on just 20% of the channels can achieve state-of-the-art results."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper address the performance degradation that often occurs when LLMs are quantized to low-bit representations. The authors propose a mixed-precision quantization method for model weights, specifically applying both int4 and int8 quantization to the output channels of the weights. The authors introduce an approach based on global loss estimation and second-order Taylor expansion. This method pinpoints the output channels that are most sensitive to quantization errors, allowing the allocation of higher precision to those critical channels. In addition, the authors design a fast i2f conversion technique to enhances the model's inference speed. Extensive experiments, the paper demonstrates that using 20% int8 quantization can achieve comparable performance with full-precision models."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Limited Model Size in Experiments: The models tested in the paper are relatively small. The absence of experiments on larger models, such as those with 30B or 70B parameters, raises questions about the scalability of the proposed method. Evaluating larger models would strengthen the paper by demonstrating the approach's effectiveness across a broader range of model sizes.\n\n2. Potential Efficiency Concerns: The quantization process requires performing global loss calculations and obtaining gradients, which may be computationally intensive. This could pose efficiency challenges, especially when dealing with very large models, and might offset some of the benefits gained from the quantization technique.\n\n3. Hardware Dependency: The implementation relies on specific w8a8 computational kernels to realize the performance gains. As a result, the acceleration may be significant only on certain GPUs that support these optimizations. This hardware dependency could limit the method's general applicability and usefulness across different computing environments that do not have the requisite hardware support."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "It would be better to expand the discussion on theoretical analysis behind MixLLM and the limitations of MixLLM."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "1. The introduction of MixLLM explores a novel optimization space in mixed-precision quantization through global salience identification.\n2. The paper clearly articulates how the two-step dequantization improves computational efficiency, particularly through leveraging Tensor Core capabilities, demonstrating both practicality and usability.\n3. The authors conduct extensive experiments across multiple tasks and models, providing compelling evidence for the advantages of MixLLM in terms of accuracy and efficiency."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents a method called MixLLM, aimed at achieving high accuracy and system efficiency through mixed-precision quantization. MixLLM introduces to identify the salience of output features based on loss distance estimates, focusing on global model loss rather than local layer loss. By assigning larger bit-widths to the most critical features, MixLLM achieve superior while maintaining low memory consumption. This paper presents a two-step dequantization process to reduce overhead and efficiently utilize int8 Tensor Core for computation. Experimental results demonstrate the effectiveness of MixLLM across various tasks compared to existing techniques."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. Although the study evaluates several models and datasets, the comparisons with some state-of-the-art methods are not sufficiently comprehensive, potentially limiting the generalizability of the results.\n2. The provided Algorithm 1 is not standardized. It should clearly specify the input and output, adhere to a standard for statement format, and include step numbers for each procedure.\n3. Provide detailed experimental setups, hyperparameters, and implementation specifics to Enhance Reproducibility."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024mixllm,\ntitle={Mix{LLM}: Mixed-precision {LLM} Quantization with Algorithm-system Co-design},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zi0XgnZlcl},\nnote={under review}\n}"
},
"abstract": {
"value": "Quantization has become one of the most effective methodologies to compress LLMs into smaller size.\nHowever, the existing quantization solutions still show limitations of either non-negligible accuracy drop or system inefficiency.\nIn this paper, we make a comprehensive\nanalysis of the general quantization principles on their effect to the triangle of accuracy, memory consumption and system efficiency.\nWe propose MixLLM that explores the new optimization space of mixed-precision quantization between output features based on the insight that different output features matter differently in the model.\nMixLLM identifies the output features with high salience in the global view rather than within each single layer,\neffectively assigning the larger bit-width to output features that need it most to achieve good accuracy with low memory consumption.\nWe present the sweet spot of quantization configuration of algorithm-system co-design that lead to high accuracy and system efficiency.\nTo address the system challenge of this sweet spot, we design the two-step dequantization to make use of the int8 Tensor Core easily and fast data type conversion to reduce dequantization overhead significantly.\nExtensive experiments show that MixLLM achieves the best accuracy on a variety of tasks for the popular LLMs than a set of state-of-the-art works.\nIt shows 0.31 lower perplexity and 0.43\\% improvement on zero shot tasks for Llama 3 8B than QoQ, with similar memory consumption and system efficiency."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"LLM",
"Quantization",
"Mixed-precision"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/443ce919e293fbc62df1fd5655b34421d3f8ca9a.pdf"
},
"presentation": null,
"primary_area": {
"value": "foundation or frontier models, including LLMs"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "MixLLM: Mixed-precision LLM Quantization with Algorithm-system Co-design"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |
|||||||
zi3MEZRCqd | Learning Robust Representations for Medical Images via Unifying (Self-)Supervisions | main | Active | medical image pre-training;medical image representation learning | unsupervised, self-supervised, semi-supervised, and supervised representation learning | 3;3;5;6;6 | 4;5;3;4;4 | 2;2;2;3;3 | 3;2;2;3;2 | 1;2;1;3;2 | 4.6 | 4 | 2.4 | 2.4 | 1.8 | -0.466252 | [
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 5
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "please refer to Weaknesses"
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "- The paper introduces unifying representations from multiple supervisions into a single embedding space for self-supervised learning and proposes a grouping strategy for mixed learning of representation vectors.\n\n- The model’s effectiveness is validated through evaluations across four different downstream tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper presents UmiF, a pre-training framework for medical image encoders that integrates multiple types of supervision, including self-supervision and annotations like segmentation labels, into a unified approach. UmiF creates a common embedding space with a token grouping strategy to leverage diverse data types for various downstream tasks. Pre-trained on 1.66 million samples from 14 public datasets, UmiF was evaluated in classification, segmentation, detection, retrieval, and VQA tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- **Contrastive Learning Design Concerns**: The design of the contrastive learning setup after grouping raises questions. According to the paper, a positive pair is represented by \\(f1_i, f0_j\\) where i and j are indices from different data points, meaning \\(f1_i\\) and \\(f0_j\\) are from different samples. Typically, a positive pair should be \\(f1_i, f0_i\\), where both elements come from the same sample, making the current approach unclear.\n\n- **Unfair Comparisons in Downstream Tasks**: There are substantial fairness issues in the downstream task comparisons. Competing models, such as Med-Unic and MGCA, are pre-trained on datasets with 380K and 217K samples respectively, whereas this study uses 1.66 million data pairs, including 1 million images. The model’s performance advantage in downstream tasks may stem from this large data disparity, making it difficult to attribute improvements solely to the proposed pre-training strategy.\n\n- **Performance in Table 2**: In Table 2, despite using more training data and supervision than Med-Unic, the proposed model does not achieve the best performance, which raises questions about the efficiency of the approach.\n\n- **Limited Ablation Study on Parameter r**: In the ablation study on the parameter r, only 1% of the RSNA dataset is used, rather than the full dataset, and no similar experiments are conducted on other datasets. It is unclear if the chosen r value on RSNA is robust and generalizable to other tasks, as this limited evaluation does not provide strong evidence of robustness.\n\n- **Inconsistencies Between Text and Figures**: There are inconsistencies between the text and figures. For instance, the text describes vector groups as Group 1 and Group 0, but the figure labels them as Group 1 and Group 2. \n\n- **CLS Token Generation Unclear**: The generation of the CLS token information is not clearly explained. 
According to the figure, the CLS token appears to be an output of the Flexible Token Grouping, but the paper does not specify how the CLS token is produced. Further clarification on this process would improve understanding."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 2
},
"primary_area": null,
"questions": {
"value": "See section of Weaknesses"
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- The framework combines multiple types of supervision (self-supervision, segmentation, and textual descriptions), which allows UmiF to accommodate and generalize well across a variety of medical image tasks.\n\n- By designing a unified token space and a novel flexible token grouping strategy, the authors effectively manage diverse data sources, which is essential given the limited size and annotation diversity in medical datasets.\n\n- The study is based on a large pre-training dataset of 1.66 million samples from 14 public datasets."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The paper presents a pre-training framework named UmiF (Unified Medical Image Framework), trained with diverse supervision types, such as self-supervision and external supervision (e.g., segmentation annotations, text descriptions), aiming to create robust, task-agnostic representations for medical images. UmiF converts various supervision inputs into token embeddings, utilizing a unified token space and flexible token grouping for contrastive learning and mask modeling. The pre-trained model yields state-of-the-art performance on several downstream tasks, including classification, segmentation, and visual question answering."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- UmiF is trained solely on publicly available datasets, and it is primarily focused on 2D X-ray images. What about the generalizability of UmiF to other medical imaging modalities like CT or MRI? And what about the generalizability to domain shift problem due to differences in patient populations and equipment quality. \n\n- There is a paper has the similar motivation. This paper unifies different data sources by homogenizing every supported input and output (including image, language, segmentation, bounding box…) into a sequence of discrete vocabulary tokens. However, this paper is not cited and compared in related work as well as experimental sections.\n[1*] Lu J, Clark C, Zellers R, et al. Unified-io: A unified model for vision, language, and multi-modal tasks[C]//The Eleventh International Conference on Learning Representations. 2022.\n\n- The motivation of flexible token grouping strategy is missing. I am wondering how the authors came up with this method to unify tokens from diverse data sources.\n\n- In Table 6, it is not clear why r=0.7 also shows a very good performance. According to other results of r>0.2, the tendency is the performance decreases with the increasing of r. Moreover, it is not clear why r>0.8 will lead to large performance descent. \n\n- In Table 7, it is not clear why image-segment shows the worst performance in most cases. Is it because tokens of segment is not good to represent the segment?\n\n- No ablation study of two losses.\n\n- Some typos, such as Tab 5 (but the authors used Table 4, 6 etc.). In line 511, one reference is ‘?’"
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 3
},
"contribution": {
"value": 2
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "Please address weaknesses #2&4. The paper could use some professional editing services."
},
"rating": {
"value": 5
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "1. The authors have gone through tremendous effort in collecting and inventorying the datasets. I can imagine the implementation wouldn't be easy to iron out the differences in the datasets and put them together to train one model. For that, I believe the significance of the paper should be pointed out.\n\n2. the benchmarking is comprehensive, ranging over the common medical image analysis tasks."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper introduces a multi-supervision unification strategy for medical image pretraining. The method allows report, segmentation, and classification (+ some others) types of supervision to jointly train one representation. The used modality is Chest X-ray (CXR). The authors collected a large-scale dataset sourced from the public domain, reaching 1M images and 1.66M supervision labels. It is reported the model, namely, UmiF reaches SOTA for a number of downstream tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "1. The clarity of the paper is a concern. Many places in the text lack proper explanation and are somewhat confusing. For example, L230 \"This interesting design allows more diverse views and enriches the learning tasks with many possibilities, surpassing previous VL learning approaches.\", The authors should clearly state why it is interesting. What are the many possibilities? What are the other diverse views (isn't the modality just CXR)? What evidence indicates your method surpasses the previous VL learning approach?\n2. Furthermore, Sec 3.2, perhaps the most important section in the paper is not well written, I've read it a few times and I still don't believe I have grasped the exact approach. \n3. I find Figure 1 hard to follow, the quantities in Sec 3.2 should be mapped to the figure. I also don't get the colour coding in Figure 1 for those tokens. The yellow/blue/no boundary cubes are also a very confusing way of presentation.\n4. The improvement over the previous state-of-the-art is marginal around 1 point in various measurements. As the authors claim a large-scale dataset of 1.66M image-supervision pairs vs \"previous effort of mostly limited to 380K image-report pairs or 838K images\", it is worth rethinking whether the effort spent on training such a large model on the twice amount of data makes sense.\n5. The title claims \"learning robust representation for medical images ...\", medical images are not just CXR, I would recommend claiming a lesser scope unless common modalities such as MRI/CT are also used.\n6. In Sec 3.1, the authors use \"modality abstraction\", which sounds cool but I would say it is actually confusing, the procedure is a label format conversion."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 1
},
"primary_area": null,
"questions": {
"value": "- Q1: Did you ablate the importance of using the masking and the contrastive loss by themselves?\n- Q2: How was _r_ chosen?\n- Q3: Shouldn't r be symmetric around 0.5? r=0 would just flip image embeddings to supervision and the other way around. Is this correct, or if not why not? \n- Q4: Have you tried replacing the mixing with a standard masking/token drop-out layer? Would be interesting to see if one actually has to mix tokens or if the dropping of tokens provides a similar regularizing effect."
},
"rating": {
"value": 3
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 2
},
"strengths": {
"value": "The proposal to not only include Image-Report pairs but also Images with other Supervision signals is an interesting and to my knowledge novel premise for the medical domain. Their idea of mixing image and supervision tokens are also innovative.\nMoreover, the amount of experiments conducted is broad, highlighting the generality of the ViT feature extractor."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "The Authors propose UmiF, a framework that aims to unify pre-training of any arbitrary Image-Label pair to train a robust encoder for any modality. To achieve this the authors propose three tokenizers, that each allows embedding to a shared unified token space (One Image, One Text, One Image Labels/Segmentation). These image / label tokens are either left split or merged to a certain degree, before being used for SSL training through contrastive training in a CLIP'esque fashion or through a reconstruction task. \nThey train their model on a wide variety of paired pre-training datasets and evaluate it on a broad set of downstream tasks, highlighting the final performance of UmiF's method.\n\nWhile innovative, the experiments are insufficient to highlight the proposed methodology. The authors stack a) a larger pre-training dataset b) Token mixing c) Masked and Contrastive losses together and don't provide experiments that disentangle which part brings performance and which part does not. Moreover, the presentation and language used in this paper are of insufficient quality and need a lot of work."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "While the premises are highly interesting the paper in it's current form has some issues:\n1. **Stacking of contributions** Currently the authors stack a variety of things on-top and don't ablate it properly, namely a) a larger pre-training dataset b) the token mixing block and c) the multiple SSL losses. Currently, it is impossible for a reader to know if their methodology is better downstream than the competing methods, as they create a much larger training data corpus. Maybe it's the masked reconstruction component, maybe not.\n2. **Presentation** The presentation of the paper as of right now is poor. It was very hard to read, as the language quality leaves a lot of room for improvement and should be checked by an English speaker to rework the manuscript. Moreover, Fig. 1 does a bad job of explaining the method contributing to difficulties in understanding the proposed method. Figures in the appendix are badly presented: Fig. 2 text is way too large. Table 3 does not have a caption, The description of the Image-Segment Dataset (Section A.4) is basically non-existent and should be filled accordingly. \n3. **Reproducibility** Currently the author's don't sufficiently explain their configuration of their methods. How was _r_ chosen ? In the text it is mentioned sometimes 1 and sometimes random between [0,1]. How did the authors split their data? Was there a train-test split during pre-training and fine-tuning? Was there different weightings between the losses?\n\n### Minor Points\n- The ablation of _r_ values does not contain r of 0.9 and 1.0 - It's mentioned in the text that these performed substantially worse, but I would like to have these values included in the table. Moreover this table should provide not only RSNA 1% AUC values. 
The downstream adaptation are just learning of a linear-layer so please provide ablations on more datasets and all values to show if the mixing of tokens actually provides a benefit.\n- The distinction between what this paper does relative to other paper's feels not well worked out. It would help a lot to see what makes this work distinct.\n- Similarities to MedUniC Paper. This paper's Table 2 is very similar to their Table 2 -- I would prefer to highlight this in the caption. \n- There are so many typos in this manuscripts. E.g. spellings of baseline methods: MedKLIIP/MedKILP/MedKLIP. It feels like no one proof-read this paper ever.\n- The Algorithm 1 is way too text heavy. If the authors want to go into detail about the sampling of their datasets they should move this into a separate algorithm to keep readability high.\n- The authors mention the importance of sampling smaller dataset more regularly but provide no results. Would be great to see an ablation table on this claim in the appendix."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": null,
"abstract": null,
"anonymous_url": null,
"authorids": null,
"authors": null,
"code_of_conduct": {
"value": "Yes"
},
"code_of_ethics": null,
"comment": null,
"confidence": {
"value": 4
},
"contribution": {
"value": 3
},
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": {
"value": [
"No ethics review needed."
]
},
"keywords": null,
"large_language_models": null,
"no_acknowledgement_section": null,
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": null,
"presentation": {
"value": 3
},
"primary_area": null,
"questions": {
"value": "Please address the comment in weakness part."
},
"rating": {
"value": 6
},
"reciprocal_reviewing": null,
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": {
"value": 3
},
"strengths": {
"value": "- They collected a large scale medical dataset from many supervision tasks for pre-training purposes, which are all public datasets.\n- I like the idea and method of tokenizing images, text, etc. to unify all common types of supervisions into a pre-training framework for medical encoders.\n- Various experiments are conducted to verify whether the new pre-training framework is good."
},
"student_author": null,
"submission_guidelines": null,
"summary": {
"value": "This paper propose a new unified pre-training framework to pre-train the model on medical images by unifying all common types of supervisions. They first convert all input data into token embeddings including image and language, segmentation (depend on the task of the dataset) modalities. Then, they apply a flexible grouping strategy that split token embeddings into two groups before feeding these two groups into a VIT and consider it as positive pairs for contrastive learning task. They also apply mask modeling on these two groups. They collect 1.66M samples from 14 public datasets and pre-train on this dataset. They conduct experiments on many downstream tasks."
},
"supplementary_material": null,
"title": null,
"venue": null,
"venueid": null,
"weaknesses": {
"value": "- There are some common self-supervised pre-training methods that you need to compare your method with to make sure your pre-training framework on medical images is strong because these methods can also utilize images in many datasets from different tasks and consider images and corresponding augmentation version as positive pairs before using contrastive algorithms such as InfoNCE, Graph-Matching, etc. to pre-train the model (instead of using two groups of token embeddings obtained from flexible grouping strategy as positive pairs for contrastive algorithm as in your method). For example:\n - Duy Minh Ho Nguyen et al. LVM-Med: Learning Large-Scale Self-Supervised Vision Models for Medical Imaging via Second-order Graph Matching. In NeurIPS, 2023\n - Adrien Bardes et al. Vicregl: Self-supervised learning of local visual features. In NeurIPS, 2022.\n - Mathilde Caron et al. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, 2021\n- The performance on downstream tasks are good but I think performance on classification and segmentation are not improved enough compared to Med-UniC (VIT-B). Can you give me an analysis of where and why the method shows improvements (or doesn't) compared to baselines ? It could provide more insight into the strengths and limitations of your method."
},
"withdrawal_confirmation": null
},
{
"TLDR": null,
"_bibtex": {
"value": "@inproceedings{\nanonymous2024learning,\ntitle={Learning Robust Representations for Medical Images via Unifying (Self-)Supervisions},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=zi3MEZRCqd},\nnote={under review}\n}"
},
"abstract": {
"value": "Pre-training medical image encoder to provide robust, task-agnostic representations is highly valuable, as it enhances the understanding of medical images and is important for performing many data-scarce analysis tasks. Current pre-training works are unable to integrate various types of supervisions, including self-supervision and external supervision such as segmentation annotations, while they are highly valuable for medical image understanding. Therefore, in this paper, we take the first step toward exploring unifying all common types of supervisions into a pre-training framework through a same scalable way. This require the pre-training framework being both unified, for accommodating diverse data and extensible, and effective, for making heterogeneous data synergistically assist unknown downstream tasks. To this end, we propose UmiF, whose principle is that once converted into token embeddings in a unified space, all diverse supervisions can be effectively utilized via contrastive learning and mask modeling with a same way. With UmiF, we pre-train on 1.66M samples from 14 public datasets, significantly surpassing previous efforts in terms of the dataset scale. We obtain and release the UmiF model, which achieved state-of-the-art performance across various downstream tasks, including classification, segmentation, and detection, retrieval and VQA."
},
"anonymous_url": {
"value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity."
},
"authorids": null,
"authors": null,
"code_of_conduct": null,
"code_of_ethics": {
"value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
},
"comment": null,
"confidence": null,
"contribution": null,
"desk_reject_comments": null,
"details_of_ethics_concerns": null,
"flag_for_ethics_review": null,
"keywords": {
"value": [
"medical image pre-training",
"medical image representation learning"
]
},
"large_language_models": null,
"no_acknowledgement_section": {
"value": "I certify that there is no acknowledgement section in this submission for double blind review."
},
"other_comments_on_LLMs": null,
"paperhash": null,
"pdf": {
"value": "/pdf/901796e0b8b3eeca3d767c044473e3b38b2d8cc1.pdf"
},
"presentation": null,
"primary_area": {
"value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning"
},
"questions": null,
"rating": null,
"reciprocal_reviewing": {
"value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6."
},
"resubmission": null,
"revert_desk_rejection_confirmation": null,
"revert_withdrawal_confirmation": null,
"soundness": null,
"strengths": null,
"student_author": null,
"submission_guidelines": {
"value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide."
},
"summary": null,
"supplementary_material": null,
"title": {
"value": "Learning Robust Representations for Medical Images via Unifying (Self-)Supervisions"
},
"venue": {
"value": "ICLR 2025 Conference Submission"
},
"venueid": {
"value": "ICLR.cc/2025/Conference/Submission"
},
"weaknesses": null,
"withdrawal_confirmation": null
}
] |